Re-adding 0.22.0 sources and removing the publish folder, which needed to be renamed to content for use with the ASF website publishing pipeline.
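
For reference, the rename described above would amount to a plain git move (a minimal sketch, assuming a standard git checkout; the actual follow-up commit is not part of this patch):

    git mv publish content
    git commit -m "Rename publish/ to content/ for the ASF website publishing pipeline"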
diff --git a/publish/assets/css/main.css b/publish/assets/css/main.css
deleted file mode 100644
index d4aa2c3..0000000
--- a/publish/assets/css/main.css
+++ /dev/null
@@ -1,79 +0,0 @@
-body {
-	background: rgba(0,0,0,0.04);
-}
-.container.content {
-	background: #fff;
-}
-h1, h2, h3 {
-	color: #004a63;
-}
-span.glyphicon {
-    font-size: 6em;
-	padding-top: 25px;
-}
-.section-ltgreen {
-	background: #40c3b0;
-	color: #fff;
-	font-size: 1.3em;
-	text-shadow: .5px .5px 0px #1b5d3e;
-}
-.section-ltgreen h2 {
-	color: #fff;
-}
-.section-footer {
-	background: #eee;
-}
-.section-homepage-header {
-	background-image: url("/assets/img/aurora_image_1600.jpg");
-	background-size: cover;
-	background-position: center bottom;
-}
-.section-homepage-header h1 {
-	color: #fff;
-	text-shadow: 1px 1px 3px #000;
-}
-.section-header {
-	background-image: url("/assets/img/aurora_image_1600.jpg");
-	background-size: cover;
-	color: #000;
-	background-color: #40c3b0;
-}
-.nav-bar a {
-	color: #fff;
-}
-.navbar li a:hover {
-	text-decoration-color: #40c3b0;
-	color: #fff;
-}
-.nav-bar li a:hover {
-	background: none;
-	text-decoration: underline;
-}
-.nav-bar li {
-	margin: 10px;
-	padding-top: 20px;
-}
-.buffer {
-	padding: 30px 0 30px 0;
-}
-.disclaimer {
-	font-size: .9em;
-	color: #AAA;
-}
-iframe {
-	border: none;
-}
-.author_contact {
-	display: inline-block;
-}
-.author_gravatar {
-	display: inline;
-	padding: 0 20px 20px 5px;
-}
-.share {
-	display: block;
-}
-code, pre {
-	color: #000;
-	border: none;
-}
diff --git a/publish/assets/img/apache_incubator_logo.png b/publish/assets/img/apache_incubator_logo.png
deleted file mode 100644
index 5900d82..0000000
--- a/publish/assets/img/apache_incubator_logo.png
+++ /dev/null
Binary files differ
diff --git a/publish/assets/img/aurora_image_1600.jpg b/publish/assets/img/aurora_image_1600.jpg
deleted file mode 100644
index b97a801..0000000
--- a/publish/assets/img/aurora_image_1600.jpg
+++ /dev/null
Binary files differ
diff --git a/publish/assets/img/aurora_logo_dkbkg.svg b/publish/assets/img/aurora_logo_dkbkg.svg
deleted file mode 100644
index 36b714b..0000000
--- a/publish/assets/img/aurora_logo_dkbkg.svg
+++ /dev/null
@@ -1,240 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<svg
-   xmlns:dc="http://purl.org/dc/elements/1.1/"
-   xmlns:cc="http://creativecommons.org/ns#"
-   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
-   xmlns:svg="http://www.w3.org/2000/svg"
-   xmlns="http://www.w3.org/2000/svg"
-   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
-   version="1.1"
-   width="425"
-   height="157.5"
-   id="svg2"
-   xml:space="preserve"><metadata
-     id="metadata8"><rdf:RDF><cc:Work
-         rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
-           rdf:resource="http://purl.org/dc/dcmitype/StillImage" /></cc:Work></rdf:RDF></metadata><defs
-     id="defs6"><clipPath
-       id="clipPath114"><path
-         d="M 0,150 660,150 660,0 0,0 0,150 z"
-         inkscape:connector-curvature="0"
-         id="path116" /></clipPath><clipPath
-       id="clipPath162"><path
-         d="M 0,150 660,150 660,0 0,0 0,150 z"
-         inkscape:connector-curvature="0"
-         id="path164" /></clipPath></defs><g
-     transform="matrix(1.25,0,0,-1.25,0,187.5)"
-     id="g10"><path
-       d="M 0,0 330,0 330,150 0,150 0,0 z"
-       inkscape:connector-curvature="0"
-       id="path12"
-       style="fill:none;fill-opacity:1;fill-rule:nonzero;stroke:none" /><g
-       transform="translate(89.4268,98.3438)"
-       id="g14"><path
-         d="M 0,0 -18.35,0 -9.16,-15.824 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path16"
-         style="fill:#63c4b1;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(69.2861,97.3203)"
-       id="g18"><path
-         d="m 0,0 -9.166,-15.852 18.354,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path20"
-         style="fill:#63c4b1;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(78.4736,79.4082)"
-       id="g22"><path
-         d="M 0,0 -18.354,0 -9.188,-15.863 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path24"
-         style="fill:#63c4b1;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(58.3584,78.3828)"
-       id="g26"><path
-         d="m 0,0 -9.219,-15.848 18.381,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path28"
-         style="fill:#63c4b1;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(91.1904,97.3203)"
-       id="g30"><path
-         d="m 0,0 -9.158,-15.852 18.351,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path32"
-         style="fill:#63c4b1;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(100.3838,79.4082)"
-       id="g34"><path
-         d="M 0,0 -18.352,0 -9.193,-15.863 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path36"
-         style="fill:#63c4b1;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(80.2666,116.2793)"
-       id="g38"><path
-         d="m 0,0 -9.189,-15.875 18.349,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path40"
-         style="fill:#63c4b1;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(67.5205,60.4707)"
-       id="g42"><path
-         d="M 0,0 -18.381,0 -9.162,-15.967 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path44"
-         style="fill:#63c4b1;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(102.1475,78.3828)"
-       id="g46"><path
-         d="m 0,0 -9.186,-15.848 18.375,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path48"
-         style="fill:#63c4b1;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(111.3369,60.4707)"
-       id="g50"><path
-         d="M 0,0 -18.375,0 -9.189,-15.967 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path52"
-         style="fill:#63c4b1;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(80.2666,78.3828)"
-       id="g54"><path
-         d="m 0,0 -9.189,-15.848 18.349,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path56"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(67.5205,98.3438)"
-       id="g58"><path
-         d="M 0,0 -18.381,0 -9.162,-15.824 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path60"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(38.2119,81.4688)"
-       id="g62"><path
-         d="M 0,0 18.348,0 9.162,15.852"
-         inkscape:connector-curvature="0"
-         id="path64"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(47.374,63.5449)"
-       id="g66"><path
-         d="m 0,0 9.186,15.863 -18.348,0"
-         inkscape:connector-curvature="0"
-         id="path68"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(113.0986,97.3203)"
-       id="g70"><path
-         d="m 0,0 -9.156,-15.852 18.345,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path72"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(122.2881,79.4082)"
-       id="g74"><path
-         d="M 0,0 -18.346,0 -9.189,-15.863 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path76"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(100.3838,117.3066)"
-       id="g78"><path
-         d="M 0,0 -18.352,0 -9.193,-15.834 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path80"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(89.4268,60.4707)"
-       id="g82"><path
-         d="M 0,0 -18.379,0 -9.16,-15.967 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path84"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(91.1904,59.4492)"
-       id="g86"><path
-         d="m 0,0 -9.158,-15.99 18.351,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path88"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(78.4736,117.3066)"
-       id="g90"><path
-         d="M 0,0 -18.354,0 -9.188,-15.834 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path92"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(58.3584,116.2793)"
-       id="g94"><path
-         d="m 0,0 -9.219,-15.875 18.381,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path96"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(102.1475,116.2793)"
-       id="g98"><path
-         d="m 0,0 -9.186,-15.875 18.375,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path100"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(111.3369,98.3438)"
-       id="g102"><path
-         d="M 0,0 -18.375,0 -9.189,-15.824 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path104"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(69.2861,59.4492)"
-       id="g106"><path
-         d="m 0,0 -9.166,-15.99 18.354,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path108"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       id="g110"><g
-         clip-path="url(#clipPath114)"
-         id="g112"><g
-           transform="translate(180.5576,59.5039)"
-           id="g118"><path
-             d="m 0,0 c -1.295,0 -2.502,0.18 -3.615,0.537 -1.106,0.371 -2.071,0.9 -2.889,1.609 -0.814,0.713 -1.455,1.6 -1.92,2.659 -0.455,1.058 -0.678,2.277 -0.678,3.648 l 0,17.135 2.998,0 0,-17.024 c 0,-0.916 0.145,-1.728 0.454,-2.441 C -5.357,5.412 -4.934,4.814 -4.396,4.336 -3.842,3.852 -3.203,3.488 -2.461,3.238 -1.707,2.979 -0.889,2.85 0,2.85 c 0.896,0 1.709,0.129 2.455,0.388 0.75,0.25 1.395,0.614 1.938,1.098 0.541,0.478 0.968,1.076 1.283,1.787 0.314,0.713 0.467,1.525 0.467,2.441 l 0,17.024 3.003,0 0,-17.135 C 9.146,7.082 8.908,5.863 8.439,4.805 7.971,3.746 7.326,2.859 6.506,2.146 5.689,1.437 4.721,0.908 3.613,0.537 2.506,0.18 1.307,0 0,0"
-             inkscape:connector-curvature="0"
-             id="path120"
-             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-           transform="translate(200.9756,73.959)"
-           id="g122"><path
-             d="m 0,0 3.145,0 c 1.06,0 1.935,0.135 2.623,0.412 0.681,0.277 1.236,0.623 1.64,1.035 0.41,0.407 0.701,0.873 0.871,1.373 0.168,0.502 0.254,0.987 0.254,1.453 0,0.518 -0.14,1.03 -0.418,1.534 C 7.838,6.301 7.465,6.729 6.998,7.104 6.527,7.48 5.963,7.77 5.297,7.988 4.635,8.205 3.93,8.316 3.188,8.316 L 0,8.316 0,0 z m -2.994,-14.166 0,25.299 5.965,0 c 0.812,0 1.722,-0.127 2.726,-0.377 0.992,-0.254 1.932,-0.664 2.799,-1.213 0.865,-0.553 1.592,-1.264 2.168,-2.131 0.58,-0.867 0.865,-1.92 0.865,-3.139 0,-1.818 -0.449,-3.328 -1.351,-4.541 C 9.271,-1.486 7.959,-2.287 6.225,-2.674 l 5.416,-11.492 -3.252,0 -5.311,11.277 -0.615,0 -2.463,0 0,-11.277 -2.994,0 z"
-             inkscape:connector-curvature="0"
-             id="path124"
-             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-           transform="translate(223.4951,72.4102)"
-           id="g126"><path
-             d="m 0,0 c 0,-1.4 0.26,-2.711 0.771,-3.926 0.522,-1.209 1.235,-2.271 2.137,-3.183 0.905,-0.899 1.961,-1.618 3.16,-2.145 1.202,-0.531 2.5,-0.803 3.866,-0.803 1.377,0 2.662,0.272 3.877,0.803 1.199,0.527 2.253,1.246 3.162,2.145 0.902,0.912 1.607,1.974 2.123,3.183 0.519,1.215 0.773,2.526 0.773,3.926 0,1.393 -0.254,2.699 -0.773,3.914 -0.516,1.219 -1.221,2.281 -2.123,3.203 -0.909,0.916 -1.963,1.639 -3.162,2.168 -1.215,0.528 -2.5,0.793 -3.877,0.793 C 8.568,10.078 7.27,9.813 6.068,9.285 4.869,8.756 3.813,8.033 2.908,7.117 2.006,6.195 1.293,5.133 0.771,3.914 0.26,2.699 0,1.393 0,0 m -3.004,0 c 0,1.752 0.346,3.424 1.016,5.004 0.672,1.572 1.592,2.953 2.759,4.137 1.172,1.179 2.536,2.113 4.104,2.8 1.564,0.684 3.248,1.028 5.027,1.028 1.756,0 3.432,-0.344 5.006,-1.028 1.58,-0.687 2.957,-1.621 4.135,-2.8 1.182,-1.184 2.119,-2.565 2.803,-4.137 0.687,-1.58 1.027,-3.252 1.027,-5.004 0,-1.789 -0.34,-3.463 -1.027,-5.031 -0.684,-1.567 -1.621,-2.932 -2.803,-4.102 -1.178,-1.166 -2.555,-2.092 -4.135,-2.758 -1.574,-0.683 -3.25,-1.015 -5.006,-1.015 -1.779,0 -3.463,0.332 -5.027,1.015 -1.568,0.666 -2.932,1.592 -4.104,2.758 -1.167,1.17 -2.087,2.535 -2.759,4.102 -0.67,1.568 -1.016,3.242 -1.016,5.031"
-             inkscape:connector-curvature="0"
-             id="path128"
-             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-           transform="translate(257.3213,73.959)"
-           id="g130"><path
-             d="M 0,0 3.145,0 C 4.207,0 5.08,0.135 5.768,0.412 6.457,0.689 6.994,1.035 7.406,1.447 7.818,1.854 8.109,2.32 8.277,2.82 8.445,3.322 8.529,3.807 8.529,4.273 8.529,4.791 8.387,5.303 8.109,5.807 7.838,6.301 7.465,6.729 6.994,7.104 6.52,7.48 5.957,7.77 5.297,7.988 4.631,8.205 3.932,8.316 3.188,8.316 L 0,8.316 0,0 z m -3,-14.166 0,25.299 5.957,0 c 0.82,0 1.734,-0.127 2.734,-0.377 1,-0.254 1.938,-0.664 2.805,-1.213 0.861,-0.553 1.588,-1.264 2.168,-2.131 0.572,-0.867 0.867,-1.92 0.867,-3.139 0,-1.818 -0.455,-3.328 -1.353,-4.541 C 9.27,-1.486 7.953,-2.287 6.213,-2.674 l 5.43,-11.492 -3.256,0 -5.315,11.277 -0.609,0 -2.463,0 0,-11.277 -3,0 z"
-             inkscape:connector-curvature="0"
-             id="path132"
-             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-           transform="translate(157.3545,73.0156)"
-           id="g134"><path
-             d="M 0,0 C -1.854,4.389 -3.719,8.797 -5.596,13.234 -6.105,12.037 -6.6,10.871 -7.104,9.678 l 3.586,-8.463 1.889,-4.424 0.006,0 c 0.705,-1.666 1.406,-3.334 2.117,-5.012 0.709,-1.67 1.41,-3.338 2.113,-5.002 l 3.004,0 C 3.729,-8.787 1.854,-4.375 0,0"
-             inkscape:connector-curvature="0"
-             id="path136"
-             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-           transform="translate(147.792,69.8066)"
-           id="g138"><path
-             d="m 0,0 1.738,4.076 0.004,0 C 1.994,4.643 2.221,5.205 2.473,5.768 L 0.959,9.336 C 0.09,7.297 -0.781,5.244 -1.639,3.209 -3.486,-1.166 -5.357,-5.578 -7.24,-10.014 l 3,0 c 0.697,1.664 1.402,3.332 2.115,5.002 0.709,1.678 1.414,3.346 2.115,5.012 L 0,0 z"
-             inkscape:connector-curvature="0"
-             id="path140"
-             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-           transform="translate(293.6025,73.0156)"
-           id="g142"><path
-             d="M 0,0 C -1.855,4.389 -3.723,8.797 -5.596,13.234 -6.105,12.037 -6.605,10.871 -7.102,9.678 l 3.579,-8.463 1.896,-4.424 0.006,0 c 0.699,-1.666 1.402,-3.334 2.113,-5.012 0.707,-1.67 1.41,-3.338 2.115,-5.002 l 2.998,0 C 3.723,-8.787 1.852,-4.375 0,0"
-             inkscape:connector-curvature="0"
-             id="path144"
-             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-           transform="translate(284.0322,69.8066)"
-           id="g146"><path
-             d="m 0,0 1.746,4.076 0.004,0 C 1.994,4.643 2.229,5.205 2.473,5.768 L 0.967,9.336 C 0.1,7.297 -0.773,5.244 -1.639,3.209 -3.486,-1.166 -5.357,-5.578 -7.232,-10.014 l 3.002,0 c 0.689,1.664 1.402,3.332 2.111,5.002 0.709,1.678 1.416,3.346 2.113,5.012 L 0,0 z"
-             inkscape:connector-curvature="0"
-             id="path148"
-             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g></g></g><text
-       transform="matrix(1,0,0,-1,296.2891,92.6172)"
-       id="text150"><tspan
-         x="0"
-         y="0"
-         id="tspan152"
-         style="font-size:12.5px;font-variant:normal;font-weight:normal;font-stretch:normal;writing-mode:lr-tb;fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none;font-family:Helvetica Neue, Helvetica, Arial, sans-serif">™</tspan></text>
-<text
-       transform="matrix(1,0,0,-1,140.5518,92.6172)"
-       id="text154"><tspan
-         x="0 9.1350002 17.67 24.9 31.620001 39.945"
-         y="0"
-         id="tspan156"
-         style="font-size:15px;font-variant:normal;font-weight:normal;font-stretch:normal;writing-mode:lr-tb;fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none;font-family:Helvetica Neue, Helvetica, Arial, sans-serif">Apache</tspan></text>
-</g></svg>
\ No newline at end of file
diff --git a/publish/assets/img/aurora_logo_ltgreen_bkg.svg b/publish/assets/img/aurora_logo_ltgreen_bkg.svg
deleted file mode 100644
index 317e0b0..0000000
--- a/publish/assets/img/aurora_logo_ltgreen_bkg.svg
+++ /dev/null
@@ -1,240 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<svg
-   xmlns:dc="http://purl.org/dc/elements/1.1/"
-   xmlns:cc="http://creativecommons.org/ns#"
-   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
-   xmlns:svg="http://www.w3.org/2000/svg"
-   xmlns="http://www.w3.org/2000/svg"
-   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
-   version="1.1"
-   width="425"
-   height="157.5"
-   id="svg2"
-   xml:space="preserve"><metadata
-     id="metadata8"><rdf:RDF><cc:Work
-         rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
-           rdf:resource="http://purl.org/dc/dcmitype/StillImage" /></cc:Work></rdf:RDF></metadata><defs
-     id="defs6"><clipPath
-       id="clipPath114"><path
-         d="M 0,150 660,150 660,0 0,0 0,150 z"
-         inkscape:connector-curvature="0"
-         id="path116" /></clipPath><clipPath
-       id="clipPath162"><path
-         d="M 0,150 660,150 660,0 0,0 0,150 z"
-         inkscape:connector-curvature="0"
-         id="path164" /></clipPath></defs><g
-     transform="matrix(1.25,0,0,-1.25,0,187.5)"
-     id="g10"><path
-       d="M 0,0 330,0 330,150 0,150 0,0 z"
-       inkscape:connector-curvature="0"
-       id="path12"
-       style="fill:none;fill-opacity:1;fill-rule:nonzero;stroke:none" /><g
-       transform="translate(89.4268,98.3438)"
-       id="g14"><path
-         d="M 0,0 -18.35,0 -9.16,-15.824 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path16"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(69.2861,97.3203)"
-       id="g18"><path
-         d="m 0,0 -9.166,-15.852 18.354,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path20"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(78.4736,79.4082)"
-       id="g22"><path
-         d="M 0,0 -18.354,0 -9.188,-15.863 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path24"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(58.3584,78.3828)"
-       id="g26"><path
-         d="m 0,0 -9.219,-15.848 18.381,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path28"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(91.1904,97.3203)"
-       id="g30"><path
-         d="m 0,0 -9.158,-15.852 18.351,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path32"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(100.3838,79.4082)"
-       id="g34"><path
-         d="M 0,0 -18.352,0 -9.193,-15.863 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path36"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(80.2666,116.2793)"
-       id="g38"><path
-         d="m 0,0 -9.189,-15.875 18.349,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path40"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(67.5205,60.4707)"
-       id="g42"><path
-         d="M 0,0 -18.381,0 -9.162,-15.967 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path44"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(102.1475,78.3828)"
-       id="g46"><path
-         d="m 0,0 -9.186,-15.848 18.375,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path48"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(111.3369,60.4707)"
-       id="g50"><path
-         d="M 0,0 -18.375,0 -9.189,-15.967 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path52"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(80.2666,78.3828)"
-       id="g54"><path
-         d="m 0,0 -9.189,-15.848 18.349,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path56"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(67.5205,98.3438)"
-       id="g58"><path
-         d="M 0,0 -18.381,0 -9.162,-15.824 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path60"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(38.2119,81.4688)"
-       id="g62"><path
-         d="M 0,0 18.348,0 9.162,15.852"
-         inkscape:connector-curvature="0"
-         id="path64"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(47.374,63.5449)"
-       id="g66"><path
-         d="m 0,0 9.186,15.863 -18.348,0"
-         inkscape:connector-curvature="0"
-         id="path68"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(113.0986,97.3203)"
-       id="g70"><path
-         d="m 0,0 -9.156,-15.852 18.345,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path72"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(122.2881,79.4082)"
-       id="g74"><path
-         d="M 0,0 -18.346,0 -9.189,-15.863 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path76"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(100.3838,117.3066)"
-       id="g78"><path
-         d="M 0,0 -18.352,0 -9.193,-15.834 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path80"
-         style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(89.4268,60.4707)"
-       id="g82"><path
-         d="M 0,0 -18.379,0 -9.16,-15.967 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path84"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(91.1904,59.4492)"
-       id="g86"><path
-         d="m 0,0 -9.158,-15.99 18.351,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path88"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(78.4736,117.3066)"
-       id="g90"><path
-         d="M 0,0 -18.354,0 -9.188,-15.834 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path92"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(58.3584,116.2793)"
-       id="g94"><path
-         d="m 0,0 -9.219,-15.875 18.381,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path96"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(102.1475,116.2793)"
-       id="g98"><path
-         d="m 0,0 -9.186,-15.875 18.375,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path100"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(111.3369,98.3438)"
-       id="g102"><path
-         d="M 0,0 -18.375,0 -9.189,-15.824 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path104"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(69.2861,59.4492)"
-       id="g106"><path
-         d="m 0,0 -9.166,-15.99 18.354,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path108"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       id="g110"><g
-         clip-path="url(#clipPath114)"
-         id="g112"><g
-           transform="translate(180.5576,59.5039)"
-           id="g118"><path
-             d="m 0,0 c -1.295,0 -2.502,0.18 -3.615,0.537 -1.106,0.371 -2.071,0.9 -2.889,1.609 -0.814,0.713 -1.455,1.6 -1.92,2.659 -0.455,1.058 -0.678,2.277 -0.678,3.648 l 0,17.135 2.998,0 0,-17.024 c 0,-0.916 0.145,-1.728 0.454,-2.441 C -5.357,5.412 -4.934,4.814 -4.396,4.336 -3.842,3.852 -3.203,3.488 -2.461,3.238 -1.707,2.979 -0.889,2.85 0,2.85 c 0.896,0 1.709,0.129 2.455,0.388 0.75,0.25 1.395,0.614 1.938,1.098 0.541,0.478 0.968,1.076 1.283,1.787 0.314,0.713 0.467,1.525 0.467,2.441 l 0,17.024 3.003,0 0,-17.135 C 9.146,7.082 8.908,5.863 8.439,4.805 7.971,3.746 7.326,2.859 6.506,2.146 5.689,1.437 4.721,0.908 3.613,0.537 2.506,0.18 1.307,0 0,0"
-             inkscape:connector-curvature="0"
-             id="path120"
-             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-           transform="translate(200.9756,73.959)"
-           id="g122"><path
-             d="m 0,0 3.145,0 c 1.06,0 1.935,0.135 2.623,0.412 0.681,0.277 1.236,0.623 1.64,1.035 0.41,0.407 0.701,0.873 0.871,1.373 0.168,0.502 0.254,0.987 0.254,1.453 0,0.518 -0.14,1.03 -0.418,1.534 C 7.838,6.301 7.465,6.729 6.998,7.104 6.527,7.48 5.963,7.77 5.297,7.988 4.635,8.205 3.93,8.316 3.188,8.316 L 0,8.316 0,0 z m -2.994,-14.166 0,25.299 5.965,0 c 0.812,0 1.722,-0.127 2.726,-0.377 0.992,-0.254 1.932,-0.664 2.799,-1.213 0.865,-0.553 1.592,-1.264 2.168,-2.131 0.58,-0.867 0.865,-1.92 0.865,-3.139 0,-1.818 -0.449,-3.328 -1.351,-4.541 C 9.271,-1.486 7.959,-2.287 6.225,-2.674 l 5.416,-11.492 -3.252,0 -5.311,11.277 -0.615,0 -2.463,0 0,-11.277 -2.994,0 z"
-             inkscape:connector-curvature="0"
-             id="path124"
-             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-           transform="translate(223.4951,72.4102)"
-           id="g126"><path
-             d="m 0,0 c 0,-1.4 0.26,-2.711 0.771,-3.926 0.522,-1.209 1.235,-2.271 2.137,-3.183 0.905,-0.899 1.961,-1.618 3.16,-2.145 1.202,-0.531 2.5,-0.803 3.866,-0.803 1.377,0 2.662,0.272 3.877,0.803 1.199,0.527 2.253,1.246 3.162,2.145 0.902,0.912 1.607,1.974 2.123,3.183 0.519,1.215 0.773,2.526 0.773,3.926 0,1.393 -0.254,2.699 -0.773,3.914 -0.516,1.219 -1.221,2.281 -2.123,3.203 -0.909,0.916 -1.963,1.639 -3.162,2.168 -1.215,0.528 -2.5,0.793 -3.877,0.793 C 8.568,10.078 7.27,9.813 6.068,9.285 4.869,8.756 3.813,8.033 2.908,7.117 2.006,6.195 1.293,5.133 0.771,3.914 0.26,2.699 0,1.393 0,0 m -3.004,0 c 0,1.752 0.346,3.424 1.016,5.004 0.672,1.572 1.592,2.953 2.759,4.137 1.172,1.179 2.536,2.113 4.104,2.8 1.564,0.684 3.248,1.028 5.027,1.028 1.756,0 3.432,-0.344 5.006,-1.028 1.58,-0.687 2.957,-1.621 4.135,-2.8 1.182,-1.184 2.119,-2.565 2.803,-4.137 0.687,-1.58 1.027,-3.252 1.027,-5.004 0,-1.789 -0.34,-3.463 -1.027,-5.031 -0.684,-1.567 -1.621,-2.932 -2.803,-4.102 -1.178,-1.166 -2.555,-2.092 -4.135,-2.758 -1.574,-0.683 -3.25,-1.015 -5.006,-1.015 -1.779,0 -3.463,0.332 -5.027,1.015 -1.568,0.666 -2.932,1.592 -4.104,2.758 -1.167,1.17 -2.087,2.535 -2.759,4.102 -0.67,1.568 -1.016,3.242 -1.016,5.031"
-             inkscape:connector-curvature="0"
-             id="path128"
-             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-           transform="translate(257.3213,73.959)"
-           id="g130"><path
-             d="M 0,0 3.145,0 C 4.207,0 5.08,0.135 5.768,0.412 6.457,0.689 6.994,1.035 7.406,1.447 7.818,1.854 8.109,2.32 8.277,2.82 8.445,3.322 8.529,3.807 8.529,4.273 8.529,4.791 8.387,5.303 8.109,5.807 7.838,6.301 7.465,6.729 6.994,7.104 6.52,7.48 5.957,7.77 5.297,7.988 4.631,8.205 3.932,8.316 3.188,8.316 L 0,8.316 0,0 z m -3,-14.166 0,25.299 5.957,0 c 0.82,0 1.734,-0.127 2.734,-0.377 1,-0.254 1.938,-0.664 2.805,-1.213 0.861,-0.553 1.588,-1.264 2.168,-2.131 0.572,-0.867 0.867,-1.92 0.867,-3.139 0,-1.818 -0.455,-3.328 -1.353,-4.541 C 9.27,-1.486 7.953,-2.287 6.213,-2.674 l 5.43,-11.492 -3.256,0 -5.315,11.277 -0.609,0 -2.463,0 0,-11.277 -3,0 z"
-             inkscape:connector-curvature="0"
-             id="path132"
-             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-           transform="translate(157.3545,73.0156)"
-           id="g134"><path
-             d="M 0,0 C -1.854,4.389 -3.719,8.797 -5.596,13.234 -6.105,12.037 -6.6,10.871 -7.104,9.678 l 3.586,-8.463 1.889,-4.424 0.006,0 c 0.705,-1.666 1.406,-3.334 2.117,-5.012 0.709,-1.67 1.41,-3.338 2.113,-5.002 l 3.004,0 C 3.729,-8.787 1.854,-4.375 0,0"
-             inkscape:connector-curvature="0"
-             id="path136"
-             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-           transform="translate(147.792,69.8066)"
-           id="g138"><path
-             d="m 0,0 1.738,4.076 0.004,0 C 1.994,4.643 2.221,5.205 2.473,5.768 L 0.959,9.336 C 0.09,7.297 -0.781,5.244 -1.639,3.209 -3.486,-1.166 -5.357,-5.578 -7.24,-10.014 l 3,0 c 0.697,1.664 1.402,3.332 2.115,5.002 0.709,1.678 1.414,3.346 2.115,5.012 L 0,0 z"
-             inkscape:connector-curvature="0"
-             id="path140"
-             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-           transform="translate(293.6025,73.0156)"
-           id="g142"><path
-             d="M 0,0 C -1.855,4.389 -3.723,8.797 -5.596,13.234 -6.105,12.037 -6.605,10.871 -7.102,9.678 l 3.579,-8.463 1.896,-4.424 0.006,0 c 0.699,-1.666 1.402,-3.334 2.113,-5.012 0.707,-1.67 1.41,-3.338 2.115,-5.002 l 2.998,0 C 3.723,-8.787 1.852,-4.375 0,0"
-             inkscape:connector-curvature="0"
-             id="path144"
-             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-           transform="translate(284.0322,69.8066)"
-           id="g146"><path
-             d="m 0,0 1.746,4.076 0.004,0 C 1.994,4.643 2.229,5.205 2.473,5.768 L 0.967,9.336 C 0.1,7.297 -0.773,5.244 -1.639,3.209 -3.486,-1.166 -5.357,-5.578 -7.232,-10.014 l 3.002,0 c 0.689,1.664 1.402,3.332 2.111,5.002 0.709,1.678 1.416,3.346 2.113,5.012 L 0,0 z"
-             inkscape:connector-curvature="0"
-             id="path148"
-             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g></g></g><text
-       transform="matrix(1,0,0,-1,296.2891,92.6172)"
-       id="text150"><tspan
-         x="0"
-         y="0"
-         id="tspan152"
-         style="font-size:12.5px;font-variant:normal;font-weight:normal;font-stretch:normal;writing-mode:lr-tb;fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none;font-family:Helvetica Neue, Helvetica, Arial, sans-serif">™</tspan></text>
-<text
-       transform="matrix(1,0,0,-1,140.5518,92.6172)"
-       id="text154"><tspan
-         x="0 9.1350002 17.67 24.9 31.620001 39.945"
-         y="0"
-         id="tspan156"
-         style="font-size:15px;font-variant:normal;font-weight:normal;font-stretch:normal;writing-mode:lr-tb;fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none;font-family:Helvetica Neue, Helvetica, Arial, sans-serif">Apache</tspan></text>
-</g></svg>
\ No newline at end of file
diff --git a/publish/assets/img/aurora_logo_white_bkg.svg b/publish/assets/img/aurora_logo_white_bkg.svg
deleted file mode 100644
index 2b39942..0000000
--- a/publish/assets/img/aurora_logo_white_bkg.svg
+++ /dev/null
@@ -1,240 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<svg
-   xmlns:dc="http://purl.org/dc/elements/1.1/"
-   xmlns:cc="http://creativecommons.org/ns#"
-   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
-   xmlns:svg="http://www.w3.org/2000/svg"
-   xmlns="http://www.w3.org/2000/svg"
-   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
-   version="1.1"
-   width="425"
-   height="157.5"
-   id="svg2"
-   xml:space="preserve"><metadata
-     id="metadata8"><rdf:RDF><cc:Work
-         rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
-           rdf:resource="http://purl.org/dc/dcmitype/StillImage" /></cc:Work></rdf:RDF></metadata><defs
-     id="defs6"><clipPath
-       id="clipPath114"><path
-         d="M 0,150 660,150 660,0 0,0 0,150 z"
-         inkscape:connector-curvature="0"
-         id="path116" /></clipPath><clipPath
-       id="clipPath162"><path
-         d="M 0,150 660,150 660,0 0,0 0,150 z"
-         inkscape:connector-curvature="0"
-         id="path164" /></clipPath></defs><g
-     transform="matrix(1.25,0,0,-1.25,0,187.5)"
-     id="g10"><path
-       d="M 0,0 330,0 330,150 0,150 0,0 z"
-       inkscape:connector-curvature="0"
-       id="path12"
-       style="fill:none;fill-opacity:1;fill-rule:nonzero;stroke:none" /><g
-       transform="translate(89.4268,98.3438)"
-       id="g14"><path
-         d="M 0,0 -18.35,0 -9.16,-15.824 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path16"
-         style="fill:#63c4b1;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(69.2861,97.3203)"
-       id="g18"><path
-         d="m 0,0 -9.166,-15.852 18.354,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path20"
-         style="fill:#63c4b1;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(78.4736,79.4082)"
-       id="g22"><path
-         d="M 0,0 -18.354,0 -9.188,-15.863 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path24"
-         style="fill:#63c4b1;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(58.3584,78.3828)"
-       id="g26"><path
-         d="m 0,0 -9.219,-15.848 18.381,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path28"
-         style="fill:#63c4b1;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(91.1904,97.3203)"
-       id="g30"><path
-         d="m 0,0 -9.158,-15.852 18.351,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path32"
-         style="fill:#63c4b1;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(100.3838,79.4082)"
-       id="g34"><path
-         d="M 0,0 -18.352,0 -9.193,-15.863 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path36"
-         style="fill:#63c4b1;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(80.2666,116.2793)"
-       id="g38"><path
-         d="m 0,0 -9.189,-15.875 18.349,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path40"
-         style="fill:#63c4b1;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(67.5205,60.4707)"
-       id="g42"><path
-         d="M 0,0 -18.381,0 -9.162,-15.967 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path44"
-         style="fill:#63c4b1;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(102.1475,78.3828)"
-       id="g46"><path
-         d="m 0,0 -9.186,-15.848 18.375,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path48"
-         style="fill:#63c4b1;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(111.3369,60.4707)"
-       id="g50"><path
-         d="M 0,0 -18.375,0 -9.189,-15.967 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path52"
-         style="fill:#63c4b1;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(80.2666,78.3828)"
-       id="g54"><path
-         d="m 0,0 -9.189,-15.848 18.349,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path56"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(67.5205,98.3438)"
-       id="g58"><path
-         d="M 0,0 -18.381,0 -9.162,-15.824 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path60"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(38.2119,81.4688)"
-       id="g62"><path
-         d="M 0,0 18.348,0 9.162,15.852"
-         inkscape:connector-curvature="0"
-         id="path64"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(47.374,63.5449)"
-       id="g66"><path
-         d="m 0,0 9.186,15.863 -18.348,0"
-         inkscape:connector-curvature="0"
-         id="path68"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(113.0986,97.3203)"
-       id="g70"><path
-         d="m 0,0 -9.156,-15.852 18.345,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path72"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(122.2881,79.4082)"
-       id="g74"><path
-         d="M 0,0 -18.346,0 -9.189,-15.863 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path76"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(100.3838,117.3066)"
-       id="g78"><path
-         d="M 0,0 -18.352,0 -9.193,-15.834 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path80"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(89.4268,60.4707)"
-       id="g82"><path
-         d="M 0,0 -18.379,0 -9.16,-15.967 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path84"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(91.1904,59.4492)"
-       id="g86"><path
-         d="m 0,0 -9.158,-15.99 18.351,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path88"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(78.4736,117.3066)"
-       id="g90"><path
-         d="M 0,0 -18.354,0 -9.188,-15.834 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path92"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(58.3584,116.2793)"
-       id="g94"><path
-         d="m 0,0 -9.219,-15.875 18.381,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path96"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(102.1475,116.2793)"
-       id="g98"><path
-         d="m 0,0 -9.186,-15.875 18.375,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path100"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(111.3369,98.3438)"
-       id="g102"><path
-         d="M 0,0 -18.375,0 -9.189,-15.824 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path104"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       transform="translate(69.2861,59.4492)"
-       id="g106"><path
-         d="m 0,0 -9.166,-15.99 18.354,0 L 0,0 z"
-         inkscape:connector-curvature="0"
-         id="path108"
-         style="fill:#d6d6d6;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-       id="g110"><g
-         clip-path="url(#clipPath114)"
-         id="g112"><g
-           transform="translate(180.5576,59.5039)"
-           id="g118"><path
-             d="m 0,0 c -1.295,0 -2.502,0.18 -3.615,0.537 -1.106,0.371 -2.071,0.9 -2.889,1.609 -0.814,0.713 -1.455,1.6 -1.92,2.659 -0.455,1.058 -0.678,2.277 -0.678,3.648 l 0,17.135 2.998,0 0,-17.024 c 0,-0.916 0.145,-1.728 0.454,-2.441 C -5.357,5.412 -4.934,4.814 -4.396,4.336 -3.842,3.852 -3.203,3.488 -2.461,3.238 -1.707,2.979 -0.889,2.85 0,2.85 c 0.896,0 1.709,0.129 2.455,0.388 0.75,0.25 1.395,0.614 1.938,1.098 0.541,0.478 0.968,1.076 1.283,1.787 0.314,0.713 0.467,1.525 0.467,2.441 l 0,17.024 3.003,0 0,-17.135 C 9.146,7.082 8.908,5.863 8.439,4.805 7.971,3.746 7.326,2.859 6.506,2.146 5.689,1.437 4.721,0.908 3.613,0.537 2.506,0.18 1.307,0 0,0"
-             inkscape:connector-curvature="0"
-             id="path120"
-             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-           transform="translate(200.9756,73.959)"
-           id="g122"><path
-             d="m 0,0 3.145,0 c 1.06,0 1.935,0.135 2.623,0.412 0.681,0.277 1.236,0.623 1.64,1.035 0.41,0.407 0.701,0.873 0.871,1.373 0.168,0.502 0.254,0.987 0.254,1.453 0,0.518 -0.14,1.03 -0.418,1.534 C 7.838,6.301 7.465,6.729 6.998,7.104 6.527,7.48 5.963,7.77 5.297,7.988 4.635,8.205 3.93,8.316 3.188,8.316 L 0,8.316 0,0 z m -2.994,-14.166 0,25.299 5.965,0 c 0.812,0 1.722,-0.127 2.726,-0.377 0.992,-0.254 1.932,-0.664 2.799,-1.213 0.865,-0.553 1.592,-1.264 2.168,-2.131 0.58,-0.867 0.865,-1.92 0.865,-3.139 0,-1.818 -0.449,-3.328 -1.351,-4.541 C 9.271,-1.486 7.959,-2.287 6.225,-2.674 l 5.416,-11.492 -3.252,0 -5.311,11.277 -0.615,0 -2.463,0 0,-11.277 -2.994,0 z"
-             inkscape:connector-curvature="0"
-             id="path124"
-             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-           transform="translate(223.4951,72.4102)"
-           id="g126"><path
-             d="m 0,0 c 0,-1.4 0.26,-2.711 0.771,-3.926 0.522,-1.209 1.235,-2.271 2.137,-3.183 0.905,-0.899 1.961,-1.618 3.16,-2.145 1.202,-0.531 2.5,-0.803 3.866,-0.803 1.377,0 2.662,0.272 3.877,0.803 1.199,0.527 2.253,1.246 3.162,2.145 0.902,0.912 1.607,1.974 2.123,3.183 0.519,1.215 0.773,2.526 0.773,3.926 0,1.393 -0.254,2.699 -0.773,3.914 -0.516,1.219 -1.221,2.281 -2.123,3.203 -0.909,0.916 -1.963,1.639 -3.162,2.168 -1.215,0.528 -2.5,0.793 -3.877,0.793 C 8.568,10.078 7.27,9.813 6.068,9.285 4.869,8.756 3.813,8.033 2.908,7.117 2.006,6.195 1.293,5.133 0.771,3.914 0.26,2.699 0,1.393 0,0 m -3.004,0 c 0,1.752 0.346,3.424 1.016,5.004 0.672,1.572 1.592,2.953 2.759,4.137 1.172,1.179 2.536,2.113 4.104,2.8 1.564,0.684 3.248,1.028 5.027,1.028 1.756,0 3.432,-0.344 5.006,-1.028 1.58,-0.687 2.957,-1.621 4.135,-2.8 1.182,-1.184 2.119,-2.565 2.803,-4.137 0.687,-1.58 1.027,-3.252 1.027,-5.004 0,-1.789 -0.34,-3.463 -1.027,-5.031 -0.684,-1.567 -1.621,-2.932 -2.803,-4.102 -1.178,-1.166 -2.555,-2.092 -4.135,-2.758 -1.574,-0.683 -3.25,-1.015 -5.006,-1.015 -1.779,0 -3.463,0.332 -5.027,1.015 -1.568,0.666 -2.932,1.592 -4.104,2.758 -1.167,1.17 -2.087,2.535 -2.759,4.102 -0.67,1.568 -1.016,3.242 -1.016,5.031"
-             inkscape:connector-curvature="0"
-             id="path128"
-             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-           transform="translate(257.3213,73.959)"
-           id="g130"><path
-             d="M 0,0 3.145,0 C 4.207,0 5.08,0.135 5.768,0.412 6.457,0.689 6.994,1.035 7.406,1.447 7.818,1.854 8.109,2.32 8.277,2.82 8.445,3.322 8.529,3.807 8.529,4.273 8.529,4.791 8.387,5.303 8.109,5.807 7.838,6.301 7.465,6.729 6.994,7.104 6.52,7.48 5.957,7.77 5.297,7.988 4.631,8.205 3.932,8.316 3.188,8.316 L 0,8.316 0,0 z m -3,-14.166 0,25.299 5.957,0 c 0.82,0 1.734,-0.127 2.734,-0.377 1,-0.254 1.938,-0.664 2.805,-1.213 0.861,-0.553 1.588,-1.264 2.168,-2.131 0.572,-0.867 0.867,-1.92 0.867,-3.139 0,-1.818 -0.455,-3.328 -1.353,-4.541 C 9.27,-1.486 7.953,-2.287 6.213,-2.674 l 5.43,-11.492 -3.256,0 -5.315,11.277 -0.609,0 -2.463,0 0,-11.277 -3,0 z"
-             inkscape:connector-curvature="0"
-             id="path132"
-             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-           transform="translate(157.3545,73.0156)"
-           id="g134"><path
-             d="M 0,0 C -1.854,4.389 -3.719,8.797 -5.596,13.234 -6.105,12.037 -6.6,10.871 -7.104,9.678 l 3.586,-8.463 1.889,-4.424 0.006,0 c 0.705,-1.666 1.406,-3.334 2.117,-5.012 0.709,-1.67 1.41,-3.338 2.113,-5.002 l 3.004,0 C 3.729,-8.787 1.854,-4.375 0,0"
-             inkscape:connector-curvature="0"
-             id="path136"
-             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-           transform="translate(147.792,69.8066)"
-           id="g138"><path
-             d="m 0,0 1.738,4.076 0.004,0 C 1.994,4.643 2.221,5.205 2.473,5.768 L 0.959,9.336 C 0.09,7.297 -0.781,5.244 -1.639,3.209 -3.486,-1.166 -5.357,-5.578 -7.24,-10.014 l 3,0 c 0.697,1.664 1.402,3.332 2.115,5.002 0.709,1.678 1.414,3.346 2.115,5.012 L 0,0 z"
-             inkscape:connector-curvature="0"
-             id="path140"
-             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-           transform="translate(293.6025,73.0156)"
-           id="g142"><path
-             d="M 0,0 C -1.855,4.389 -3.723,8.797 -5.596,13.234 -6.105,12.037 -6.605,10.871 -7.102,9.678 l 3.579,-8.463 1.896,-4.424 0.006,0 c 0.699,-1.666 1.402,-3.334 2.113,-5.012 0.707,-1.67 1.41,-3.338 2.115,-5.002 l 2.998,0 C 3.723,-8.787 1.852,-4.375 0,0"
-             inkscape:connector-curvature="0"
-             id="path144"
-             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g><g
-           transform="translate(284.0322,69.8066)"
-           id="g146"><path
-             d="m 0,0 1.746,4.076 0.004,0 C 1.994,4.643 2.229,5.205 2.473,5.768 L 0.967,9.336 C 0.1,7.297 -0.773,5.244 -1.639,3.209 -3.486,-1.166 -5.357,-5.578 -7.232,-10.014 l 3.002,0 c 0.689,1.664 1.402,3.332 2.111,5.002 0.709,1.678 1.416,3.346 2.113,5.012 L 0,0 z"
-             inkscape:connector-curvature="0"
-             id="path148"
-             style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none" /></g></g></g><text
-       transform="matrix(1,0,0,-1,296.2891,92.6172)"
-       id="text150"><tspan
-         x="0"
-         y="0"
-         id="tspan152"
-         style="font-size:12.5px;font-variant:normal;font-weight:normal;font-stretch:normal;writing-mode:lr-tb;fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none;font-family:Helvetica Neue, Helvetica, Arial, sans-serif">™</tspan></text>
-<text
-       transform="matrix(1,0,0,-1,140.5518,92.6172)"
-       id="text154"><tspan
-         x="0 9.1350002 17.67 24.9 31.620001 39.945"
-         y="0"
-         id="tspan156"
-         style="font-size:15px;font-variant:normal;font-weight:normal;font-stretch:normal;writing-mode:lr-tb;fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none;font-family:Helvetica Neue, Helvetica, Arial, sans-serif">Apache</tspan></text>
-</g></svg>
\ No newline at end of file
diff --git a/publish/assets/img/favicon.ico b/publish/assets/img/favicon.ico
deleted file mode 100644
index f0c22ad..0000000
--- a/publish/assets/img/favicon.ico
+++ /dev/null
Binary files differ
diff --git a/publish/assets/img/glyphicons-halflings-white.png b/publish/assets/img/glyphicons-halflings-white.png
deleted file mode 100644
index 3bf6484..0000000
--- a/publish/assets/img/glyphicons-halflings-white.png
+++ /dev/null
Binary files differ
diff --git a/publish/assets/img/glyphicons-halflings.png b/publish/assets/img/glyphicons-halflings.png
deleted file mode 100644
index a996999..0000000
--- a/publish/assets/img/glyphicons-halflings.png
+++ /dev/null
Binary files differ
diff --git a/publish/assets/js/bootstrap.js b/publish/assets/js/bootstrap.js
deleted file mode 100644
index f25c81e..0000000
--- a/publish/assets/js/bootstrap.js
+++ /dev/null
@@ -1,2281 +0,0 @@
-/* ===================================================
- * bootstrap-transition.js v2.3.2
- * http://getbootstrap.com/2.3.2/javascript.html#transitions
- * ===================================================
- * Copyright 2013 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ========================================================== */
-
-
-
-!function ($) {
-
-  "use strict"; // jshint ;_;
-
-
-  /* CSS TRANSITION SUPPORT (http://www.modernizr.com/)
-   * ======================================================= */
-
-  $(function () {
-
-    $.support.transition = (function () {
-
-      var transitionEnd = (function () {
-
-        var el = document.createElement('bootstrap')
-          , transEndEventNames = {
-               'WebkitTransition' : 'webkitTransitionEnd'
-            ,  'MozTransition'    : 'transitionend'
-            ,  'OTransition'      : 'oTransitionEnd otransitionend'
-            ,  'transition'       : 'transitionend'
-            }
-          , name
-
-        for (name in transEndEventNames){
-          if (el.style[name] !== undefined) {
-            return transEndEventNames[name]
-          }
-        }
-
-      }())
-
-      return transitionEnd && {
-        end: transitionEnd
-      }
-
-    })()
-
-  })
-
-}(window.jQuery);/* ==========================================================
- * bootstrap-alert.js v2.3.2
- * http://getbootstrap.com/2.3.2/javascript.html#alerts
- * ==========================================================
- * Copyright 2013 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ========================================================== */
-
-
-!function ($) {
-
-  "use strict"; // jshint ;_;
-
-
- /* ALERT CLASS DEFINITION
-  * ====================== */
-
-  var dismiss = '[data-dismiss="alert"]'
-    , Alert = function (el) {
-        $(el).on('click', dismiss, this.close)
-      }
-
-  Alert.prototype.close = function (e) {
-    var $this = $(this)
-      , selector = $this.attr('data-target')
-      , $parent
-
-    if (!selector) {
-      selector = $this.attr('href')
-      selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') //strip for ie7
-    }
-
-    $parent = $(selector)
-
-    e && e.preventDefault()
-
-    $parent.length || ($parent = $this.hasClass('alert') ? $this : $this.parent())
-
-    $parent.trigger(e = $.Event('close'))
-
-    if (e.isDefaultPrevented()) return
-
-    $parent.removeClass('in')
-
-    function removeElement() {
-      $parent
-        .trigger('closed')
-        .remove()
-    }
-
-    $.support.transition && $parent.hasClass('fade') ?
-      $parent.on($.support.transition.end, removeElement) :
-      removeElement()
-  }
-
-
- /* ALERT PLUGIN DEFINITION
-  * ======================= */
-
-  var old = $.fn.alert
-
-  $.fn.alert = function (option) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('alert')
-      if (!data) $this.data('alert', (data = new Alert(this)))
-      if (typeof option == 'string') data[option].call($this)
-    })
-  }
-
-  $.fn.alert.Constructor = Alert
-
-
- /* ALERT NO CONFLICT
-  * ================= */
-
-  $.fn.alert.noConflict = function () {
-    $.fn.alert = old
-    return this
-  }
-
-
- /* ALERT DATA-API
-  * ============== */
-
-  $(document).on('click.alert.data-api', dismiss, Alert.prototype.close)
-
-}(window.jQuery);/* ============================================================
- * bootstrap-button.js v2.3.2
- * http://getbootstrap.com/2.3.2/javascript.html#buttons
- * ============================================================
- * Copyright 2013 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============================================================ */
-
-
-!function ($) {
-
-  "use strict"; // jshint ;_;
-
-
- /* BUTTON PUBLIC CLASS DEFINITION
-  * ============================== */
-
-  var Button = function (element, options) {
-    this.$element = $(element)
-    this.options = $.extend({}, $.fn.button.defaults, options)
-  }
-
-  Button.prototype.setState = function (state) {
-    var d = 'disabled'
-      , $el = this.$element
-      , data = $el.data()
-      , val = $el.is('input') ? 'val' : 'html'
-
-    state = state + 'Text'
-    data.resetText || $el.data('resetText', $el[val]())
-
-    $el[val](data[state] || this.options[state])
-
-    // push to event loop to allow forms to submit
-    setTimeout(function () {
-      state == 'loadingText' ?
-        $el.addClass(d).attr(d, d) :
-        $el.removeClass(d).removeAttr(d)
-    }, 0)
-  }
-
-  Button.prototype.toggle = function () {
-    var $parent = this.$element.closest('[data-toggle="buttons-radio"]')
-
-    $parent && $parent
-      .find('.active')
-      .removeClass('active')
-
-    this.$element.toggleClass('active')
-  }
-
-
- /* BUTTON PLUGIN DEFINITION
-  * ======================== */
-
-  var old = $.fn.button
-
-  $.fn.button = function (option) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('button')
-        , options = typeof option == 'object' && option
-      if (!data) $this.data('button', (data = new Button(this, options)))
-      if (option == 'toggle') data.toggle()
-      else if (option) data.setState(option)
-    })
-  }
-
-  $.fn.button.defaults = {
-    loadingText: 'loading...'
-  }
-
-  $.fn.button.Constructor = Button
-
-
- /* BUTTON NO CONFLICT
-  * ================== */
-
-  $.fn.button.noConflict = function () {
-    $.fn.button = old
-    return this
-  }
-
-
- /* BUTTON DATA-API
-  * =============== */
-
-  $(document).on('click.button.data-api', '[data-toggle^=button]', function (e) {
-    var $btn = $(e.target)
-    if (!$btn.hasClass('btn')) $btn = $btn.closest('.btn')
-    $btn.button('toggle')
-  })
-
-}(window.jQuery);/* ==========================================================
- * bootstrap-carousel.js v2.3.2
- * http://getbootstrap.com/2.3.2/javascript.html#carousel
- * ==========================================================
- * Copyright 2013 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ========================================================== */
-
-
-!function ($) {
-
-  "use strict"; // jshint ;_;
-
-
- /* CAROUSEL CLASS DEFINITION
-  * ========================= */
-
-  var Carousel = function (element, options) {
-    this.$element = $(element)
-    this.$indicators = this.$element.find('.carousel-indicators')
-    this.options = options
-    this.options.pause == 'hover' && this.$element
-      .on('mouseenter', $.proxy(this.pause, this))
-      .on('mouseleave', $.proxy(this.cycle, this))
-  }
-
-  Carousel.prototype = {
-
-    cycle: function (e) {
-      if (!e) this.paused = false
-      if (this.interval) clearInterval(this.interval);
-      this.options.interval
-        && !this.paused
-        && (this.interval = setInterval($.proxy(this.next, this), this.options.interval))
-      return this
-    }
-
-  , getActiveIndex: function () {
-      this.$active = this.$element.find('.item.active')
-      this.$items = this.$active.parent().children()
-      return this.$items.index(this.$active)
-    }
-
-  , to: function (pos) {
-      var activeIndex = this.getActiveIndex()
-        , that = this
-
-      if (pos > (this.$items.length - 1) || pos < 0) return
-
-      if (this.sliding) {
-        return this.$element.one('slid', function () {
-          that.to(pos)
-        })
-      }
-
-      if (activeIndex == pos) {
-        return this.pause().cycle()
-      }
-
-      return this.slide(pos > activeIndex ? 'next' : 'prev', $(this.$items[pos]))
-    }
-
-  , pause: function (e) {
-      if (!e) this.paused = true
-      if (this.$element.find('.next, .prev').length && $.support.transition.end) {
-        this.$element.trigger($.support.transition.end)
-        this.cycle(true)
-      }
-      clearInterval(this.interval)
-      this.interval = null
-      return this
-    }
-
-  , next: function () {
-      if (this.sliding) return
-      return this.slide('next')
-    }
-
-  , prev: function () {
-      if (this.sliding) return
-      return this.slide('prev')
-    }
-
-  , slide: function (type, next) {
-      var $active = this.$element.find('.item.active')
-        , $next = next || $active[type]()
-        , isCycling = this.interval
-        , direction = type == 'next' ? 'left' : 'right'
-        , fallback  = type == 'next' ? 'first' : 'last'
-        , that = this
-        , e
-
-      this.sliding = true
-
-      isCycling && this.pause()
-
-      $next = $next.length ? $next : this.$element.find('.item')[fallback]()
-
-      e = $.Event('slide', {
-        relatedTarget: $next[0]
-      , direction: direction
-      })
-
-      if ($next.hasClass('active')) return
-
-      if (this.$indicators.length) {
-        this.$indicators.find('.active').removeClass('active')
-        this.$element.one('slid', function () {
-          var $nextIndicator = $(that.$indicators.children()[that.getActiveIndex()])
-          $nextIndicator && $nextIndicator.addClass('active')
-        })
-      }
-
-      if ($.support.transition && this.$element.hasClass('slide')) {
-        this.$element.trigger(e)
-        if (e.isDefaultPrevented()) return
-        $next.addClass(type)
-        $next[0].offsetWidth // force reflow
-        $active.addClass(direction)
-        $next.addClass(direction)
-        this.$element.one($.support.transition.end, function () {
-          $next.removeClass([type, direction].join(' ')).addClass('active')
-          $active.removeClass(['active', direction].join(' '))
-          that.sliding = false
-          setTimeout(function () { that.$element.trigger('slid') }, 0)
-        })
-      } else {
-        this.$element.trigger(e)
-        if (e.isDefaultPrevented()) return
-        $active.removeClass('active')
-        $next.addClass('active')
-        this.sliding = false
-        this.$element.trigger('slid')
-      }
-
-      isCycling && this.cycle()
-
-      return this
-    }
-
-  }
-
-
- /* CAROUSEL PLUGIN DEFINITION
-  * ========================== */
-
-  var old = $.fn.carousel
-
-  $.fn.carousel = function (option) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('carousel')
-        , options = $.extend({}, $.fn.carousel.defaults, typeof option == 'object' && option)
-        , action = typeof option == 'string' ? option : options.slide
-      if (!data) $this.data('carousel', (data = new Carousel(this, options)))
-      if (typeof option == 'number') data.to(option)
-      else if (action) data[action]()
-      else if (options.interval) data.pause().cycle()
-    })
-  }
-
-  $.fn.carousel.defaults = {
-    interval: 5000
-  , pause: 'hover'
-  }
-
-  $.fn.carousel.Constructor = Carousel
-
-
- /* CAROUSEL NO CONFLICT
-  * ==================== */
-
-  $.fn.carousel.noConflict = function () {
-    $.fn.carousel = old
-    return this
-  }
-
- /* CAROUSEL DATA-API
-  * ================= */
-
-  $(document).on('click.carousel.data-api', '[data-slide], [data-slide-to]', function (e) {
-    var $this = $(this), href
-      , $target = $($this.attr('data-target') || (href = $this.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '')) //strip for ie7
-      , options = $.extend({}, $target.data(), $this.data())
-      , slideIndex
-
-    $target.carousel(options)
-
-    if (slideIndex = $this.attr('data-slide-to')) {
-      $target.data('carousel').pause().to(slideIndex).cycle()
-    }
-
-    e.preventDefault()
-  })
-
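  // Editor's aside (illustration only, not part of the deleted file): the
  // carousel plugin above, against a hypothetical '#promo-carousel' element
  // with the expected .item / .carousel-indicators markup.
  $('#promo-carousel').carousel({ interval: 3000 })  // object: construct, then pause().cycle()
  $('#promo-carousel').carousel(2)                   // number: jump to the third slide via to()
  $('#promo-carousel').carousel('pause')             // string: dispatch to a prototype method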
-}(window.jQuery);
-/* =============================================================
- * bootstrap-collapse.js v2.3.2
- * http://getbootstrap.com/2.3.2/javascript.html#collapse
- * =============================================================
- * Copyright 2013 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============================================================ */
-
-
-!function ($) {
-
-  "use strict"; // jshint ;_;
-
-
- /* COLLAPSE PUBLIC CLASS DEFINITION
-  * ================================ */
-
-  var Collapse = function (element, options) {
-    this.$element = $(element)
-    this.options = $.extend({}, $.fn.collapse.defaults, options)
-
-    if (this.options.parent) {
-      this.$parent = $(this.options.parent)
-    }
-
-    this.options.toggle && this.toggle()
-  }
-
-  Collapse.prototype = {
-
-    constructor: Collapse
-
-  , dimension: function () {
-      var hasWidth = this.$element.hasClass('width')
-      return hasWidth ? 'width' : 'height'
-    }
-
-  , show: function () {
-      var dimension
-        , scroll
-        , actives
-        , hasData
-
-      if (this.transitioning || this.$element.hasClass('in')) return
-
-      dimension = this.dimension()
-      scroll = $.camelCase(['scroll', dimension].join('-'))
-      actives = this.$parent && this.$parent.find('> .accordion-group > .in')
-
-      if (actives && actives.length) {
-        hasData = actives.data('collapse')
-        if (hasData && hasData.transitioning) return
-        actives.collapse('hide')
-        hasData || actives.data('collapse', null)
-      }
-
-      this.$element[dimension](0)
-      this.transition('addClass', $.Event('show'), 'shown')
-      $.support.transition && this.$element[dimension](this.$element[0][scroll])
-    }
-
-  , hide: function () {
-      var dimension
-      if (this.transitioning || !this.$element.hasClass('in')) return
-      dimension = this.dimension()
-      this.reset(this.$element[dimension]())
-      this.transition('removeClass', $.Event('hide'), 'hidden')
-      this.$element[dimension](0)
-    }
-
-  , reset: function (size) {
-      var dimension = this.dimension()
-
-      this.$element
-        .removeClass('collapse')
-        [dimension](size || 'auto')
-        [0].offsetWidth
-
-      this.$element[size !== null ? 'addClass' : 'removeClass']('collapse')
-
-      return this
-    }
-
-  , transition: function (method, startEvent, completeEvent) {
-      var that = this
-        , complete = function () {
-            if (startEvent.type == 'show') that.reset()
-            that.transitioning = 0
-            that.$element.trigger(completeEvent)
-          }
-
-      this.$element.trigger(startEvent)
-
-      if (startEvent.isDefaultPrevented()) return
-
-      this.transitioning = 1
-
-      this.$element[method]('in')
-
-      $.support.transition && this.$element.hasClass('collapse') ?
-        this.$element.one($.support.transition.end, complete) :
-        complete()
-    }
-
-  , toggle: function () {
-      this[this.$element.hasClass('in') ? 'hide' : 'show']()
-    }
-
-  }
-
-
- /* COLLAPSE PLUGIN DEFINITION
-  * ========================== */
-
-  var old = $.fn.collapse
-
-  $.fn.collapse = function (option) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('collapse')
-        , options = $.extend({}, $.fn.collapse.defaults, $this.data(), typeof option == 'object' && option)
-      if (!data) $this.data('collapse', (data = new Collapse(this, options)))
-      if (typeof option == 'string') data[option]()
-    })
-  }
-
-  $.fn.collapse.defaults = {
-    toggle: true
-  }
-
-  $.fn.collapse.Constructor = Collapse
-
-
- /* COLLAPSE NO CONFLICT
-  * ==================== */
-
-  $.fn.collapse.noConflict = function () {
-    $.fn.collapse = old
-    return this
-  }
-
-
- /* COLLAPSE DATA-API
-  * ================= */
-
-  $(document).on('click.collapse.data-api', '[data-toggle=collapse]', function (e) {
-    var $this = $(this), href
-      , target = $this.attr('data-target')
-        || e.preventDefault()
-        || (href = $this.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '') //strip for ie7
-      , option = $(target).data('collapse') ? 'toggle' : $this.data()
-    $this[$(target).hasClass('in') ? 'addClass' : 'removeClass']('collapsed')
-    $(target).collapse(option)
-  })
-
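  // Editor's aside (illustration only, not part of the deleted file): the
  // collapse plugin above. '#details' and '#accordion' are hypothetical;
  // options merge the defaults, the element's data-* attributes, and this object.
  $('#details').collapse({ toggle: false, parent: '#accordion' })
  $('#details').collapse('show')  // string option calls Collapse.prototype.show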
-}(window.jQuery);
-/* ============================================================
- * bootstrap-dropdown.js v2.3.2
- * http://getbootstrap.com/2.3.2/javascript.html#dropdowns
- * ============================================================
- * Copyright 2013 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============================================================ */
-
-
-!function ($) {
-
-  "use strict"; // jshint ;_;
-
-
- /* DROPDOWN CLASS DEFINITION
-  * ========================= */
-
-  var toggle = '[data-toggle=dropdown]'
-    , Dropdown = function (element) {
-        var $el = $(element).on('click.dropdown.data-api', this.toggle)
-        $('html').on('click.dropdown.data-api', function () {
-          $el.parent().removeClass('open')
-        })
-      }
-
-  Dropdown.prototype = {
-
-    constructor: Dropdown
-
-  , toggle: function (e) {
-      var $this = $(this)
-        , $parent
-        , isActive
-
-      if ($this.is('.disabled, :disabled')) return
-
-      $parent = getParent($this)
-
-      isActive = $parent.hasClass('open')
-
-      clearMenus()
-
-      if (!isActive) {
-        if ('ontouchstart' in document.documentElement) {
-          // if mobile we use a backdrop because click events don't delegate
-          $('<div class="dropdown-backdrop"/>').insertBefore($(this)).on('click', clearMenus)
-        }
-        $parent.toggleClass('open')
-      }
-
-      $this.focus()
-
-      return false
-    }
-
-  , keydown: function (e) {
-      var $this
-        , $items
-        , $active
-        , $parent
-        , isActive
-        , index
-
-      if (!/(38|40|27)/.test(e.keyCode)) return
-
-      $this = $(this)
-
-      e.preventDefault()
-      e.stopPropagation()
-
-      if ($this.is('.disabled, :disabled')) return
-
-      $parent = getParent($this)
-
-      isActive = $parent.hasClass('open')
-
-      if (!isActive || (isActive && e.keyCode == 27)) {
-        if (e.which == 27) $parent.find(toggle).focus()
-        return $this.click()
-      }
-
-      $items = $('[role=menu] li:not(.divider):visible a', $parent)
-
-      if (!$items.length) return
-
-      index = $items.index($items.filter(':focus'))
-
-      if (e.keyCode == 38 && index > 0) index--                                        // up
-      if (e.keyCode == 40 && index < $items.length - 1) index++                        // down
-      if (!~index) index = 0
-
-      $items
-        .eq(index)
-        .focus()
-    }
-
-  }
-
-  function clearMenus() {
-    $('.dropdown-backdrop').remove()
-    $(toggle).each(function () {
-      getParent($(this)).removeClass('open')
-    })
-  }
-
-  function getParent($this) {
-    var selector = $this.attr('data-target')
-      , $parent
-
-    if (!selector) {
-      selector = $this.attr('href')
-      selector = selector && /#/.test(selector) && selector.replace(/.*(?=#[^\s]*$)/, '') //strip for ie7
-    }
-
-    $parent = selector && $(selector)
-
-    if (!$parent || !$parent.length) $parent = $this.parent()
-
-    return $parent
-  }
-
-
-  /* DROPDOWN PLUGIN DEFINITION
-   * ========================== */
-
-  var old = $.fn.dropdown
-
-  $.fn.dropdown = function (option) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('dropdown')
-      if (!data) $this.data('dropdown', (data = new Dropdown(this)))
-      if (typeof option == 'string') data[option].call($this)
-    })
-  }
-
-  $.fn.dropdown.Constructor = Dropdown
-
-
- /* DROPDOWN NO CONFLICT
-  * ==================== */
-
-  $.fn.dropdown.noConflict = function () {
-    $.fn.dropdown = old
-    return this
-  }
-
-
-  /* APPLY TO STANDARD DROPDOWN ELEMENTS
-   * =================================== */
-
-  $(document)
-    .on('click.dropdown.data-api', clearMenus)
-    .on('click.dropdown.data-api', '.dropdown form', function (e) { e.stopPropagation() })
-    .on('click.dropdown.data-api'  , toggle, Dropdown.prototype.toggle)
-    .on('keydown.dropdown.data-api', toggle + ', [role=menu]' , Dropdown.prototype.keydown)
-
-}(window.jQuery);
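For review context, a minimal sketch (assumptions flagged inline) of using the dropdown plugin deleted above without the data-api; only $.fn.dropdown and its string dispatch come from the code itself:

    $(function () {
      // '#site-nav .dropdown-toggle' is a hypothetical selector.
      $('#site-nav .dropdown-toggle').dropdown()                   // construct per element
      $('#site-nav .dropdown-toggle').first().dropdown('toggle')   // dispatched via data[option].call($this)
    })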
-/* =========================================================
- * bootstrap-modal.js v2.3.2
- * http://getbootstrap.com/2.3.2/javascript.html#modals
- * =========================================================
- * Copyright 2013 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ========================================================= */
-
-
-!function ($) {
-
-  "use strict"; // jshint ;_;
-
-
- /* MODAL CLASS DEFINITION
-  * ====================== */
-
-  var Modal = function (element, options) {
-    this.options = options
-    this.$element = $(element)
-      .delegate('[data-dismiss="modal"]', 'click.dismiss.modal', $.proxy(this.hide, this))
-    this.options.remote && this.$element.find('.modal-body').load(this.options.remote)
-  }
-
-  Modal.prototype = {
-
-      constructor: Modal
-
-    , toggle: function () {
-        return this[!this.isShown ? 'show' : 'hide']()
-      }
-
-    , show: function () {
-        var that = this
-          , e = $.Event('show')
-
-        this.$element.trigger(e)
-
-        if (this.isShown || e.isDefaultPrevented()) return
-
-        this.isShown = true
-
-        this.escape()
-
-        this.backdrop(function () {
-          var transition = $.support.transition && that.$element.hasClass('fade')
-
-          if (!that.$element.parent().length) {
-            that.$element.appendTo(document.body) // don't move the modal's DOM position
-          }
-
-          that.$element.show()
-
-          if (transition) {
-            that.$element[0].offsetWidth // force reflow
-          }
-
-          that.$element
-            .addClass('in')
-            .attr('aria-hidden', false)
-
-          that.enforceFocus()
-
-          transition ?
-            that.$element.one($.support.transition.end, function () { that.$element.focus().trigger('shown') }) :
-            that.$element.focus().trigger('shown')
-
-        })
-      }
-
-    , hide: function (e) {
-        e && e.preventDefault()
-
-        var that = this
-
-        e = $.Event('hide')
-
-        this.$element.trigger(e)
-
-        if (!this.isShown || e.isDefaultPrevented()) return
-
-        this.isShown = false
-
-        this.escape()
-
-        $(document).off('focusin.modal')
-
-        this.$element
-          .removeClass('in')
-          .attr('aria-hidden', true)
-
-        $.support.transition && this.$element.hasClass('fade') ?
-          this.hideWithTransition() :
-          this.hideModal()
-      }
-
-    , enforceFocus: function () {
-        var that = this
-        $(document).on('focusin.modal', function (e) {
-          if (that.$element[0] !== e.target && !that.$element.has(e.target).length) {
-            that.$element.focus()
-          }
-        })
-      }
-
-    , escape: function () {
-        var that = this
-        if (this.isShown && this.options.keyboard) {
-          this.$element.on('keyup.dismiss.modal', function ( e ) {
-            e.which == 27 && that.hide()
-          })
-        } else if (!this.isShown) {
-          this.$element.off('keyup.dismiss.modal')
-        }
-      }
-
-    , hideWithTransition: function () {
-        var that = this
-          , timeout = setTimeout(function () {
-              that.$element.off($.support.transition.end)
-              that.hideModal()
-            }, 500)
-
-        this.$element.one($.support.transition.end, function () {
-          clearTimeout(timeout)
-          that.hideModal()
-        })
-      }
-
-    , hideModal: function () {
-        var that = this
-        this.$element.hide()
-        this.backdrop(function () {
-          that.removeBackdrop()
-          that.$element.trigger('hidden')
-        })
-      }
-
-    , removeBackdrop: function () {
-        this.$backdrop && this.$backdrop.remove()
-        this.$backdrop = null
-      }
-
-    , backdrop: function (callback) {
-        var that = this
-          , animate = this.$element.hasClass('fade') ? 'fade' : ''
-
-        if (this.isShown && this.options.backdrop) {
-          var doAnimate = $.support.transition && animate
-
-          this.$backdrop = $('<div class="modal-backdrop ' + animate + '" />')
-            .appendTo(document.body)
-
-          this.$backdrop.click(
-            this.options.backdrop == 'static' ?
-              $.proxy(this.$element[0].focus, this.$element[0])
-            : $.proxy(this.hide, this)
-          )
-
-          if (doAnimate) this.$backdrop[0].offsetWidth // force reflow
-
-          this.$backdrop.addClass('in')
-
-          if (!callback) return
-
-          doAnimate ?
-            this.$backdrop.one($.support.transition.end, callback) :
-            callback()
-
-        } else if (!this.isShown && this.$backdrop) {
-          this.$backdrop.removeClass('in')
-
-          $.support.transition && this.$element.hasClass('fade')?
-            this.$backdrop.one($.support.transition.end, callback) :
-            callback()
-
-        } else if (callback) {
-          callback()
-        }
-      }
-  }
-
-
- /* MODAL PLUGIN DEFINITION
-  * ======================= */
-
-  var old = $.fn.modal
-
-  $.fn.modal = function (option) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('modal')
-        , options = $.extend({}, $.fn.modal.defaults, $this.data(), typeof option == 'object' && option)
-      if (!data) $this.data('modal', (data = new Modal(this, options)))
-      if (typeof option == 'string') data[option]()
-      else if (options.show) data.show()
-    })
-  }
-
-  $.fn.modal.defaults = {
-      backdrop: true
-    , keyboard: true
-    , show: true
-  }
-
-  $.fn.modal.Constructor = Modal
-
-
- /* MODAL NO CONFLICT
-  * ================= */
-
-  $.fn.modal.noConflict = function () {
-    $.fn.modal = old
-    return this
-  }
-
-
- /* MODAL DATA-API
-  * ============== */
-
-  $(document).on('click.modal.data-api', '[data-toggle="modal"]', function (e) {
-    var $this = $(this)
-      , href = $this.attr('href')
-      , $target = $($this.attr('data-target') || (href && href.replace(/.*(?=#[^\s]+$)/, ''))) //strip for ie7
-      , option = $target.data('modal') ? 'toggle' : $.extend({ remote:!/#/.test(href) && href }, $target.data(), $this.data())
-
-    e.preventDefault()
-
-    $target
-      .modal(option)
-      .one('hide', function () {
-        $this.focus()
-      })
-  })
-
-}(window.jQuery);
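Likewise, a minimal sketch for the modal plugin deleted above; '#confirm-modal' is a hypothetical element with the standard .modal markup:

    $(function () {
      // Object options merge over { backdrop: true, keyboard: true, show: true },
      // so this constructs the Modal and shows it immediately:
      $('#confirm-modal').modal({ keyboard: false })
      $('#confirm-modal').modal('hide')  // string option calls Modal.prototype.hide
    })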
-/* ===========================================================
- * bootstrap-tooltip.js v2.3.2
- * http://getbootstrap.com/2.3.2/javascript.html#tooltips
- * Inspired by the original jQuery.tipsy by Jason Frame
- * ===========================================================
- * Copyright 2013 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ========================================================== */
-
-
-!function ($) {
-
-  "use strict"; // jshint ;_;
-
-
- /* TOOLTIP PUBLIC CLASS DEFINITION
-  * =============================== */
-
-  var Tooltip = function (element, options) {
-    this.init('tooltip', element, options)
-  }
-
-  Tooltip.prototype = {
-
-    constructor: Tooltip
-
-  , init: function (type, element, options) {
-      var eventIn
-        , eventOut
-        , triggers
-        , trigger
-        , i
-
-      this.type = type
-      this.$element = $(element)
-      this.options = this.getOptions(options)
-      this.enabled = true
-
-      triggers = this.options.trigger.split(' ')
-
-      for (i = triggers.length; i--;) {
-        trigger = triggers[i]
-        if (trigger == 'click') {
-          this.$element.on('click.' + this.type, this.options.selector, $.proxy(this.toggle, this))
-        } else if (trigger != 'manual') {
-          eventIn = trigger == 'hover' ? 'mouseenter' : 'focus'
-          eventOut = trigger == 'hover' ? 'mouseleave' : 'blur'
-          this.$element.on(eventIn + '.' + this.type, this.options.selector, $.proxy(this.enter, this))
-          this.$element.on(eventOut + '.' + this.type, this.options.selector, $.proxy(this.leave, this))
-        }
-      }
-
-      this.options.selector ?
-        (this._options = $.extend({}, this.options, { trigger: 'manual', selector: '' })) :
-        this.fixTitle()
-    }
-
-  , getOptions: function (options) {
-      options = $.extend({}, $.fn[this.type].defaults, this.$element.data(), options)
-
-      if (options.delay && typeof options.delay == 'number') {
-        options.delay = {
-          show: options.delay
-        , hide: options.delay
-        }
-      }
-
-      return options
-    }
-
-  , enter: function (e) {
-      var defaults = $.fn[this.type].defaults
-        , options = {}
-        , self
-
-      this._options && $.each(this._options, function (key, value) {
-        if (defaults[key] != value) options[key] = value
-      }, this)
-
-      self = $(e.currentTarget)[this.type](options).data(this.type)
-
-      if (!self.options.delay || !self.options.delay.show) return self.show()
-
-      clearTimeout(this.timeout)
-      self.hoverState = 'in'
-      this.timeout = setTimeout(function() {
-        if (self.hoverState == 'in') self.show()
-      }, self.options.delay.show)
-    }
-
-  , leave: function (e) {
-      var self = $(e.currentTarget)[this.type](this._options).data(this.type)
-
-      if (this.timeout) clearTimeout(this.timeout)
-      if (!self.options.delay || !self.options.delay.hide) return self.hide()
-
-      self.hoverState = 'out'
-      this.timeout = setTimeout(function() {
-        if (self.hoverState == 'out') self.hide()
-      }, self.options.delay.hide)
-    }
-
-  , show: function () {
-      var $tip
-        , pos
-        , actualWidth
-        , actualHeight
-        , placement
-        , tp
-        , e = $.Event('show')
-
-      if (this.hasContent() && this.enabled) {
-        this.$element.trigger(e)
-        if (e.isDefaultPrevented()) return
-        $tip = this.tip()
-        this.setContent()
-
-        if (this.options.animation) {
-          $tip.addClass('fade')
-        }
-
-        placement = typeof this.options.placement == 'function' ?
-          this.options.placement.call(this, $tip[0], this.$element[0]) :
-          this.options.placement
-
-        $tip
-          .detach()
-          .css({ top: 0, left: 0, display: 'block' })
-
-        this.options.container ? $tip.appendTo(this.options.container) : $tip.insertAfter(this.$element)
-
-        pos = this.getPosition()
-
-        actualWidth = $tip[0].offsetWidth
-        actualHeight = $tip[0].offsetHeight
-
-        switch (placement) {
-          case 'bottom':
-            tp = {top: pos.top + pos.height, left: pos.left + pos.width / 2 - actualWidth / 2}
-            break
-          case 'top':
-            tp = {top: pos.top - actualHeight, left: pos.left + pos.width / 2 - actualWidth / 2}
-            break
-          case 'left':
-            tp = {top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left - actualWidth}
-            break
-          case 'right':
-            tp = {top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left + pos.width}
-            break
-        }
-
-        this.applyPlacement(tp, placement)
-        this.$element.trigger('shown')
-      }
-    }
-
-  , applyPlacement: function(offset, placement){
-      var $tip = this.tip()
-        , width = $tip[0].offsetWidth
-        , height = $tip[0].offsetHeight
-        , actualWidth
-        , actualHeight
-        , delta
-        , replace
-
-      $tip
-        .offset(offset)
-        .addClass(placement)
-        .addClass('in')
-
-      actualWidth = $tip[0].offsetWidth
-      actualHeight = $tip[0].offsetHeight
-
-      if (placement == 'top' && actualHeight != height) {
-        offset.top = offset.top + height - actualHeight
-        replace = true
-      }
-
-      if (placement == 'bottom' || placement == 'top') {
-        delta = 0
-
-        if (offset.left < 0){
-          delta = offset.left * -2
-          offset.left = 0
-          $tip.offset(offset)
-          actualWidth = $tip[0].offsetWidth
-          actualHeight = $tip[0].offsetHeight
-        }
-
-        this.replaceArrow(delta - width + actualWidth, actualWidth, 'left')
-      } else {
-        this.replaceArrow(actualHeight - height, actualHeight, 'top')
-      }
-
-      if (replace) $tip.offset(offset)
-    }
-
-  , replaceArrow: function(delta, dimension, position){
-      this
-        .arrow()
-        .css(position, delta ? (50 * (1 - delta / dimension) + "%") : '')
-    }
-
-  , setContent: function () {
-      var $tip = this.tip()
-        , title = this.getTitle()
-
-      $tip.find('.tooltip-inner')[this.options.html ? 'html' : 'text'](title)
-      $tip.removeClass('fade in top bottom left right')
-    }
-
-  , hide: function () {
-      var that = this
-        , $tip = this.tip()
-        , e = $.Event('hide')
-
-      this.$element.trigger(e)
-      if (e.isDefaultPrevented()) return
-
-      $tip.removeClass('in')
-
-      function removeWithAnimation() {
-        var timeout = setTimeout(function () {
-          $tip.off($.support.transition.end).detach()
-        }, 500)
-
-        $tip.one($.support.transition.end, function () {
-          clearTimeout(timeout)
-          $tip.detach()
-        })
-      }
-
-      $.support.transition && this.$tip.hasClass('fade') ?
-        removeWithAnimation() :
-        $tip.detach()
-
-      this.$element.trigger('hidden')
-
-      return this
-    }
-
-  , fixTitle: function () {
-      var $e = this.$element
-      if ($e.attr('title') || typeof($e.attr('data-original-title')) != 'string') {
-        $e.attr('data-original-title', $e.attr('title') || '').attr('title', '')
-      }
-    }
-
-  , hasContent: function () {
-      return this.getTitle()
-    }
-
-  , getPosition: function () {
-      var el = this.$element[0]
-      return $.extend({}, (typeof el.getBoundingClientRect == 'function') ? el.getBoundingClientRect() : {
-        width: el.offsetWidth
-      , height: el.offsetHeight
-      }, this.$element.offset())
-    }
-
-  , getTitle: function () {
-      var title
-        , $e = this.$element
-        , o = this.options
-
-      title = $e.attr('data-original-title')
-        || (typeof o.title == 'function' ? o.title.call($e[0]) :  o.title)
-
-      return title
-    }
-
-  , tip: function () {
-      return this.$tip = this.$tip || $(this.options.template)
-    }
-
-  , arrow: function(){
-      return this.$arrow = this.$arrow || this.tip().find(".tooltip-arrow")
-    }
-
-  , validate: function () {
-      if (!this.$element[0].parentNode) {
-        this.hide()
-        this.$element = null
-        this.options = null
-      }
-    }
-
-  , enable: function () {
-      this.enabled = true
-    }
-
-  , disable: function () {
-      this.enabled = false
-    }
-
-  , toggleEnabled: function () {
-      this.enabled = !this.enabled
-    }
-
-  , toggle: function (e) {
-      var self = e ? $(e.currentTarget)[this.type](this._options).data(this.type) : this
-      self.tip().hasClass('in') ? self.hide() : self.show()
-    }
-
-  , destroy: function () {
-      this.hide().$element.off('.' + this.type).removeData(this.type)
-    }
-
-  }
-
-
- /* TOOLTIP PLUGIN DEFINITION
-  * ========================= */
-
-  var old = $.fn.tooltip
-
-  $.fn.tooltip = function ( option ) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('tooltip')
-        , options = typeof option == 'object' && option
-      if (!data) $this.data('tooltip', (data = new Tooltip(this, options)))
-      if (typeof option == 'string') data[option]()
-    })
-  }
-
-  $.fn.tooltip.Constructor = Tooltip
-
-  $.fn.tooltip.defaults = {
-    animation: true
-  , placement: 'top'
-  , selector: false
-  , template: '<div class="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>'
-  , trigger: 'hover focus'
-  , title: ''
-  , delay: 0
-  , html: false
-  , container: false
-  }
-
-
- /* TOOLTIP NO CONFLICT
-  * =================== */
-
-  $.fn.tooltip.noConflict = function () {
-    $.fn.tooltip = old
-    return this
-  }
-
-}(window.jQuery);
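A minimal sketch for the tooltip plugin deleted above; '[rel=tooltip]' is a hypothetical selector, and the tip text comes from each element's title attribute (captured by fixTitle) or the title option:

    $(function () {
      $('[rel=tooltip]').tooltip({
        placement: 'bottom'
      , delay: { show: 500, hide: 100 }  // a plain number is also accepted; getOptions expands it
      })
    })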
-/* ===========================================================
- * bootstrap-popover.js v2.3.2
- * http://getbootstrap.com/2.3.2/javascript.html#popovers
- * ===========================================================
- * Copyright 2013 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * =========================================================== */
-
-
-!function ($) {
-
-  "use strict"; // jshint ;_;
-
-
- /* POPOVER PUBLIC CLASS DEFINITION
-  * =============================== */
-
-  var Popover = function (element, options) {
-    this.init('popover', element, options)
-  }
-
-
-  /* NOTE: POPOVER EXTENDS BOOTSTRAP-TOOLTIP.js
-     ========================================== */
-
-  Popover.prototype = $.extend({}, $.fn.tooltip.Constructor.prototype, {
-
-    constructor: Popover
-
-  , setContent: function () {
-      var $tip = this.tip()
-        , title = this.getTitle()
-        , content = this.getContent()
-
-      $tip.find('.popover-title')[this.options.html ? 'html' : 'text'](title)
-      $tip.find('.popover-content')[this.options.html ? 'html' : 'text'](content)
-
-      $tip.removeClass('fade top bottom left right in')
-    }
-
-  , hasContent: function () {
-      return this.getTitle() || this.getContent()
-    }
-
-  , getContent: function () {
-      var content
-        , $e = this.$element
-        , o = this.options
-
-      content = (typeof o.content == 'function' ? o.content.call($e[0]) :  o.content)
-        || $e.attr('data-content')
-
-      return content
-    }
-
-  , tip: function () {
-      if (!this.$tip) {
-        this.$tip = $(this.options.template)
-      }
-      return this.$tip
-    }
-
-  , destroy: function () {
-      this.hide().$element.off('.' + this.type).removeData(this.type)
-    }
-
-  })
-
-
- /* POPOVER PLUGIN DEFINITION
-  * ========================= */
-
-  var old = $.fn.popover
-
-  $.fn.popover = function (option) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('popover')
-        , options = typeof option == 'object' && option
-      if (!data) $this.data('popover', (data = new Popover(this, options)))
-      if (typeof option == 'string') data[option]()
-    })
-  }
-
-  $.fn.popover.Constructor = Popover
-
-  $.fn.popover.defaults = $.extend({} , $.fn.tooltip.defaults, {
-    placement: 'right'
-  , trigger: 'click'
-  , content: ''
-  , template: '<div class="popover"><div class="arrow"></div><h3 class="popover-title"></h3><div class="popover-content"></div></div>'
-  })
-
-
- /* POPOVER NO CONFLICT
-  * =================== */
-
-  $.fn.popover.noConflict = function () {
-    $.fn.popover = old
-    return this
-  }
-
-}(window.jQuery);
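A minimal sketch for the popover plugin deleted above, which extends the tooltip prototype; '#help-icon' and the strings are hypothetical, and the defaults switch trigger to 'click' and placement to 'right':

    $(function () {
      $('#help-icon').popover({
        title: 'Shortcuts'
      , content: 'Press ? to open the keyboard reference.'  // or supplied via a data-content attribute
      })
    })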
-/* =============================================================
- * bootstrap-scrollspy.js v2.3.2
- * http://getbootstrap.com/2.3.2/javascript.html#scrollspy
- * =============================================================
- * Copyright 2013 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============================================================== */
-
-
-!function ($) {
-
-  "use strict"; // jshint ;_;
-
-
- /* SCROLLSPY CLASS DEFINITION
-  * ========================== */
-
-  function ScrollSpy(element, options) {
-    var process = $.proxy(this.process, this)
-      , $element = $(element).is('body') ? $(window) : $(element)
-      , href
-    this.options = $.extend({}, $.fn.scrollspy.defaults, options)
-    this.$scrollElement = $element.on('scroll.scroll-spy.data-api', process)
-    this.selector = (this.options.target
-      || ((href = $(element).attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '')) //strip for ie7
-      || '') + ' .nav li > a'
-    this.$body = $('body')
-    this.refresh()
-    this.process()
-  }
-
-  ScrollSpy.prototype = {
-
-      constructor: ScrollSpy
-
-    , refresh: function () {
-        var self = this
-          , $targets
-
-        this.offsets = $([])
-        this.targets = $([])
-
-        $targets = this.$body
-          .find(this.selector)
-          .map(function () {
-            var $el = $(this)
-              , href = $el.data('target') || $el.attr('href')
-              , $href = /^#\w/.test(href) && $(href)
-            return ( $href
-              && $href.length
-              && [[ $href.position().top + (!$.isWindow(self.$scrollElement.get(0)) && self.$scrollElement.scrollTop()), href ]] ) || null
-          })
-          .sort(function (a, b) { return a[0] - b[0] })
-          .each(function () {
-            self.offsets.push(this[0])
-            self.targets.push(this[1])
-          })
-      }
-
-    , process: function () {
-        var scrollTop = this.$scrollElement.scrollTop() + this.options.offset
-          , scrollHeight = this.$scrollElement[0].scrollHeight || this.$body[0].scrollHeight
-          , maxScroll = scrollHeight - this.$scrollElement.height()
-          , offsets = this.offsets
-          , targets = this.targets
-          , activeTarget = this.activeTarget
-          , i
-
-        if (scrollTop >= maxScroll) {
-          return activeTarget != (i = targets.last()[0])
-            && this.activate ( i )
-        }
-
-        for (i = offsets.length; i--;) {
-          activeTarget != targets[i]
-            && scrollTop >= offsets[i]
-            && (!offsets[i + 1] || scrollTop <= offsets[i + 1])
-            && this.activate( targets[i] )
-        }
-      }
-
-    , activate: function (target) {
-        var active
-          , selector
-
-        this.activeTarget = target
-
-        $(this.selector)
-          .parent('.active')
-          .removeClass('active')
-
-        selector = this.selector
-          + '[data-target="' + target + '"],'
-          + this.selector + '[href="' + target + '"]'
-
-        active = $(selector)
-          .parent('li')
-          .addClass('active')
-
-        if (active.parent('.dropdown-menu').length)  {
-          active = active.closest('li.dropdown').addClass('active')
-        }
-
-        active.trigger('activate')
-      }
-
-  }
-
-
- /* SCROLLSPY PLUGIN DEFINITION
-  * =========================== */
-
-  var old = $.fn.scrollspy
-
-  $.fn.scrollspy = function (option) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('scrollspy')
-        , options = typeof option == 'object' && option
-      if (!data) $this.data('scrollspy', (data = new ScrollSpy(this, options)))
-      if (typeof option == 'string') data[option]()
-    })
-  }
-
-  $.fn.scrollspy.Constructor = ScrollSpy
-
-  $.fn.scrollspy.defaults = {
-    offset: 10
-  }
-
-
- /* SCROLLSPY NO CONFLICT
-  * ===================== */
-
-  $.fn.scrollspy.noConflict = function () {
-    $.fn.scrollspy = old
-    return this
-  }
-
-
- /* SCROLLSPY DATA-API
-  * ================== */
-
-  $(window).on('load', function () {
-    $('[data-spy="scroll"]').each(function () {
-      var $spy = $(this)
-      $spy.scrollspy($spy.data())
-    })
-  })
-
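  // Editor's aside (illustration only, not part of the deleted file):
  // activating scrollspy from code instead of data-spy="scroll". '.navbar'
  // is hypothetical; target is prefixed onto ' .nav li > a' to find links.
  $('body').scrollspy({ target: '.navbar', offset: 80 })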
-}(window.jQuery);
-/* ========================================================
- * bootstrap-tab.js v2.3.2
- * http://getbootstrap.com/2.3.2/javascript.html#tabs
- * ========================================================
- * Copyright 2013 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ======================================================== */
-
-
-!function ($) {
-
-  "use strict"; // jshint ;_;
-
-
- /* TAB CLASS DEFINITION
-  * ==================== */
-
-  var Tab = function (element) {
-    this.element = $(element)
-  }
-
-  Tab.prototype = {
-
-    constructor: Tab
-
-  , show: function () {
-      var $this = this.element
-        , $ul = $this.closest('ul:not(.dropdown-menu)')
-        , selector = $this.attr('data-target')
-        , previous
-        , $target
-        , e
-
-      if (!selector) {
-        selector = $this.attr('href')
-        selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') //strip for ie7
-      }
-
-      if ( $this.parent('li').hasClass('active') ) return
-
-      previous = $ul.find('.active:last a')[0]
-
-      e = $.Event('show', {
-        relatedTarget: previous
-      })
-
-      $this.trigger(e)
-
-      if (e.isDefaultPrevented()) return
-
-      $target = $(selector)
-
-      this.activate($this.parent('li'), $ul)
-      this.activate($target, $target.parent(), function () {
-        $this.trigger({
-          type: 'shown'
-        , relatedTarget: previous
-        })
-      })
-    }
-
-  , activate: function ( element, container, callback) {
-      var $active = container.find('> .active')
-        , transition = callback
-            && $.support.transition
-            && $active.hasClass('fade')
-
-      function next() {
-        $active
-          .removeClass('active')
-          .find('> .dropdown-menu > .active')
-          .removeClass('active')
-
-        element.addClass('active')
-
-        if (transition) {
-          element[0].offsetWidth // reflow for transition
-          element.addClass('in')
-        } else {
-          element.removeClass('fade')
-        }
-
-        if ( element.parent('.dropdown-menu') ) {
-          element.closest('li.dropdown').addClass('active')
-        }
-
-        callback && callback()
-      }
-
-      transition ?
-        $active.one($.support.transition.end, next) :
-        next()
-
-      $active.removeClass('in')
-    }
-  }
-
-
- /* TAB PLUGIN DEFINITION
-  * ===================== */
-
-  var old = $.fn.tab
-
-  $.fn.tab = function ( option ) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('tab')
-      if (!data) $this.data('tab', (data = new Tab(this)))
-      if (typeof option == 'string') data[option]()
-    })
-  }
-
-  $.fn.tab.Constructor = Tab
-
-
- /* TAB NO CONFLICT
-  * =============== */
-
-  $.fn.tab.noConflict = function () {
-    $.fn.tab = old
-    return this
-  }
-
-
- /* TAB DATA-API
-  * ============ */
-
-  $(document).on('click.tab.data-api', '[data-toggle="tab"], [data-toggle="pill"]', function (e) {
-    e.preventDefault()
-    $(this).tab('show')
-  })
-
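  // Editor's aside (illustration only, not part of the deleted file):
  // selecting a tab programmatically. '#doc-tabs' and '#usage' are
  // hypothetical; 'show' is the only method the plugin dispatches.
  $('#doc-tabs a[href="#usage"]').tab('show')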
-}(window.jQuery);
-/* =============================================================
- * bootstrap-typeahead.js v2.3.2
- * http://getbootstrap.com/2.3.2/javascript.html#typeahead
- * =============================================================
- * Copyright 2013 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============================================================ */
-
-
-!function($){
-
-  "use strict"; // jshint ;_;
-
-
- /* TYPEAHEAD PUBLIC CLASS DEFINITION
-  * ================================= */
-
-  var Typeahead = function (element, options) {
-    this.$element = $(element)
-    this.options = $.extend({}, $.fn.typeahead.defaults, options)
-    this.matcher = this.options.matcher || this.matcher
-    this.sorter = this.options.sorter || this.sorter
-    this.highlighter = this.options.highlighter || this.highlighter
-    this.updater = this.options.updater || this.updater
-    this.source = this.options.source
-    this.$menu = $(this.options.menu)
-    this.shown = false
-    this.listen()
-  }
-
-  Typeahead.prototype = {
-
-    constructor: Typeahead
-
-  , select: function () {
-      var val = this.$menu.find('.active').attr('data-value')
-      this.$element
-        .val(this.updater(val))
-        .change()
-      return this.hide()
-    }
-
-  , updater: function (item) {
-      return item
-    }
-
-  , show: function () {
-      var pos = $.extend({}, this.$element.position(), {
-        height: this.$element[0].offsetHeight
-      })
-
-      this.$menu
-        .insertAfter(this.$element)
-        .css({
-          top: pos.top + pos.height
-        , left: pos.left
-        })
-        .show()
-
-      this.shown = true
-      return this
-    }
-
-  , hide: function () {
-      this.$menu.hide()
-      this.shown = false
-      return this
-    }
-
-  , lookup: function (event) {
-      var items
-
-      this.query = this.$element.val()
-
-      if (!this.query || this.query.length < this.options.minLength) {
-        return this.shown ? this.hide() : this
-      }
-
-      items = $.isFunction(this.source) ? this.source(this.query, $.proxy(this.process, this)) : this.source
-
-      return items ? this.process(items) : this
-    }
-
-  , process: function (items) {
-      var that = this
-
-      items = $.grep(items, function (item) {
-        return that.matcher(item)
-      })
-
-      items = this.sorter(items)
-
-      if (!items.length) {
-        return this.shown ? this.hide() : this
-      }
-
-      return this.render(items.slice(0, this.options.items)).show()
-    }
-
-  , matcher: function (item) {
-      return ~item.toLowerCase().indexOf(this.query.toLowerCase())
-    }
-
-  , sorter: function (items) {
-      var beginswith = []
-        , caseSensitive = []
-        , caseInsensitive = []
-        , item
-
-      while (item = items.shift()) {
-        if (!item.toLowerCase().indexOf(this.query.toLowerCase())) beginswith.push(item)
-        else if (~item.indexOf(this.query)) caseSensitive.push(item)
-        else caseInsensitive.push(item)
-      }
-
-      return beginswith.concat(caseSensitive, caseInsensitive)
-    }
-
-  , highlighter: function (item) {
-      var query = this.query.replace(/[\-\[\]{}()*+?.,\\\^$|#\s]/g, '\\$&')
-      return item.replace(new RegExp('(' + query + ')', 'ig'), function ($1, match) {
-        return '<strong>' + match + '</strong>'
-      })
-    }
-
-  , render: function (items) {
-      var that = this
-
-      items = $(items).map(function (i, item) {
-        i = $(that.options.item).attr('data-value', item)
-        i.find('a').html(that.highlighter(item))
-        return i[0]
-      })
-
-      items.first().addClass('active')
-      this.$menu.html(items)
-      return this
-    }
-
-  , next: function (event) {
-      var active = this.$menu.find('.active').removeClass('active')
-        , next = active.next()
-
-      if (!next.length) {
-        next = $(this.$menu.find('li')[0])
-      }
-
-      next.addClass('active')
-    }
-
-  , prev: function (event) {
-      var active = this.$menu.find('.active').removeClass('active')
-        , prev = active.prev()
-
-      if (!prev.length) {
-        prev = this.$menu.find('li').last()
-      }
-
-      prev.addClass('active')
-    }
-
-  , listen: function () {
-      this.$element
-        .on('focus',    $.proxy(this.focus, this))
-        .on('blur',     $.proxy(this.blur, this))
-        .on('keypress', $.proxy(this.keypress, this))
-        .on('keyup',    $.proxy(this.keyup, this))
-
-      if (this.eventSupported('keydown')) {
-        this.$element.on('keydown', $.proxy(this.keydown, this))
-      }
-
-      this.$menu
-        .on('click', $.proxy(this.click, this))
-        .on('mouseenter', 'li', $.proxy(this.mouseenter, this))
-        .on('mouseleave', 'li', $.proxy(this.mouseleave, this))
-    }
-
-  , eventSupported: function(eventName) {
-      var isSupported = eventName in this.$element
-      if (!isSupported) {
-        this.$element.setAttribute(eventName, 'return;')
-        isSupported = typeof this.$element[eventName] === 'function'
-      }
-      return isSupported
-    }
-
-  , move: function (e) {
-      if (!this.shown) return
-
-      switch(e.keyCode) {
-        case 9: // tab
-        case 13: // enter
-        case 27: // escape
-          e.preventDefault()
-          break
-
-        case 38: // up arrow
-          e.preventDefault()
-          this.prev()
-          break
-
-        case 40: // down arrow
-          e.preventDefault()
-          this.next()
-          break
-      }
-
-      e.stopPropagation()
-    }
-
-  , keydown: function (e) {
-      this.suppressKeyPressRepeat = ~$.inArray(e.keyCode, [40,38,9,13,27])
-      this.move(e)
-    }
-
-  , keypress: function (e) {
-      if (this.suppressKeyPressRepeat) return
-      this.move(e)
-    }
-
-  , keyup: function (e) {
-      switch(e.keyCode) {
-        case 40: // down arrow
-        case 38: // up arrow
-        case 16: // shift
-        case 17: // ctrl
-        case 18: // alt
-          break
-
-        case 9: // tab
-        case 13: // enter
-          if (!this.shown) return
-          this.select()
-          break
-
-        case 27: // escape
-          if (!this.shown) return
-          this.hide()
-          break
-
-        default:
-          this.lookup()
-      }
-
-      e.stopPropagation()
-      e.preventDefault()
-  }
-
-  , focus: function (e) {
-      this.focused = true
-    }
-
-  , blur: function (e) {
-      this.focused = false
-      if (!this.mousedover && this.shown) this.hide()
-    }
-
-  , click: function (e) {
-      e.stopPropagation()
-      e.preventDefault()
-      this.select()
-      this.$element.focus()
-    }
-
-  , mouseenter: function (e) {
-      this.mousedover = true
-      this.$menu.find('.active').removeClass('active')
-      $(e.currentTarget).addClass('active')
-    }
-
-  , mouseleave: function (e) {
-      this.mousedover = false
-      if (!this.focused && this.shown) this.hide()
-    }
-
-  }
-
-
-  /* TYPEAHEAD PLUGIN DEFINITION
-   * =========================== */
-
-  var old = $.fn.typeahead
-
-  $.fn.typeahead = function (option) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('typeahead')
-        , options = typeof option == 'object' && option
-      if (!data) $this.data('typeahead', (data = new Typeahead(this, options)))
-      if (typeof option == 'string') data[option]()
-    })
-  }
-
-  $.fn.typeahead.defaults = {
-    source: []
-  , items: 8
-  , menu: '<ul class="typeahead dropdown-menu"></ul>'
-  , item: '<li><a href="#"></a></li>'
-  , minLength: 1
-  }
-
-  $.fn.typeahead.Constructor = Typeahead
-
-
- /* TYPEAHEAD NO CONFLICT
-  * =====================
-
-  $.fn.typeahead.noConflict = function () {
-    $.fn.typeahead = old
-    return this
-  }
-
-
- /* TYPEAHEAD DATA-API
-  * ================== */
-
-  $(document).on('focus.typeahead.data-api', '[data-provide="typeahead"]', function (e) {
-    var $this = $(this)
-    if ($this.data('typeahead')) return
-    $this.typeahead($this.data())
-  })
-
-}(window.jQuery);
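A minimal sketch for the typeahead plugin deleted above; '#search' and the sample strings are hypothetical, and source may instead be a function receiving (query, process) as in lookup():

    $(function () {
      $('#search').typeahead({
        source: ['aurora', 'mesos', 'thermos']
      , items: 4      // cap rendered suggestions (default 8)
      , minLength: 2  // require two characters before lookup fires
      })
    })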
-/* ==========================================================
- * bootstrap-affix.js v2.3.2
- * http://getbootstrap.com/2.3.2/javascript.html#affix
- * ==========================================================
- * Copyright 2013 Twitter, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ========================================================== */
-
-
-!function ($) {
-
-  "use strict"; // jshint ;_;
-
-
- /* AFFIX CLASS DEFINITION
-  * ====================== */
-
-  var Affix = function (element, options) {
-    this.options = $.extend({}, $.fn.affix.defaults, options)
-    this.$window = $(window)
-      .on('scroll.affix.data-api', $.proxy(this.checkPosition, this))
-      .on('click.affix.data-api',  $.proxy(function () { setTimeout($.proxy(this.checkPosition, this), 1) }, this))
-    this.$element = $(element)
-    this.checkPosition()
-  }
-
-  Affix.prototype.checkPosition = function () {
-    if (!this.$element.is(':visible')) return
-
-    var scrollHeight = $(document).height()
-      , scrollTop = this.$window.scrollTop()
-      , position = this.$element.offset()
-      , offset = this.options.offset
-      , offsetBottom = offset.bottom
-      , offsetTop = offset.top
-      , reset = 'affix affix-top affix-bottom'
-      , affix
-
-    if (typeof offset != 'object') offsetBottom = offsetTop = offset
-    if (typeof offsetTop == 'function') offsetTop = offset.top()
-    if (typeof offsetBottom == 'function') offsetBottom = offset.bottom()
-
-    affix = this.unpin != null && (scrollTop + this.unpin <= position.top) ?
-      false    : offsetBottom != null && (position.top + this.$element.height() >= scrollHeight - offsetBottom) ?
-      'bottom' : offsetTop != null && scrollTop <= offsetTop ?
-      'top'    : false
-
-    if (this.affixed === affix) return
-
-    this.affixed = affix
-    this.unpin = affix == 'bottom' ? position.top - scrollTop : null
-
-    this.$element.removeClass(reset).addClass('affix' + (affix ? '-' + affix : ''))
-  }
-
-
- /* AFFIX PLUGIN DEFINITION
-  * ======================= */
-
-  var old = $.fn.affix
-
-  $.fn.affix = function (option) {
-    return this.each(function () {
-      var $this = $(this)
-        , data = $this.data('affix')
-        , options = typeof option == 'object' && option
-      if (!data) $this.data('affix', (data = new Affix(this, options)))
-      if (typeof option == 'string') data[option]()
-    })
-  }
-
-  $.fn.affix.Constructor = Affix
-
-  $.fn.affix.defaults = {
-    offset: 0
-  }
-
-
- /* AFFIX NO CONFLICT
-  * ================= */
-
-  $.fn.affix.noConflict = function () {
-    $.fn.affix = old
-    return this
-  }
-
-
- /* AFFIX DATA-API
-  * ============== */
-
-  $(window).on('load', function () {
-    $('[data-spy="affix"]').each(function () {
-      var $spy = $(this)
-        , data = $spy.data()
-
-      data.offset = data.offset || {}
-
-      data.offsetBottom && (data.offset.bottom = data.offsetBottom)
-      data.offsetTop && (data.offset.top = data.offsetTop)
-
-      $spy.affix(data)
-    })
-  })
-
-
-}(window.jQuery);
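Finally, a minimal sketch for the affix plugin deleted above; '#sidebar' and the offsets are hypothetical, and offset may be a number, an object, or functions per the typeof checks in checkPosition:

    $(function () {
      $('#sidebar').affix({ offset: { top: 120, bottom: 40 } })
    })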
diff --git a/publish/assets/js/bootstrap.min.js b/publish/assets/js/bootstrap.min.js
deleted file mode 100644
index 94b68fb..0000000
--- a/publish/assets/js/bootstrap.min.js
+++ /dev/null
@@ -1,7 +0,0 @@
-/*!
-* Bootstrap.js by @fat & @mdo
-* Copyright 2013 Twitter, Inc.
-* http://www.apache.org/licenses/LICENSE-2.0.txt
-*/
-
-!function(e){"use strict";e(function(){e.support.transition=function(){var e=function(){var e=document.createElement("bootstrap"),t={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd otransitionend",transition:"transitionend"},n;for(n in t)if(e.style[n]!==undefined)return t[n]}();return e&&{end:e}}()})}(window.jQuery),!function(e){"use strict";var t='[data-dismiss="alert"]',n=function(n){e(n).on("click",t,this.close)};n.prototype.close=function(t){function s(){i.trigger("closed").remove()}var n=e(this),r=n.attr("data-target"),i;r||(r=n.attr("href"),r=r&&r.replace(/.*(?=#[^\s]*$)/,"")),i=e(r),t&&t.preventDefault(),i.length||(i=n.hasClass("alert")?n:n.parent()),i.trigger(t=e.Event("close"));if(t.isDefaultPrevented())return;i.removeClass("in"),e.support.transition&&i.hasClass("fade")?i.on(e.support.transition.end,s):s()};var r=e.fn.alert;e.fn.alert=function(t){return this.each(function(){var r=e(this),i=r.data("alert");i||r.data("alert",i=new n(this)),typeof t=="string"&&i[t].call(r)})},e.fn.alert.Constructor=n,e.fn.alert.noConflict=function(){return e.fn.alert=r,this},e(document).on("click.alert.data-api",t,n.prototype.close)}(window.jQuery),!function(e){"use strict";var t=function(t,n){this.$element=e(t),this.options=e.extend({},e.fn.button.defaults,n)};t.prototype.setState=function(e){var t="disabled",n=this.$element,r=n.data(),i=n.is("input")?"val":"html";e+="Text",r.resetText||n.data("resetText",n[i]()),n[i](r[e]||this.options[e]),setTimeout(function(){e=="loadingText"?n.addClass(t).attr(t,t):n.removeClass(t).removeAttr(t)},0)},t.prototype.toggle=function(){var e=this.$element.closest('[data-toggle="buttons-radio"]');e&&e.find(".active").removeClass("active"),this.$element.toggleClass("active")};var n=e.fn.button;e.fn.button=function(n){return this.each(function(){var r=e(this),i=r.data("button"),s=typeof n=="object"&&n;i||r.data("button",i=new t(this,s)),n=="toggle"?i.toggle():n&&i.setState(n)})},e.fn.button.defaults={loadingText:"loading..."},e.fn.button.Constructor=t,e.fn.button.noConflict=function(){return e.fn.button=n,this},e(document).on("click.button.data-api","[data-toggle^=button]",function(t){var n=e(t.target);n.hasClass("btn")||(n=n.closest(".btn")),n.button("toggle")})}(window.jQuery),!function(e){"use strict";var t=function(t,n){this.$element=e(t),this.$indicators=this.$element.find(".carousel-indicators"),this.options=n,this.options.pause=="hover"&&this.$element.on("mouseenter",e.proxy(this.pause,this)).on("mouseleave",e.proxy(this.cycle,this))};t.prototype={cycle:function(t){return t||(this.paused=!1),this.interval&&clearInterval(this.interval),this.options.interval&&!this.paused&&(this.interval=setInterval(e.proxy(this.next,this),this.options.interval)),this},getActiveIndex:function(){return this.$active=this.$element.find(".item.active"),this.$items=this.$active.parent().children(),this.$items.index(this.$active)},to:function(t){var n=this.getActiveIndex(),r=this;if(t>this.$items.length-1||t<0)return;return this.sliding?this.$element.one("slid",function(){r.to(t)}):n==t?this.pause().cycle():this.slide(t>n?"next":"prev",e(this.$items[t]))},pause:function(t){return t||(this.paused=!0),this.$element.find(".next, .prev").length&&e.support.transition.end&&(this.$element.trigger(e.support.transition.end),this.cycle(!0)),clearInterval(this.interval),this.interval=null,this},next:function(){if(this.sliding)return;return this.slide("next")},prev:function(){if(this.sliding)return;return this.slide("prev")},slide:function(t,n){var 
r=this.$element.find(".item.active"),i=n||r[t](),s=this.interval,o=t=="next"?"left":"right",u=t=="next"?"first":"last",a=this,f;this.sliding=!0,s&&this.pause(),i=i.length?i:this.$element.find(".item")[u](),f=e.Event("slide",{relatedTarget:i[0],direction:o});if(i.hasClass("active"))return;this.$indicators.length&&(this.$indicators.find(".active").removeClass("active"),this.$element.one("slid",function(){var t=e(a.$indicators.children()[a.getActiveIndex()]);t&&t.addClass("active")}));if(e.support.transition&&this.$element.hasClass("slide")){this.$element.trigger(f);if(f.isDefaultPrevented())return;i.addClass(t),i[0].offsetWidth,r.addClass(o),i.addClass(o),this.$element.one(e.support.transition.end,function(){i.removeClass([t,o].join(" ")).addClass("active"),r.removeClass(["active",o].join(" ")),a.sliding=!1,setTimeout(function(){a.$element.trigger("slid")},0)})}else{this.$element.trigger(f);if(f.isDefaultPrevented())return;r.removeClass("active"),i.addClass("active"),this.sliding=!1,this.$element.trigger("slid")}return s&&this.cycle(),this}};var n=e.fn.carousel;e.fn.carousel=function(n){return this.each(function(){var r=e(this),i=r.data("carousel"),s=e.extend({},e.fn.carousel.defaults,typeof n=="object"&&n),o=typeof n=="string"?n:s.slide;i||r.data("carousel",i=new t(this,s)),typeof n=="number"?i.to(n):o?i[o]():s.interval&&i.pause().cycle()})},e.fn.carousel.defaults={interval:5e3,pause:"hover"},e.fn.carousel.Constructor=t,e.fn.carousel.noConflict=function(){return e.fn.carousel=n,this},e(document).on("click.carousel.data-api","[data-slide], [data-slide-to]",function(t){var n=e(this),r,i=e(n.attr("data-target")||(r=n.attr("href"))&&r.replace(/.*(?=#[^\s]+$)/,"")),s=e.extend({},i.data(),n.data()),o;i.carousel(s),(o=n.attr("data-slide-to"))&&i.data("carousel").pause().to(o).cycle(),t.preventDefault()})}(window.jQuery),!function(e){"use strict";var t=function(t,n){this.$element=e(t),this.options=e.extend({},e.fn.collapse.defaults,n),this.options.parent&&(this.$parent=e(this.options.parent)),this.options.toggle&&this.toggle()};t.prototype={constructor:t,dimension:function(){var e=this.$element.hasClass("width");return e?"width":"height"},show:function(){var t,n,r,i;if(this.transitioning||this.$element.hasClass("in"))return;t=this.dimension(),n=e.camelCase(["scroll",t].join("-")),r=this.$parent&&this.$parent.find("> .accordion-group > .in");if(r&&r.length){i=r.data("collapse");if(i&&i.transitioning)return;r.collapse("hide"),i||r.data("collapse",null)}this.$element[t](0),this.transition("addClass",e.Event("show"),"shown"),e.support.transition&&this.$element[t](this.$element[0][n])},hide:function(){var t;if(this.transitioning||!this.$element.hasClass("in"))return;t=this.dimension(),this.reset(this.$element[t]()),this.transition("removeClass",e.Event("hide"),"hidden"),this.$element[t](0)},reset:function(e){var t=this.dimension();return this.$element.removeClass("collapse")[t](e||"auto")[0].offsetWidth,this.$element[e!==null?"addClass":"removeClass"]("collapse"),this},transition:function(t,n,r){var i=this,s=function(){n.type=="show"&&i.reset(),i.transitioning=0,i.$element.trigger(r)};this.$element.trigger(n);if(n.isDefaultPrevented())return;this.transitioning=1,this.$element[t]("in"),e.support.transition&&this.$element.hasClass("collapse")?this.$element.one(e.support.transition.end,s):s()},toggle:function(){this[this.$element.hasClass("in")?"hide":"show"]()}};var n=e.fn.collapse;e.fn.collapse=function(n){return this.each(function(){var 
r=e(this),i=r.data("collapse"),s=e.extend({},e.fn.collapse.defaults,r.data(),typeof n=="object"&&n);i||r.data("collapse",i=new t(this,s)),typeof n=="string"&&i[n]()})},e.fn.collapse.defaults={toggle:!0},e.fn.collapse.Constructor=t,e.fn.collapse.noConflict=function(){return e.fn.collapse=n,this},e(document).on("click.collapse.data-api","[data-toggle=collapse]",function(t){var n=e(this),r,i=n.attr("data-target")||t.preventDefault()||(r=n.attr("href"))&&r.replace(/.*(?=#[^\s]+$)/,""),s=e(i).data("collapse")?"toggle":n.data();n[e(i).hasClass("in")?"addClass":"removeClass"]("collapsed"),e(i).collapse(s)})}(window.jQuery),!function(e){"use strict";function r(){e(".dropdown-backdrop").remove(),e(t).each(function(){i(e(this)).removeClass("open")})}function i(t){var n=t.attr("data-target"),r;n||(n=t.attr("href"),n=n&&/#/.test(n)&&n.replace(/.*(?=#[^\s]*$)/,"")),r=n&&e(n);if(!r||!r.length)r=t.parent();return r}var t="[data-toggle=dropdown]",n=function(t){var n=e(t).on("click.dropdown.data-api",this.toggle);e("html").on("click.dropdown.data-api",function(){n.parent().removeClass("open")})};n.prototype={constructor:n,toggle:function(t){var n=e(this),s,o;if(n.is(".disabled, :disabled"))return;return s=i(n),o=s.hasClass("open"),r(),o||("ontouchstart"in document.documentElement&&e('<div class="dropdown-backdrop"/>').insertBefore(e(this)).on("click",r),s.toggleClass("open")),n.focus(),!1},keydown:function(n){var r,s,o,u,a,f;if(!/(38|40|27)/.test(n.keyCode))return;r=e(this),n.preventDefault(),n.stopPropagation();if(r.is(".disabled, :disabled"))return;u=i(r),a=u.hasClass("open");if(!a||a&&n.keyCode==27)return n.which==27&&u.find(t).focus(),r.click();s=e("[role=menu] li:not(.divider):visible a",u);if(!s.length)return;f=s.index(s.filter(":focus")),n.keyCode==38&&f>0&&f--,n.keyCode==40&&f<s.length-1&&f++,~f||(f=0),s.eq(f).focus()}};var s=e.fn.dropdown;e.fn.dropdown=function(t){return this.each(function(){var r=e(this),i=r.data("dropdown");i||r.data("dropdown",i=new n(this)),typeof t=="string"&&i[t].call(r)})},e.fn.dropdown.Constructor=n,e.fn.dropdown.noConflict=function(){return e.fn.dropdown=s,this},e(document).on("click.dropdown.data-api",r).on("click.dropdown.data-api",".dropdown form",function(e){e.stopPropagation()}).on("click.dropdown.data-api",t,n.prototype.toggle).on("keydown.dropdown.data-api",t+", [role=menu]",n.prototype.keydown)}(window.jQuery),!function(e){"use strict";var t=function(t,n){this.options=n,this.$element=e(t).delegate('[data-dismiss="modal"]',"click.dismiss.modal",e.proxy(this.hide,this)),this.options.remote&&this.$element.find(".modal-body").load(this.options.remote)};t.prototype={constructor:t,toggle:function(){return this[this.isShown?"hide":"show"]()},show:function(){var t=this,n=e.Event("show");this.$element.trigger(n);if(this.isShown||n.isDefaultPrevented())return;this.isShown=!0,this.escape(),this.backdrop(function(){var n=e.support.transition&&t.$element.hasClass("fade");t.$element.parent().length||t.$element.appendTo(document.body),t.$element.show(),n&&t.$element[0].offsetWidth,t.$element.addClass("in").attr("aria-hidden",!1),t.enforceFocus(),n?t.$element.one(e.support.transition.end,function(){t.$element.focus().trigger("shown")}):t.$element.focus().trigger("shown")})},hide:function(t){t&&t.preventDefault();var 
n=this;t=e.Event("hide"),this.$element.trigger(t);if(!this.isShown||t.isDefaultPrevented())return;this.isShown=!1,this.escape(),e(document).off("focusin.modal"),this.$element.removeClass("in").attr("aria-hidden",!0),e.support.transition&&this.$element.hasClass("fade")?this.hideWithTransition():this.hideModal()},enforceFocus:function(){var t=this;e(document).on("focusin.modal",function(e){t.$element[0]!==e.target&&!t.$element.has(e.target).length&&t.$element.focus()})},escape:function(){var e=this;this.isShown&&this.options.keyboard?this.$element.on("keyup.dismiss.modal",function(t){t.which==27&&e.hide()}):this.isShown||this.$element.off("keyup.dismiss.modal")},hideWithTransition:function(){var t=this,n=setTimeout(function(){t.$element.off(e.support.transition.end),t.hideModal()},500);this.$element.one(e.support.transition.end,function(){clearTimeout(n),t.hideModal()})},hideModal:function(){var e=this;this.$element.hide(),this.backdrop(function(){e.removeBackdrop(),e.$element.trigger("hidden")})},removeBackdrop:function(){this.$backdrop&&this.$backdrop.remove(),this.$backdrop=null},backdrop:function(t){var n=this,r=this.$element.hasClass("fade")?"fade":"";if(this.isShown&&this.options.backdrop){var i=e.support.transition&&r;this.$backdrop=e('<div class="modal-backdrop '+r+'" />').appendTo(document.body),this.$backdrop.click(this.options.backdrop=="static"?e.proxy(this.$element[0].focus,this.$element[0]):e.proxy(this.hide,this)),i&&this.$backdrop[0].offsetWidth,this.$backdrop.addClass("in");if(!t)return;i?this.$backdrop.one(e.support.transition.end,t):t()}else!this.isShown&&this.$backdrop?(this.$backdrop.removeClass("in"),e.support.transition&&this.$element.hasClass("fade")?this.$backdrop.one(e.support.transition.end,t):t()):t&&t()}};var n=e.fn.modal;e.fn.modal=function(n){return this.each(function(){var r=e(this),i=r.data("modal"),s=e.extend({},e.fn.modal.defaults,r.data(),typeof n=="object"&&n);i||r.data("modal",i=new t(this,s)),typeof n=="string"?i[n]():s.show&&i.show()})},e.fn.modal.defaults={backdrop:!0,keyboard:!0,show:!0},e.fn.modal.Constructor=t,e.fn.modal.noConflict=function(){return e.fn.modal=n,this},e(document).on("click.modal.data-api",'[data-toggle="modal"]',function(t){var n=e(this),r=n.attr("href"),i=e(n.attr("data-target")||r&&r.replace(/.*(?=#[^\s]+$)/,"")),s=i.data("modal")?"toggle":e.extend({remote:!/#/.test(r)&&r},i.data(),n.data());t.preventDefault(),i.modal(s).one("hide",function(){n.focus()})})}(window.jQuery),!function(e){"use strict";var t=function(e,t){this.init("tooltip",e,t)};t.prototype={constructor:t,init:function(t,n,r){var i,s,o,u,a;this.type=t,this.$element=e(n),this.options=this.getOptions(r),this.enabled=!0,o=this.options.trigger.split(" ");for(a=o.length;a--;)u=o[a],u=="click"?this.$element.on("click."+this.type,this.options.selector,e.proxy(this.toggle,this)):u!="manual"&&(i=u=="hover"?"mouseenter":"focus",s=u=="hover"?"mouseleave":"blur",this.$element.on(i+"."+this.type,this.options.selector,e.proxy(this.enter,this)),this.$element.on(s+"."+this.type,this.options.selector,e.proxy(this.leave,this)));this.options.selector?this._options=e.extend({},this.options,{trigger:"manual",selector:""}):this.fixTitle()},getOptions:function(t){return t=e.extend({},e.fn[this.type].defaults,this.$element.data(),t),t.delay&&typeof t.delay=="number"&&(t.delay={show:t.delay,hide:t.delay}),t},enter:function(t){var 
n=e.fn[this.type].defaults,r={},i;this._options&&e.each(this._options,function(e,t){n[e]!=t&&(r[e]=t)},this),i=e(t.currentTarget)[this.type](r).data(this.type);if(!i.options.delay||!i.options.delay.show)return i.show();clearTimeout(this.timeout),i.hoverState="in",this.timeout=setTimeout(function(){i.hoverState=="in"&&i.show()},i.options.delay.show)},leave:function(t){var n=e(t.currentTarget)[this.type](this._options).data(this.type);this.timeout&&clearTimeout(this.timeout);if(!n.options.delay||!n.options.delay.hide)return n.hide();n.hoverState="out",this.timeout=setTimeout(function(){n.hoverState=="out"&&n.hide()},n.options.delay.hide)},show:function(){var t,n,r,i,s,o,u=e.Event("show");if(this.hasContent()&&this.enabled){this.$element.trigger(u);if(u.isDefaultPrevented())return;t=this.tip(),this.setContent(),this.options.animation&&t.addClass("fade"),s=typeof this.options.placement=="function"?this.options.placement.call(this,t[0],this.$element[0]):this.options.placement,t.detach().css({top:0,left:0,display:"block"}),this.options.container?t.appendTo(this.options.container):t.insertAfter(this.$element),n=this.getPosition(),r=t[0].offsetWidth,i=t[0].offsetHeight;switch(s){case"bottom":o={top:n.top+n.height,left:n.left+n.width/2-r/2};break;case"top":o={top:n.top-i,left:n.left+n.width/2-r/2};break;case"left":o={top:n.top+n.height/2-i/2,left:n.left-r};break;case"right":o={top:n.top+n.height/2-i/2,left:n.left+n.width}}this.applyPlacement(o,s),this.$element.trigger("shown")}},applyPlacement:function(e,t){var n=this.tip(),r=n[0].offsetWidth,i=n[0].offsetHeight,s,o,u,a;n.offset(e).addClass(t).addClass("in"),s=n[0].offsetWidth,o=n[0].offsetHeight,t=="top"&&o!=i&&(e.top=e.top+i-o,a=!0),t=="bottom"||t=="top"?(u=0,e.left<0&&(u=e.left*-2,e.left=0,n.offset(e),s=n[0].offsetWidth,o=n[0].offsetHeight),this.replaceArrow(u-r+s,s,"left")):this.replaceArrow(o-i,o,"top"),a&&n.offset(e)},replaceArrow:function(e,t,n){this.arrow().css(n,e?50*(1-e/t)+"%":"")},setContent:function(){var e=this.tip(),t=this.getTitle();e.find(".tooltip-inner")[this.options.html?"html":"text"](t),e.removeClass("fade in top bottom left right")},hide:function(){function i(){var t=setTimeout(function(){n.off(e.support.transition.end).detach()},500);n.one(e.support.transition.end,function(){clearTimeout(t),n.detach()})}var t=this,n=this.tip(),r=e.Event("hide");this.$element.trigger(r);if(r.isDefaultPrevented())return;return n.removeClass("in"),e.support.transition&&this.$tip.hasClass("fade")?i():n.detach(),this.$element.trigger("hidden"),this},fixTitle:function(){var e=this.$element;(e.attr("title")||typeof e.attr("data-original-title")!="string")&&e.attr("data-original-title",e.attr("title")||"").attr("title","")},hasContent:function(){return this.getTitle()},getPosition:function(){var t=this.$element[0];return e.extend({},typeof t.getBoundingClientRect=="function"?t.getBoundingClientRect():{width:t.offsetWidth,height:t.offsetHeight},this.$element.offset())},getTitle:function(){var e,t=this.$element,n=this.options;return e=t.attr("data-original-title")||(typeof n.title=="function"?n.title.call(t[0]):n.title),e},tip:function(){return this.$tip=this.$tip||e(this.options.template)},arrow:function(){return this.$arrow=this.$arrow||this.tip().find(".tooltip-arrow")},validate:function(){this.$element[0].parentNode||(this.hide(),this.$element=null,this.options=null)},enable:function(){this.enabled=!0},disable:function(){this.enabled=!1},toggleEnabled:function(){this.enabled=!this.enabled},toggle:function(t){var 
n=t?e(t.currentTarget)[this.type](this._options).data(this.type):this;n.tip().hasClass("in")?n.hide():n.show()},destroy:function(){this.hide().$element.off("."+this.type).removeData(this.type)}};var n=e.fn.tooltip;e.fn.tooltip=function(n){return this.each(function(){var r=e(this),i=r.data("tooltip"),s=typeof n=="object"&&n;i||r.data("tooltip",i=new t(this,s)),typeof n=="string"&&i[n]()})},e.fn.tooltip.Constructor=t,e.fn.tooltip.defaults={animation:!0,placement:"top",selector:!1,template:'<div class="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',trigger:"hover focus",title:"",delay:0,html:!1,container:!1},e.fn.tooltip.noConflict=function(){return e.fn.tooltip=n,this}}(window.jQuery),!function(e){"use strict";var t=function(e,t){this.init("popover",e,t)};t.prototype=e.extend({},e.fn.tooltip.Constructor.prototype,{constructor:t,setContent:function(){var e=this.tip(),t=this.getTitle(),n=this.getContent();e.find(".popover-title")[this.options.html?"html":"text"](t),e.find(".popover-content")[this.options.html?"html":"text"](n),e.removeClass("fade top bottom left right in")},hasContent:function(){return this.getTitle()||this.getContent()},getContent:function(){var e,t=this.$element,n=this.options;return e=(typeof n.content=="function"?n.content.call(t[0]):n.content)||t.attr("data-content"),e},tip:function(){return this.$tip||(this.$tip=e(this.options.template)),this.$tip},destroy:function(){this.hide().$element.off("."+this.type).removeData(this.type)}});var n=e.fn.popover;e.fn.popover=function(n){return this.each(function(){var r=e(this),i=r.data("popover"),s=typeof n=="object"&&n;i||r.data("popover",i=new t(this,s)),typeof n=="string"&&i[n]()})},e.fn.popover.Constructor=t,e.fn.popover.defaults=e.extend({},e.fn.tooltip.defaults,{placement:"right",trigger:"click",content:"",template:'<div class="popover"><div class="arrow"></div><h3 class="popover-title"></h3><div class="popover-content"></div></div>'}),e.fn.popover.noConflict=function(){return e.fn.popover=n,this}}(window.jQuery),!function(e){"use strict";function t(t,n){var r=e.proxy(this.process,this),i=e(t).is("body")?e(window):e(t),s;this.options=e.extend({},e.fn.scrollspy.defaults,n),this.$scrollElement=i.on("scroll.scroll-spy.data-api",r),this.selector=(this.options.target||(s=e(t).attr("href"))&&s.replace(/.*(?=#[^\s]+$)/,"")||"")+" .nav li > a",this.$body=e("body"),this.refresh(),this.process()}t.prototype={constructor:t,refresh:function(){var t=this,n;this.offsets=e([]),this.targets=e([]),n=this.$body.find(this.selector).map(function(){var n=e(this),r=n.data("target")||n.attr("href"),i=/^#\w/.test(r)&&e(r);return i&&i.length&&[[i.position().top+(!e.isWindow(t.$scrollElement.get(0))&&t.$scrollElement.scrollTop()),r]]||null}).sort(function(e,t){return e[0]-t[0]}).each(function(){t.offsets.push(this[0]),t.targets.push(this[1])})},process:function(){var e=this.$scrollElement.scrollTop()+this.options.offset,t=this.$scrollElement[0].scrollHeight||this.$body[0].scrollHeight,n=t-this.$scrollElement.height(),r=this.offsets,i=this.targets,s=this.activeTarget,o;if(e>=n)return s!=(o=i.last()[0])&&this.activate(o);for(o=r.length;o--;)s!=i[o]&&e>=r[o]&&(!r[o+1]||e<=r[o+1])&&this.activate(i[o])},activate:function(t){var 
n,r;this.activeTarget=t,e(this.selector).parent(".active").removeClass("active"),r=this.selector+'[data-target="'+t+'"],'+this.selector+'[href="'+t+'"]',n=e(r).parent("li").addClass("active"),n.parent(".dropdown-menu").length&&(n=n.closest("li.dropdown").addClass("active")),n.trigger("activate")}};var n=e.fn.scrollspy;e.fn.scrollspy=function(n){return this.each(function(){var r=e(this),i=r.data("scrollspy"),s=typeof n=="object"&&n;i||r.data("scrollspy",i=new t(this,s)),typeof n=="string"&&i[n]()})},e.fn.scrollspy.Constructor=t,e.fn.scrollspy.defaults={offset:10},e.fn.scrollspy.noConflict=function(){return e.fn.scrollspy=n,this},e(window).on("load",function(){e('[data-spy="scroll"]').each(function(){var t=e(this);t.scrollspy(t.data())})})}(window.jQuery),!function(e){"use strict";var t=function(t){this.element=e(t)};t.prototype={constructor:t,show:function(){var t=this.element,n=t.closest("ul:not(.dropdown-menu)"),r=t.attr("data-target"),i,s,o;r||(r=t.attr("href"),r=r&&r.replace(/.*(?=#[^\s]*$)/,""));if(t.parent("li").hasClass("active"))return;i=n.find(".active:last a")[0],o=e.Event("show",{relatedTarget:i}),t.trigger(o);if(o.isDefaultPrevented())return;s=e(r),this.activate(t.parent("li"),n),this.activate(s,s.parent(),function(){t.trigger({type:"shown",relatedTarget:i})})},activate:function(t,n,r){function o(){i.removeClass("active").find("> .dropdown-menu > .active").removeClass("active"),t.addClass("active"),s?(t[0].offsetWidth,t.addClass("in")):t.removeClass("fade"),t.parent(".dropdown-menu")&&t.closest("li.dropdown").addClass("active"),r&&r()}var i=n.find("> .active"),s=r&&e.support.transition&&i.hasClass("fade");s?i.one(e.support.transition.end,o):o(),i.removeClass("in")}};var n=e.fn.tab;e.fn.tab=function(n){return this.each(function(){var r=e(this),i=r.data("tab");i||r.data("tab",i=new t(this)),typeof n=="string"&&i[n]()})},e.fn.tab.Constructor=t,e.fn.tab.noConflict=function(){return e.fn.tab=n,this},e(document).on("click.tab.data-api",'[data-toggle="tab"], [data-toggle="pill"]',function(t){t.preventDefault(),e(this).tab("show")})}(window.jQuery),!function(e){"use strict";var t=function(t,n){this.$element=e(t),this.options=e.extend({},e.fn.typeahead.defaults,n),this.matcher=this.options.matcher||this.matcher,this.sorter=this.options.sorter||this.sorter,this.highlighter=this.options.highlighter||this.highlighter,this.updater=this.options.updater||this.updater,this.source=this.options.source,this.$menu=e(this.options.menu),this.shown=!1,this.listen()};t.prototype={constructor:t,select:function(){var e=this.$menu.find(".active").attr("data-value");return this.$element.val(this.updater(e)).change(),this.hide()},updater:function(e){return e},show:function(){var t=e.extend({},this.$element.position(),{height:this.$element[0].offsetHeight});return this.$menu.insertAfter(this.$element).css({top:t.top+t.height,left:t.left}).show(),this.shown=!0,this},hide:function(){return this.$menu.hide(),this.shown=!1,this},lookup:function(t){var n;return this.query=this.$element.val(),!this.query||this.query.length<this.options.minLength?this.shown?this.hide():this:(n=e.isFunction(this.source)?this.source(this.query,e.proxy(this.process,this)):this.source,n?this.process(n):this)},process:function(t){var n=this;return t=e.grep(t,function(e){return n.matcher(e)}),t=this.sorter(t),t.length?this.render(t.slice(0,this.options.items)).show():this.shown?this.hide():this},matcher:function(e){return~e.toLowerCase().indexOf(this.query.toLowerCase())},sorter:function(e){var 
t=[],n=[],r=[],i;while(i=e.shift())i.toLowerCase().indexOf(this.query.toLowerCase())?~i.indexOf(this.query)?n.push(i):r.push(i):t.push(i);return t.concat(n,r)},highlighter:function(e){var t=this.query.replace(/[\-\[\]{}()*+?.,\\\^$|#\s]/g,"\\$&");return e.replace(new RegExp("("+t+")","ig"),function(e,t){return"<strong>"+t+"</strong>"})},render:function(t){var n=this;return t=e(t).map(function(t,r){return t=e(n.options.item).attr("data-value",r),t.find("a").html(n.highlighter(r)),t[0]}),t.first().addClass("active"),this.$menu.html(t),this},next:function(t){var n=this.$menu.find(".active").removeClass("active"),r=n.next();r.length||(r=e(this.$menu.find("li")[0])),r.addClass("active")},prev:function(e){var t=this.$menu.find(".active").removeClass("active"),n=t.prev();n.length||(n=this.$menu.find("li").last()),n.addClass("active")},listen:function(){this.$element.on("focus",e.proxy(this.focus,this)).on("blur",e.proxy(this.blur,this)).on("keypress",e.proxy(this.keypress,this)).on("keyup",e.proxy(this.keyup,this)),this.eventSupported("keydown")&&this.$element.on("keydown",e.proxy(this.keydown,this)),this.$menu.on("click",e.proxy(this.click,this)).on("mouseenter","li",e.proxy(this.mouseenter,this)).on("mouseleave","li",e.proxy(this.mouseleave,this))},eventSupported:function(e){var t=e in this.$element;return t||(this.$element.setAttribute(e,"return;"),t=typeof this.$element[e]=="function"),t},move:function(e){if(!this.shown)return;switch(e.keyCode){case 9:case 13:case 27:e.preventDefault();break;case 38:e.preventDefault(),this.prev();break;case 40:e.preventDefault(),this.next()}e.stopPropagation()},keydown:function(t){this.suppressKeyPressRepeat=~e.inArray(t.keyCode,[40,38,9,13,27]),this.move(t)},keypress:function(e){if(this.suppressKeyPressRepeat)return;this.move(e)},keyup:function(e){switch(e.keyCode){case 40:case 38:case 16:case 17:case 18:break;case 9:case 13:if(!this.shown)return;this.select();break;case 27:if(!this.shown)return;this.hide();break;default:this.lookup()}e.stopPropagation(),e.preventDefault()},focus:function(e){this.focused=!0},blur:function(e){this.focused=!1,!this.mousedover&&this.shown&&this.hide()},click:function(e){e.stopPropagation(),e.preventDefault(),this.select(),this.$element.focus()},mouseenter:function(t){this.mousedover=!0,this.$menu.find(".active").removeClass("active"),e(t.currentTarget).addClass("active")},mouseleave:function(e){this.mousedover=!1,!this.focused&&this.shown&&this.hide()}};var n=e.fn.typeahead;e.fn.typeahead=function(n){return this.each(function(){var r=e(this),i=r.data("typeahead"),s=typeof n=="object"&&n;i||r.data("typeahead",i=new t(this,s)),typeof n=="string"&&i[n]()})},e.fn.typeahead.defaults={source:[],items:8,menu:'<ul class="typeahead dropdown-menu"></ul>',item:'<li><a href="#"></a></li>',minLength:1},e.fn.typeahead.Constructor=t,e.fn.typeahead.noConflict=function(){return e.fn.typeahead=n,this},e(document).on("focus.typeahead.data-api",'[data-provide="typeahead"]',function(t){var n=e(this);if(n.data("typeahead"))return;n.typeahead(n.data())})}(window.jQuery),!function(e){"use strict";var t=function(t,n){this.options=e.extend({},e.fn.affix.defaults,n),this.$window=e(window).on("scroll.affix.data-api",e.proxy(this.checkPosition,this)).on("click.affix.data-api",e.proxy(function(){setTimeout(e.proxy(this.checkPosition,this),1)},this)),this.$element=e(t),this.checkPosition()};t.prototype.checkPosition=function(){if(!this.$element.is(":visible"))return;var 
t=e(document).height(),n=this.$window.scrollTop(),r=this.$element.offset(),i=this.options.offset,s=i.bottom,o=i.top,u="affix affix-top affix-bottom",a;typeof i!="object"&&(s=o=i),typeof o=="function"&&(o=i.top()),typeof s=="function"&&(s=i.bottom()),a=this.unpin!=null&&n+this.unpin<=r.top?!1:s!=null&&r.top+this.$element.height()>=t-s?"bottom":o!=null&&n<=o?"top":!1;if(this.affixed===a)return;this.affixed=a,this.unpin=a=="bottom"?r.top-n:null,this.$element.removeClass(u).addClass("affix"+(a?"-"+a:""))};var n=e.fn.affix;e.fn.affix=function(n){return this.each(function(){var r=e(this),i=r.data("affix"),s=typeof n=="object"&&n;i||r.data("affix",i=new t(this,s)),typeof n=="string"&&i[n]()})},e.fn.affix.Constructor=t,e.fn.affix.defaults={offset:0},e.fn.affix.noConflict=function(){return e.fn.affix=n,this},e(window).on("load",function(){e('[data-spy="affix"]').each(function(){var t=e(this),n=t.data();n.offset=n.offset||{},n.offsetBottom&&(n.offset.bottom=n.offsetBottom),n.offsetTop&&(n.offset.top=n.offsetTop),t.affix(n)})})}(window.jQuery);
diff --git a/publish/assets/js/jquery-1.11.1.min.js b/publish/assets/js/jquery-1.11.1.min.js
deleted file mode 100644
index af111a2..0000000
--- a/publish/assets/js/jquery-1.11.1.min.js
+++ /dev/null
@@ -1,5 +0,0 @@
-/*! jQuery v1.11.1 | (c) 2005, 2014 jQuery Foundation, Inc. | jquery.org/license */
-
-!function(a,b){"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){var c=[],d=c.slice,e=c.concat,f=c.push,g=c.indexOf,h={},i=h.toString,j=h.hasOwnProperty,k={},l="1.11.1",m=function(a,b){return new m.fn.init(a,b)},n=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,o=/^-ms-/,p=/-([\da-z])/gi,q=function(a,b){return b.toUpperCase()};m.fn=m.prototype={jquery:l,constructor:m,selector:"",length:0,toArray:function(){return d.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:d.call(this)},pushStack:function(a){var b=m.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a,b){return m.each(this,a,b)},map:function(a){return this.pushStack(m.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(d.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor(null)},push:f,sort:c.sort,splice:c.splice},m.extend=m.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||m.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(e=arguments[h]))for(d in e)a=g[d],c=e[d],g!==c&&(j&&c&&(m.isPlainObject(c)||(b=m.isArray(c)))?(b?(b=!1,f=a&&m.isArray(a)?a:[]):f=a&&m.isPlainObject(a)?a:{},g[d]=m.extend(j,f,c)):void 0!==c&&(g[d]=c));return g},m.extend({expando:"jQuery"+(l+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new Error(a)},noop:function(){},isFunction:function(a){return"function"===m.type(a)},isArray:Array.isArray||function(a){return"array"===m.type(a)},isWindow:function(a){return null!=a&&a==a.window},isNumeric:function(a){return!m.isArray(a)&&a-parseFloat(a)>=0},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},isPlainObject:function(a){var b;if(!a||"object"!==m.type(a)||a.nodeType||m.isWindow(a))return!1;try{if(a.constructor&&!j.call(a,"constructor")&&!j.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(c){return!1}if(k.ownLast)for(b in a)return j.call(a,b);for(b in a);return void 0===b||j.call(a,b)},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?h[i.call(a)]||"object":typeof a},globalEval:function(b){b&&m.trim(b)&&(a.execScript||function(b){a.eval.call(a,b)})(b)},camelCase:function(a){return a.replace(o,"ms-").replace(p,q)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b,c){var d,e=0,f=a.length,g=r(a);if(c){if(g){for(;f>e;e++)if(d=b.apply(a[e],c),d===!1)break}else for(e in a)if(d=b.apply(a[e],c),d===!1)break}else if(g){for(;f>e;e++)if(d=b.call(a[e],e,a[e]),d===!1)break}else for(e in a)if(d=b.call(a[e],e,a[e]),d===!1)break;return a},trim:function(a){return null==a?"":(a+"").replace(n,"")},makeArray:function(a,b){var c=b||[];return null!=a&&(r(Object(a))?m.merge(c,"string"==typeof a?[a]:a):f.call(c,a)),c},inArray:function(a,b,c){var d;if(b){if(g)return g.call(b,a,c);for(d=b.length,c=c?0>c?Math.max(0,d+c):c:0;d>c;c++)if(c in b&&b[c]===a)return c}return-1},merge:function(a,b){var c=+b.length,d=0,e=a.length;while(c>d)a[e++]=b[d++];if(c!==c)while(void 0!==b[d])a[e++]=b[d++];return 
a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,f=0,g=a.length,h=r(a),i=[];if(h)for(;g>f;f++)d=b(a[f],f,c),null!=d&&i.push(d);else for(f in a)d=b(a[f],f,c),null!=d&&i.push(d);return e.apply([],i)},guid:1,proxy:function(a,b){var c,e,f;return"string"==typeof b&&(f=a[b],b=a,a=f),m.isFunction(a)?(c=d.call(arguments,2),e=function(){return a.apply(b||this,c.concat(d.call(arguments)))},e.guid=a.guid=a.guid||m.guid++,e):void 0},now:function(){return+new Date},support:k}),m.each("Boolean Number String Function Array Date RegExp Object Error".split(" "),function(a,b){h["[object "+b+"]"]=b.toLowerCase()});function r(a){var b=a.length,c=m.type(a);return"function"===c||m.isWindow(a)?!1:1===a.nodeType&&b?!0:"array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a}var s=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+-new Date,v=a.document,w=0,x=0,y=gb(),z=gb(),A=gb(),B=function(a,b){return a===b&&(l=!0),0},C="undefined",D=1<<31,E={}.hasOwnProperty,F=[],G=F.pop,H=F.push,I=F.push,J=F.slice,K=F.indexOf||function(a){for(var b=0,c=this.length;c>b;b++)if(this[b]===a)return b;return-1},L="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",M="[\\x20\\t\\r\\n\\f]",N="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",O=N.replace("w","w#"),P="\\["+M+"*("+N+")(?:"+M+"*([*^$|!~]?=)"+M+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+O+"))|)"+M+"*\\]",Q=":("+N+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+P+")*)|.*)\\)|)",R=new RegExp("^"+M+"+|((?:^|[^\\\\])(?:\\\\.)*)"+M+"+$","g"),S=new RegExp("^"+M+"*,"+M+"*"),T=new RegExp("^"+M+"*([>+~]|"+M+")"+M+"*"),U=new RegExp("="+M+"*([^\\]'\"]*?)"+M+"*\\]","g"),V=new RegExp(Q),W=new RegExp("^"+O+"$"),X={ID:new RegExp("^#("+N+")"),CLASS:new RegExp("^\\.("+N+")"),TAG:new RegExp("^("+N.replace("w","w*")+")"),ATTR:new RegExp("^"+P),PSEUDO:new RegExp("^"+Q),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+L+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/^(?:input|select|textarea|button)$/i,Z=/^h\d$/i,$=/^[^{]+\{\s*\[native \w/,_=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ab=/[+~]/,bb=/'|\\/g,cb=new RegExp("\\\\([\\da-f]{1,6}"+M+"?|("+M+")|.)","ig"),db=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)};try{I.apply(F=J.call(v.childNodes),v.childNodes),F[v.childNodes.length].nodeType}catch(eb){I={apply:F.length?function(a,b){H.apply(a,J.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function fb(a,b,d,e){var f,h,j,k,l,o,r,s,w,x;if((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,d=d||[],!a||"string"!=typeof a)return d;if(1!==(k=b.nodeType)&&9!==k)return[];if(p&&!e){if(f=_.exec(a))if(j=f[1]){if(9===k){if(h=b.getElementById(j),!h||!h.parentNode)return d;if(h.id===j)return d.push(h),d}else if(b.ownerDocument&&(h=b.ownerDocument.getElementById(j))&&t(b,h)&&h.id===j)return d.push(h),d}else{if(f[2])return I.apply(d,b.getElementsByTagName(a)),d;if((j=f[3])&&c.getElementsByClassName&&b.getElementsByClassName)return 
I.apply(d,b.getElementsByClassName(j)),d}if(c.qsa&&(!q||!q.test(a))){if(s=r=u,w=b,x=9===k&&a,1===k&&"object"!==b.nodeName.toLowerCase()){o=g(a),(r=b.getAttribute("id"))?s=r.replace(bb,"\\$&"):b.setAttribute("id",s),s="[id='"+s+"'] ",l=o.length;while(l--)o[l]=s+qb(o[l]);w=ab.test(a)&&ob(b.parentNode)||b,x=o.join(",")}if(x)try{return I.apply(d,w.querySelectorAll(x)),d}catch(y){}finally{r||b.removeAttribute("id")}}}return i(a.replace(R,"$1"),b,d,e)}function gb(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function hb(a){return a[u]=!0,a}function ib(a){var b=n.createElement("div");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function jb(a,b){var c=a.split("|"),e=a.length;while(e--)d.attrHandle[c[e]]=b}function kb(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||D)-(~a.sourceIndex||D);if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function lb(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function mb(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function nb(a){return hb(function(b){return b=+b,hb(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function ob(a){return a&&typeof a.getElementsByTagName!==C&&a}c=fb.support={},f=fb.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?"HTML"!==b.nodeName:!1},m=fb.setDocument=function(a){var b,e=a?a.ownerDocument||a:v,g=e.defaultView;return e!==n&&9===e.nodeType&&e.documentElement?(n=e,o=e.documentElement,p=!f(e),g&&g!==g.top&&(g.addEventListener?g.addEventListener("unload",function(){m()},!1):g.attachEvent&&g.attachEvent("onunload",function(){m()})),c.attributes=ib(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=ib(function(a){return a.appendChild(e.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=$.test(e.getElementsByClassName)&&ib(function(a){return a.innerHTML="<div class='a'></div><div class='a i'></div>",a.firstChild.className="i",2===a.getElementsByClassName("i").length}),c.getById=ib(function(a){return o.appendChild(a).id=u,!e.getElementsByName||!e.getElementsByName(u).length}),c.getById?(d.find.ID=function(a,b){if(typeof b.getElementById!==C&&p){var c=b.getElementById(a);return c&&c.parentNode?[c]:[]}},d.filter.ID=function(a){var b=a.replace(cb,db);return function(a){return a.getAttribute("id")===b}}):(delete d.find.ID,d.filter.ID=function(a){var b=a.replace(cb,db);return function(a){var c=typeof a.getAttributeNode!==C&&a.getAttributeNode("id");return c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return typeof b.getElementsByTagName!==C?b.getElementsByTagName(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){return typeof b.getElementsByClassName!==C&&p?b.getElementsByClassName(a):void 0},r=[],q=[],(c.qsa=$.test(e.querySelectorAll))&&(ib(function(a){a.innerHTML="<select msallowclip=''><option selected=''></option></select>",a.querySelectorAll("[msallowclip^='']").length&&q.push("[*^$]="+M+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||q.push("\\["+M+"*(?:value|"+L+")"),a.querySelectorAll(":checked").length||q.push(":checked")}),ib(function(a){var 
b=e.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&q.push("name"+M+"*[*^$|!~]?="),a.querySelectorAll(":enabled").length||q.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),q.push(",.*:")})),(c.matchesSelector=$.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&ib(function(a){c.disconnectedMatch=s.call(a,"div"),s.call(a,"[s!='']:x"),r.push("!=",Q)}),q=q.length&&new RegExp(q.join("|")),r=r.length&&new RegExp(r.join("|")),b=$.test(o.compareDocumentPosition),t=b||$.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===e||a.ownerDocument===v&&t(v,a)?-1:b===e||b.ownerDocument===v&&t(v,b)?1:k?K.call(k,a)-K.call(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,f=a.parentNode,g=b.parentNode,h=[a],i=[b];if(!f||!g)return a===e?-1:b===e?1:f?-1:g?1:k?K.call(k,a)-K.call(k,b):0;if(f===g)return kb(a,b);c=a;while(c=c.parentNode)h.unshift(c);c=b;while(c=c.parentNode)i.unshift(c);while(h[d]===i[d])d++;return d?kb(h[d],i[d]):h[d]===v?-1:i[d]===v?1:0},e):n},fb.matches=function(a,b){return fb(a,null,null,b)},fb.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(U,"='$1']"),!(!c.matchesSelector||!p||r&&r.test(b)||q&&q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return fb(b,n,null,[a]).length>0},fb.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},fb.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&E.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 0!==f?f:c.attributes||!p?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},fb.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},fb.uniqueSort=function(a){var b,d=[],e=0,f=0;if(l=!c.detectDuplicates,k=!c.sortStable&&a.slice(0),a.sort(B),l){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return k=null,a},e=fb.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=fb.selectors={cacheLength:50,createPseudo:hb,match:X,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(cb,db),a[3]=(a[3]||a[4]||a[5]||"").replace(cb,db),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||fb.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&fb.error(a[0]),a},PSEUDO:function(a){var b,c=!a[6]&&a[2];return 
X.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||"":c&&V.test(c)&&(b=g(c,!0))&&(b=c.indexOf(")",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(cb,db).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=y[a+" "];return b||(b=new RegExp("(^|"+M+")"+a+"("+M+"|$)"))&&y(a,function(a){return b.test("string"==typeof a.className&&a.className||typeof a.getAttribute!==C&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=fb.attr(d,a);return null==e?"!="===b:b?(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e+" ").indexOf(c)>-1:"|="===b?e===c||e.slice(0,c.length+1)===c+"-":!1):!0}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h;if(q){if(f){while(p){l=b;while(l=l[p])if(h?l.nodeName.toLowerCase()===r:1===l.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){k=q[u]||(q[u]={}),j=k[a]||[],n=j[0]===w&&j[1],m=j[0]===w&&j[2],l=n&&q.childNodes[n];while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if(1===l.nodeType&&++m&&l===b){k[a]=[w,n,m];break}}else if(s&&(j=(b[u]||(b[u]={}))[a])&&j[0]===w)m=j[1];else while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if((h?l.nodeName.toLowerCase()===r:1===l.nodeType)&&++m&&(s&&((l[u]||(l[u]={}))[a]=[w,m]),l===b))break;return m-=e,m===d||m%d===0&&m/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||fb.error("unsupported pseudo: "+a);return e[u]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?hb(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=K.call(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:hb(function(a){var b=[],c=[],d=h(a.replace(R,"$1"));return d[u]?hb(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),!c.pop()}}),has:hb(function(a){return function(b){return fb(a,b).length>0}}),contains:hb(function(a){return function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:hb(function(a){return W.test(a||"")||fb.error("unsupported lang: "+a),a=a.replace(cb,db).toLowerCase(),function(b){var c;do if(c=p?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===o},focus:function(a){return a===n.activeElement&&(!n.hasFocus||n.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:function(a){return a.disabled===!1},disabled:function(a){return a.disabled===!0},checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return Z.test(a.nodeName)},input:function(a){return Y.test(a.nodeName)},button:function(a){var 
b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:nb(function(){return[0]}),last:nb(function(a,b){return[b-1]}),eq:nb(function(a,b,c){return[0>c?c+b:c]}),even:nb(function(a,b){for(var c=0;b>c;c+=2)a.push(c);return a}),odd:nb(function(a,b){for(var c=1;b>c;c+=2)a.push(c);return a}),lt:nb(function(a,b,c){for(var d=0>c?c+b:c;--d>=0;)a.push(d);return a}),gt:nb(function(a,b,c){for(var d=0>c?c+b:c;++d<b;)a.push(d);return a})}},d.pseudos.nth=d.pseudos.eq;for(b in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})d.pseudos[b]=lb(b);for(b in{submit:!0,reset:!0})d.pseudos[b]=mb(b);function pb(){}pb.prototype=d.filters=d.pseudos,d.setFilters=new pb,g=fb.tokenize=function(a,b){var c,e,f,g,h,i,j,k=z[a+" "];if(k)return b?0:k.slice(0);h=a,i=[],j=d.preFilter;while(h){(!c||(e=S.exec(h)))&&(e&&(h=h.slice(e[0].length)||h),i.push(f=[])),c=!1,(e=T.exec(h))&&(c=e.shift(),f.push({value:c,type:e[0].replace(R," ")}),h=h.slice(c.length));for(g in d.filter)!(e=X[g].exec(h))||j[g]&&!(e=j[g](e))||(c=e.shift(),f.push({value:c,type:g,matches:e}),h=h.slice(c.length));if(!c)break}return b?h.length:h?fb.error(a):z(a,i).slice(0)};function qb(a){for(var b=0,c=a.length,d="";c>b;b++)d+=a[b].value;return d}function rb(a,b,c){var d=b.dir,e=c&&"parentNode"===d,f=x++;return b.first?function(b,c,f){while(b=b[d])if(1===b.nodeType||e)return a(b,c,f)}:function(b,c,g){var h,i,j=[w,f];if(g){while(b=b[d])if((1===b.nodeType||e)&&a(b,c,g))return!0}else while(b=b[d])if(1===b.nodeType||e){if(i=b[u]||(b[u]={}),(h=i[d])&&h[0]===w&&h[1]===f)return j[2]=h[2];if(i[d]=j,j[2]=a(b,c,g))return!0}}}function sb(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function tb(a,b,c){for(var d=0,e=b.length;e>d;d++)fb(a,b[d],c);return c}function ub(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(f=a[h])&&(!c||c(f,d,e))&&(g.push(f),j&&b.push(h));return g}function vb(a,b,c,d,e,f){return d&&!d[u]&&(d=vb(d)),e&&!e[u]&&(e=vb(e,f)),hb(function(f,g,h,i){var j,k,l,m=[],n=[],o=g.length,p=f||tb(b||"*",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:ub(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=ub(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?K.call(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=ub(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):I.apply(g,r)})}function wb(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.relative[" "],i=g?1:0,k=rb(function(a){return a===b},h,!0),l=rb(function(a){return K.call(b,a)>-1},h,!0),m=[function(a,c,d){return!g&&(d||c!==j)||((b=c).nodeType?k(a,c,d):l(a,c,d))}];f>i;i++)if(c=d.relative[a[i].type])m=[rb(sb(m),c)];else{if(c=d.filter[a[i].type].apply(null,a[i].matches),c[u]){for(e=++i;f>e;e++)if(d.relative[a[e].type])break;return vb(i>1&&sb(m),i>1&&qb(a.slice(0,i-1).concat({value:" "===a[i-2].type?"*":""})).replace(R,"$1"),c,e>i&&wb(a.slice(i,e)),f>e&&wb(a=a.slice(e)),f>e&&qb(a))}m.push(c)}return sb(m)}function xb(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,h,i,k){var 
l,m,o,p=0,q="0",r=f&&[],s=[],t=j,u=f||e&&d.find.TAG("*",k),v=w+=null==t?1:Math.random()||.1,x=u.length;for(k&&(j=g!==n&&g);q!==x&&null!=(l=u[q]);q++){if(e&&l){m=0;while(o=a[m++])if(o(l,g,h)){i.push(l);break}k&&(w=v)}c&&((l=!o&&l)&&p--,f&&r.push(l))}if(p+=q,c&&q!==p){m=0;while(o=b[m++])o(r,s,g,h);if(f){if(p>0)while(q--)r[q]||s[q]||(s[q]=G.call(i));s=ub(s)}I.apply(i,s),k&&!f&&s.length>0&&p+b.length>1&&fb.uniqueSort(i)}return k&&(w=v,j=t),r};return c?hb(f):f}return h=fb.compile=function(a,b){var c,d=[],e=[],f=A[a+" "];if(!f){b||(b=g(a)),c=b.length;while(c--)f=wb(b[c]),f[u]?d.push(f):e.push(f);f=A(a,xb(e,d)),f.selector=a}return f},i=fb.select=function(a,b,e,f){var i,j,k,l,m,n="function"==typeof a&&a,o=!f&&g(a=n.selector||a);if(e=e||[],1===o.length){if(j=o[0]=o[0].slice(0),j.length>2&&"ID"===(k=j[0]).type&&c.getById&&9===b.nodeType&&p&&d.relative[j[1].type]){if(b=(d.find.ID(k.matches[0].replace(cb,db),b)||[])[0],!b)return e;n&&(b=b.parentNode),a=a.slice(j.shift().value.length)}i=X.needsContext.test(a)?0:j.length;while(i--){if(k=j[i],d.relative[l=k.type])break;if((m=d.find[l])&&(f=m(k.matches[0].replace(cb,db),ab.test(j[0].type)&&ob(b.parentNode)||b))){if(j.splice(i,1),a=f.length&&qb(j),!a)return I.apply(e,f),e;break}}}return(n||h(a,o))(f,b,!p,e,ab.test(a)&&ob(b.parentNode)||b),e},c.sortStable=u.split("").sort(B).join("")===u,c.detectDuplicates=!!l,m(),c.sortDetached=ib(function(a){return 1&a.compareDocumentPosition(n.createElement("div"))}),ib(function(a){return a.innerHTML="<a href='#'></a>","#"===a.firstChild.getAttribute("href")})||jb("type|href|height|width",function(a,b,c){return c?void 0:a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),c.attributes&&ib(function(a){return a.innerHTML="<input/>",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||jb("value",function(a,b,c){return c||"input"!==a.nodeName.toLowerCase()?void 0:a.defaultValue}),ib(function(a){return null==a.getAttribute("disabled")})||jb(L,function(a,b,c){var d;return c?void 0:a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null}),fb}(a);m.find=s,m.expr=s.selectors,m.expr[":"]=m.expr.pseudos,m.unique=s.uniqueSort,m.text=s.getText,m.isXMLDoc=s.isXML,m.contains=s.contains;var t=m.expr.match.needsContext,u=/^<(\w+)\s*\/?>(?:<\/\1>|)$/,v=/^.[^:#\[\.,]*$/;function w(a,b,c){if(m.isFunction(b))return m.grep(a,function(a,d){return!!b.call(a,d,a)!==c});if(b.nodeType)return m.grep(a,function(a){return a===b!==c});if("string"==typeof b){if(v.test(b))return m.filter(b,a,c);b=m.filter(b,a)}return m.grep(a,function(a){return m.inArray(a,b)>=0!==c})}m.filter=function(a,b,c){var d=b[0];return c&&(a=":not("+a+")"),1===b.length&&1===d.nodeType?m.find.matchesSelector(d,a)?[d]:[]:m.find.matches(a,m.grep(b,function(a){return 1===a.nodeType}))},m.fn.extend({find:function(a){var b,c=[],d=this,e=d.length;if("string"!=typeof a)return this.pushStack(m(a).filter(function(){for(b=0;e>b;b++)if(m.contains(d[b],this))return!0}));for(b=0;e>b;b++)m.find(a,d[b],c);return c=this.pushStack(e>1?m.unique(c):c),c.selector=this.selector?this.selector+" "+a:a,c},filter:function(a){return this.pushStack(w(this,a||[],!1))},not:function(a){return this.pushStack(w(this,a||[],!0))},is:function(a){return!!w(this,"string"==typeof a&&t.test(a)?m(a):a||[],!1).length}});var x,y=a.document,z=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/,A=m.fn.init=function(a,b){var c,d;if(!a)return this;if("string"==typeof 
a){if(c="<"===a.charAt(0)&&">"===a.charAt(a.length-1)&&a.length>=3?[null,a,null]:z.exec(a),!c||!c[1]&&b)return!b||b.jquery?(b||x).find(a):this.constructor(b).find(a);if(c[1]){if(b=b instanceof m?b[0]:b,m.merge(this,m.parseHTML(c[1],b&&b.nodeType?b.ownerDocument||b:y,!0)),u.test(c[1])&&m.isPlainObject(b))for(c in b)m.isFunction(this[c])?this[c](b[c]):this.attr(c,b[c]);return this}if(d=y.getElementById(c[2]),d&&d.parentNode){if(d.id!==c[2])return x.find(a);this.length=1,this[0]=d}return this.context=y,this.selector=a,this}return a.nodeType?(this.context=this[0]=a,this.length=1,this):m.isFunction(a)?"undefined"!=typeof x.ready?x.ready(a):a(m):(void 0!==a.selector&&(this.selector=a.selector,this.context=a.context),m.makeArray(a,this))};A.prototype=m.fn,x=m(y);var B=/^(?:parents|prev(?:Until|All))/,C={children:!0,contents:!0,next:!0,prev:!0};m.extend({dir:function(a,b,c){var d=[],e=a[b];while(e&&9!==e.nodeType&&(void 0===c||1!==e.nodeType||!m(e).is(c)))1===e.nodeType&&d.push(e),e=e[b];return d},sibling:function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c}}),m.fn.extend({has:function(a){var b,c=m(a,this),d=c.length;return this.filter(function(){for(b=0;d>b;b++)if(m.contains(this,c[b]))return!0})},closest:function(a,b){for(var c,d=0,e=this.length,f=[],g=t.test(a)||"string"!=typeof a?m(a,b||this.context):0;e>d;d++)for(c=this[d];c&&c!==b;c=c.parentNode)if(c.nodeType<11&&(g?g.index(c)>-1:1===c.nodeType&&m.find.matchesSelector(c,a))){f.push(c);break}return this.pushStack(f.length>1?m.unique(f):f)},index:function(a){return a?"string"==typeof a?m.inArray(this[0],m(a)):m.inArray(a.jquery?a[0]:a,this):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(m.unique(m.merge(this.get(),m(a,b))))},addBack:function(a){return this.add(null==a?this.prevObject:this.prevObject.filter(a))}});function D(a,b){do a=a[b];while(a&&1!==a.nodeType);return a}m.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return m.dir(a,"parentNode")},parentsUntil:function(a,b,c){return m.dir(a,"parentNode",c)},next:function(a){return D(a,"nextSibling")},prev:function(a){return D(a,"previousSibling")},nextAll:function(a){return m.dir(a,"nextSibling")},prevAll:function(a){return m.dir(a,"previousSibling")},nextUntil:function(a,b,c){return m.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return m.dir(a,"previousSibling",c)},siblings:function(a){return m.sibling((a.parentNode||{}).firstChild,a)},children:function(a){return m.sibling(a.firstChild)},contents:function(a){return m.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:m.merge([],a.childNodes)}},function(a,b){m.fn[a]=function(c,d){var e=m.map(this,b,c);return"Until"!==a.slice(-5)&&(d=c),d&&"string"==typeof d&&(e=m.filter(d,e)),this.length>1&&(C[a]||(e=m.unique(e)),B.test(a)&&(e=e.reverse())),this.pushStack(e)}});var E=/\S+/g,F={};function G(a){var b=F[a]={};return m.each(a.match(E)||[],function(a,c){b[c]=!0}),b}m.Callbacks=function(a){a="string"==typeof a?F[a]||G(a):m.extend({},a);var b,c,d,e,f,g,h=[],i=!a.once&&[],j=function(l){for(c=a.memory&&l,d=!0,f=g||0,g=0,e=h.length,b=!0;h&&e>f;f++)if(h[f].apply(l[0],l[1])===!1&&a.stopOnFalse){c=!1;break}b=!1,h&&(i?i.length&&j(i.shift()):c?h=[]:k.disable())},k={add:function(){if(h){var d=h.length;!function f(b){m.each(b,function(b,c){var d=m.type(c);"function"===d?a.unique&&k.has(c)||h.push(c):c&&c.length&&"string"!==d&&f(c)})}(arguments),b?e=h.length:c&&(g=d,j(c))}return 
this},remove:function(){return h&&m.each(arguments,function(a,c){var d;while((d=m.inArray(c,h,d))>-1)h.splice(d,1),b&&(e>=d&&e--,f>=d&&f--)}),this},has:function(a){return a?m.inArray(a,h)>-1:!(!h||!h.length)},empty:function(){return h=[],e=0,this},disable:function(){return h=i=c=void 0,this},disabled:function(){return!h},lock:function(){return i=void 0,c||k.disable(),this},locked:function(){return!i},fireWith:function(a,c){return!h||d&&!i||(c=c||[],c=[a,c.slice?c.slice():c],b?i.push(c):j(c)),this},fire:function(){return k.fireWith(this,arguments),this},fired:function(){return!!d}};return k},m.extend({Deferred:function(a){var b=[["resolve","done",m.Callbacks("once memory"),"resolved"],["reject","fail",m.Callbacks("once memory"),"rejected"],["notify","progress",m.Callbacks("memory")]],c="pending",d={state:function(){return c},always:function(){return e.done(arguments).fail(arguments),this},then:function(){var a=arguments;return m.Deferred(function(c){m.each(b,function(b,f){var g=m.isFunction(a[b])&&a[b];e[f[1]](function(){var a=g&&g.apply(this,arguments);a&&m.isFunction(a.promise)?a.promise().done(c.resolve).fail(c.reject).progress(c.notify):c[f[0]+"With"](this===d?c.promise():this,g?[a]:arguments)})}),a=null}).promise()},promise:function(a){return null!=a?m.extend(a,d):d}},e={};return d.pipe=d.then,m.each(b,function(a,f){var g=f[2],h=f[3];d[f[1]]=g.add,h&&g.add(function(){c=h},b[1^a][2].disable,b[2][2].lock),e[f[0]]=function(){return e[f[0]+"With"](this===e?d:this,arguments),this},e[f[0]+"With"]=g.fireWith}),d.promise(e),a&&a.call(e,e),e},when:function(a){var b=0,c=d.call(arguments),e=c.length,f=1!==e||a&&m.isFunction(a.promise)?e:0,g=1===f?a:m.Deferred(),h=function(a,b,c){return function(e){b[a]=this,c[a]=arguments.length>1?d.call(arguments):e,c===i?g.notifyWith(b,c):--f||g.resolveWith(b,c)}},i,j,k;if(e>1)for(i=new Array(e),j=new Array(e),k=new Array(e);e>b;b++)c[b]&&m.isFunction(c[b].promise)?c[b].promise().done(h(b,k,c)).fail(g.reject).progress(h(b,j,i)):--f;return f||g.resolveWith(k,c),g.promise()}});var H;m.fn.ready=function(a){return m.ready.promise().done(a),this},m.extend({isReady:!1,readyWait:1,holdReady:function(a){a?m.readyWait++:m.ready(!0)},ready:function(a){if(a===!0?!--m.readyWait:!m.isReady){if(!y.body)return setTimeout(m.ready);m.isReady=!0,a!==!0&&--m.readyWait>0||(H.resolveWith(y,[m]),m.fn.triggerHandler&&(m(y).triggerHandler("ready"),m(y).off("ready")))}}});function I(){y.addEventListener?(y.removeEventListener("DOMContentLoaded",J,!1),a.removeEventListener("load",J,!1)):(y.detachEvent("onreadystatechange",J),a.detachEvent("onload",J))}function J(){(y.addEventListener||"load"===event.type||"complete"===y.readyState)&&(I(),m.ready())}m.ready.promise=function(b){if(!H)if(H=m.Deferred(),"complete"===y.readyState)setTimeout(m.ready);else if(y.addEventListener)y.addEventListener("DOMContentLoaded",J,!1),a.addEventListener("load",J,!1);else{y.attachEvent("onreadystatechange",J),a.attachEvent("onload",J);var c=!1;try{c=null==a.frameElement&&y.documentElement}catch(d){}c&&c.doScroll&&!function e(){if(!m.isReady){try{c.doScroll("left")}catch(a){return setTimeout(e,50)}I(),m.ready()}}()}return H.promise(b)};var K="undefined",L;for(L in m(k))break;k.ownLast="0"!==L,k.inlineBlockNeedsLayout=!1,m(function(){var a,b,c,d;c=y.getElementsByTagName("body")[0],c&&c.style&&(b=y.createElement("div"),d=y.createElement("div"),d.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(d).appendChild(b),typeof 
b.style.zoom!==K&&(b.style.cssText="display:inline;margin:0;border:0;padding:1px;width:1px;zoom:1",k.inlineBlockNeedsLayout=a=3===b.offsetWidth,a&&(c.style.zoom=1)),c.removeChild(d))}),function(){var a=y.createElement("div");if(null==k.deleteExpando){k.deleteExpando=!0;try{delete a.test}catch(b){k.deleteExpando=!1}}a=null}(),m.acceptData=function(a){var b=m.noData[(a.nodeName+" ").toLowerCase()],c=+a.nodeType||1;return 1!==c&&9!==c?!1:!b||b!==!0&&a.getAttribute("classid")===b};var M=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,N=/([A-Z])/g;function O(a,b,c){if(void 0===c&&1===a.nodeType){var d="data-"+b.replace(N,"-$1").toLowerCase();if(c=a.getAttribute(d),"string"==typeof c){try{c="true"===c?!0:"false"===c?!1:"null"===c?null:+c+""===c?+c:M.test(c)?m.parseJSON(c):c}catch(e){}m.data(a,b,c)}else c=void 0}return c}function P(a){var b;for(b in a)if(("data"!==b||!m.isEmptyObject(a[b]))&&"toJSON"!==b)return!1;return!0}function Q(a,b,d,e){if(m.acceptData(a)){var f,g,h=m.expando,i=a.nodeType,j=i?m.cache:a,k=i?a[h]:a[h]&&h;
-if(k&&j[k]&&(e||j[k].data)||void 0!==d||"string"!=typeof b)return k||(k=i?a[h]=c.pop()||m.guid++:h),j[k]||(j[k]=i?{}:{toJSON:m.noop}),("object"==typeof b||"function"==typeof b)&&(e?j[k]=m.extend(j[k],b):j[k].data=m.extend(j[k].data,b)),g=j[k],e||(g.data||(g.data={}),g=g.data),void 0!==d&&(g[m.camelCase(b)]=d),"string"==typeof b?(f=g[b],null==f&&(f=g[m.camelCase(b)])):f=g,f}}function R(a,b,c){if(m.acceptData(a)){var d,e,f=a.nodeType,g=f?m.cache:a,h=f?a[m.expando]:m.expando;if(g[h]){if(b&&(d=c?g[h]:g[h].data)){m.isArray(b)?b=b.concat(m.map(b,m.camelCase)):b in d?b=[b]:(b=m.camelCase(b),b=b in d?[b]:b.split(" ")),e=b.length;while(e--)delete d[b[e]];if(c?!P(d):!m.isEmptyObject(d))return}(c||(delete g[h].data,P(g[h])))&&(f?m.cleanData([a],!0):k.deleteExpando||g!=g.window?delete g[h]:g[h]=null)}}}m.extend({cache:{},noData:{"applet ":!0,"embed ":!0,"object ":"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000"},hasData:function(a){return a=a.nodeType?m.cache[a[m.expando]]:a[m.expando],!!a&&!P(a)},data:function(a,b,c){return Q(a,b,c)},removeData:function(a,b){return R(a,b)},_data:function(a,b,c){return Q(a,b,c,!0)},_removeData:function(a,b){return R(a,b,!0)}}),m.fn.extend({data:function(a,b){var c,d,e,f=this[0],g=f&&f.attributes;if(void 0===a){if(this.length&&(e=m.data(f),1===f.nodeType&&!m._data(f,"parsedAttrs"))){c=g.length;while(c--)g[c]&&(d=g[c].name,0===d.indexOf("data-")&&(d=m.camelCase(d.slice(5)),O(f,d,e[d])));m._data(f,"parsedAttrs",!0)}return e}return"object"==typeof a?this.each(function(){m.data(this,a)}):arguments.length>1?this.each(function(){m.data(this,a,b)}):f?O(f,a,m.data(f,a)):void 0},removeData:function(a){return this.each(function(){m.removeData(this,a)})}}),m.extend({queue:function(a,b,c){var d;return a?(b=(b||"fx")+"queue",d=m._data(a,b),c&&(!d||m.isArray(c)?d=m._data(a,b,m.makeArray(c)):d.push(c)),d||[]):void 0},dequeue:function(a,b){b=b||"fx";var c=m.queue(a,b),d=c.length,e=c.shift(),f=m._queueHooks(a,b),g=function(){m.dequeue(a,b)};"inprogress"===e&&(e=c.shift(),d--),e&&("fx"===b&&c.unshift("inprogress"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+"queueHooks";return m._data(a,c)||m._data(a,c,{empty:m.Callbacks("once memory").add(function(){m._removeData(a,b+"queue"),m._removeData(a,c)})})}}),m.fn.extend({queue:function(a,b){var c=2;return"string"!=typeof a&&(b=a,a="fx",c--),arguments.length<c?m.queue(this[0],a):void 0===b?this:this.each(function(){var c=m.queue(this,a,b);m._queueHooks(this,a),"fx"===a&&"inprogress"!==c[0]&&m.dequeue(this,a)})},dequeue:function(a){return this.each(function(){m.dequeue(this,a)})},clearQueue:function(a){return this.queue(a||"fx",[])},promise:function(a,b){var c,d=1,e=m.Deferred(),f=this,g=this.length,h=function(){--d||e.resolveWith(f,[f])};"string"!=typeof a&&(b=a,a=void 0),a=a||"fx";while(g--)c=m._data(f[g],a+"queueHooks"),c&&c.empty&&(d++,c.empty.add(h));return h(),e.promise(b)}});var S=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,T=["Top","Right","Bottom","Left"],U=function(a,b){return a=b||a,"none"===m.css(a,"display")||!m.contains(a.ownerDocument,a)},V=m.access=function(a,b,c,d,e,f,g){var h=0,i=a.length,j=null==c;if("object"===m.type(c)){e=!0;for(h in c)m.access(a,b,h,c[h],!0,f,g)}else if(void 0!==d&&(e=!0,m.isFunction(d)||(g=!0),j&&(g?(b.call(a,d),b=null):(j=b,b=function(a,b,c){return j.call(m(a),c)})),b))for(;i>h;h++)b(a[h],c,g?d:d.call(a[h],h,b(a[h],c)));return e?a:j?b.call(a):i?b(a[0],c):f},W=/^(?:checkbox|radio)$/i;!function(){var 
a=y.createElement("input"),b=y.createElement("div"),c=y.createDocumentFragment();if(b.innerHTML="  <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",k.leadingWhitespace=3===b.firstChild.nodeType,k.tbody=!b.getElementsByTagName("tbody").length,k.htmlSerialize=!!b.getElementsByTagName("link").length,k.html5Clone="<:nav></:nav>"!==y.createElement("nav").cloneNode(!0).outerHTML,a.type="checkbox",a.checked=!0,c.appendChild(a),k.appendChecked=a.checked,b.innerHTML="<textarea>x</textarea>",k.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue,c.appendChild(b),b.innerHTML="<input type='radio' checked='checked' name='t'/>",k.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,k.noCloneEvent=!0,b.attachEvent&&(b.attachEvent("onclick",function(){k.noCloneEvent=!1}),b.cloneNode(!0).click()),null==k.deleteExpando){k.deleteExpando=!0;try{delete b.test}catch(d){k.deleteExpando=!1}}}(),function(){var b,c,d=y.createElement("div");for(b in{submit:!0,change:!0,focusin:!0})c="on"+b,(k[b+"Bubbles"]=c in a)||(d.setAttribute(c,"t"),k[b+"Bubbles"]=d.attributes[c].expando===!1);d=null}();var X=/^(?:input|select|textarea)$/i,Y=/^key/,Z=/^(?:mouse|pointer|contextmenu)|click/,$=/^(?:focusinfocus|focusoutblur)$/,_=/^([^.]*)(?:\.(.+)|)$/;function ab(){return!0}function bb(){return!1}function cb(){try{return y.activeElement}catch(a){}}m.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,n,o,p,q,r=m._data(a);if(r){c.handler&&(i=c,c=i.handler,e=i.selector),c.guid||(c.guid=m.guid++),(g=r.events)||(g=r.events={}),(k=r.handle)||(k=r.handle=function(a){return typeof m===K||a&&m.event.triggered===a.type?void 0:m.event.dispatch.apply(k.elem,arguments)},k.elem=a),b=(b||"").match(E)||[""],h=b.length;while(h--)f=_.exec(b[h])||[],o=q=f[1],p=(f[2]||"").split(".").sort(),o&&(j=m.event.special[o]||{},o=(e?j.delegateType:j.bindType)||o,j=m.event.special[o]||{},l=m.extend({type:o,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&m.expr.match.needsContext.test(e),namespace:p.join(".")},i),(n=g[o])||(n=g[o]=[],n.delegateCount=0,j.setup&&j.setup.call(a,d,p,k)!==!1||(a.addEventListener?a.addEventListener(o,k,!1):a.attachEvent&&a.attachEvent("on"+o,k))),j.add&&(j.add.call(a,l),l.handler.guid||(l.handler.guid=c.guid)),e?n.splice(n.delegateCount++,0,l):n.push(l),m.event.global[o]=!0);a=null}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,n,o,p,q,r=m.hasData(a)&&m._data(a);if(r&&(k=r.events)){b=(b||"").match(E)||[""],j=b.length;while(j--)if(h=_.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o){l=m.event.special[o]||{},o=(d?l.delegateType:l.bindType)||o,n=k[o]||[],h=h[2]&&new RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),i=f=n.length;while(f--)g=n[f],!e&&q!==g.origType||c&&c.guid!==g.guid||h&&!h.test(g.namespace)||d&&d!==g.selector&&("**"!==d||!g.selector)||(n.splice(f,1),g.selector&&n.delegateCount--,l.remove&&l.remove.call(a,g));i&&!n.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||m.removeEvent(a,o,r.handle),delete k[o])}else for(o in k)m.event.remove(a,o+b[j],c,d,!0);m.isEmptyObject(k)&&(delete r.handle,m._removeData(a,"events"))}},trigger:function(b,c,d,e){var f,g,h,i,k,l,n,o=[d||y],p=j.call(b,"type")?b.type:b,q=j.call(b,"namespace")?b.namespace.split("."):[];if(h=l=d=d||y,3!==d.nodeType&&8!==d.nodeType&&!$.test(p+m.event.triggered)&&(p.indexOf(".")>=0&&(q=p.split("."),p=q.shift(),q.sort()),g=p.indexOf(":")<0&&"on"+p,b=b[m.expando]?b:new m.Event(p,"object"==typeof b&&b),b.isTrigger=e?2:3,b.namespace=q.join("."),b.namespace_re=b.namespace?new 
RegExp("(^|\\.)"+q.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 0,b.target||(b.target=d),c=null==c?[b]:m.makeArray(c,[b]),k=m.event.special[p]||{},e||!k.trigger||k.trigger.apply(d,c)!==!1)){if(!e&&!k.noBubble&&!m.isWindow(d)){for(i=k.delegateType||p,$.test(i+p)||(h=h.parentNode);h;h=h.parentNode)o.push(h),l=h;l===(d.ownerDocument||y)&&o.push(l.defaultView||l.parentWindow||a)}n=0;while((h=o[n++])&&!b.isPropagationStopped())b.type=n>1?i:k.bindType||p,f=(m._data(h,"events")||{})[b.type]&&m._data(h,"handle"),f&&f.apply(h,c),f=g&&h[g],f&&f.apply&&m.acceptData(h)&&(b.result=f.apply(h,c),b.result===!1&&b.preventDefault());if(b.type=p,!e&&!b.isDefaultPrevented()&&(!k._default||k._default.apply(o.pop(),c)===!1)&&m.acceptData(d)&&g&&d[p]&&!m.isWindow(d)){l=d[g],l&&(d[g]=null),m.event.triggered=p;try{d[p]()}catch(r){}m.event.triggered=void 0,l&&(d[g]=l)}return b.result}},dispatch:function(a){a=m.event.fix(a);var b,c,e,f,g,h=[],i=d.call(arguments),j=(m._data(this,"events")||{})[a.type]||[],k=m.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=m.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,g=0;while((e=f.handlers[g++])&&!a.isImmediatePropagationStopped())(!a.namespace_re||a.namespace_re.test(e.namespace))&&(a.handleObj=e,a.data=e.data,c=((m.event.special[e.origType]||{}).handle||e.handler).apply(f.elem,i),void 0!==c&&(a.result=c)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&(!a.button||"click"!==a.type))for(;i!=this;i=i.parentNode||this)if(1===i.nodeType&&(i.disabled!==!0||"click"!==a.type)){for(e=[],f=0;h>f;f++)d=b[f],c=d.selector+" ",void 0===e[c]&&(e[c]=d.needsContext?m(c,this).index(i)>=0:m.find(c,this,null,[i]).length),e[c]&&e.push(d);e.length&&g.push({elem:i,handlers:e})}return h<b.length&&g.push({elem:this,handlers:b.slice(h)}),g},fix:function(a){if(a[m.expando])return a;var b,c,d,e=a.type,f=a,g=this.fixHooks[e];g||(this.fixHooks[e]=g=Z.test(e)?this.mouseHooks:Y.test(e)?this.keyHooks:{}),d=g.props?this.props.concat(g.props):this.props,a=new m.Event(f),b=d.length;while(b--)c=d[b],a[c]=f[c];return a.target||(a.target=f.srcElement||y),3===a.target.nodeType&&(a.target=a.target.parentNode),a.metaKey=!!a.metaKey,g.filter?g.filter(a,f):a},props:"altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(a,b){return null==a.which&&(a.which=null!=b.charCode?b.charCode:b.keyCode),a}},mouseHooks:{props:"button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "),filter:function(a,b){var c,d,e,f=b.button,g=b.fromElement;return null==a.pageX&&null!=b.clientX&&(d=a.target.ownerDocument||y,e=d.documentElement,c=d.body,a.pageX=b.clientX+(e&&e.scrollLeft||c&&c.scrollLeft||0)-(e&&e.clientLeft||c&&c.clientLeft||0),a.pageY=b.clientY+(e&&e.scrollTop||c&&c.scrollTop||0)-(e&&e.clientTop||c&&c.clientTop||0)),!a.relatedTarget&&g&&(a.relatedTarget=g===a.target?b.toElement:g),a.which||void 0===f||(a.which=1&f?1:2&f?3:4&f?2:0),a}},special:{load:{noBubble:!0},focus:{trigger:function(){if(this!==cb()&&this.focus)try{return this.focus(),!1}catch(a){}},delegateType:"focusin"},blur:{trigger:function(){return this===cb()&&this.blur?(this.blur(),!1):void 
0},delegateType:"focusout"},click:{trigger:function(){return m.nodeName(this,"input")&&"checkbox"===this.type&&this.click?(this.click(),!1):void 0},_default:function(a){return m.nodeName(a.target,"a")}},beforeunload:{postDispatch:function(a){void 0!==a.result&&a.originalEvent&&(a.originalEvent.returnValue=a.result)}}},simulate:function(a,b,c,d){var e=m.extend(new m.Event,c,{type:a,isSimulated:!0,originalEvent:{}});d?m.event.trigger(e,null,b):m.event.dispatch.call(b,e),e.isDefaultPrevented()&&c.preventDefault()}},m.removeEvent=y.removeEventListener?function(a,b,c){a.removeEventListener&&a.removeEventListener(b,c,!1)}:function(a,b,c){var d="on"+b;a.detachEvent&&(typeof a[d]===K&&(a[d]=null),a.detachEvent(d,c))},m.Event=function(a,b){return this instanceof m.Event?(a&&a.type?(this.originalEvent=a,this.type=a.type,this.isDefaultPrevented=a.defaultPrevented||void 0===a.defaultPrevented&&a.returnValue===!1?ab:bb):this.type=a,b&&m.extend(this,b),this.timeStamp=a&&a.timeStamp||m.now(),void(this[m.expando]=!0)):new m.Event(a,b)},m.Event.prototype={isDefaultPrevented:bb,isPropagationStopped:bb,isImmediatePropagationStopped:bb,preventDefault:function(){var a=this.originalEvent;this.isDefaultPrevented=ab,a&&(a.preventDefault?a.preventDefault():a.returnValue=!1)},stopPropagation:function(){var a=this.originalEvent;this.isPropagationStopped=ab,a&&(a.stopPropagation&&a.stopPropagation(),a.cancelBubble=!0)},stopImmediatePropagation:function(){var a=this.originalEvent;this.isImmediatePropagationStopped=ab,a&&a.stopImmediatePropagation&&a.stopImmediatePropagation(),this.stopPropagation()}},m.each({mouseenter:"mouseover",mouseleave:"mouseout",pointerenter:"pointerover",pointerleave:"pointerout"},function(a,b){m.event.special[a]={delegateType:b,bindType:b,handle:function(a){var c,d=this,e=a.relatedTarget,f=a.handleObj;return(!e||e!==d&&!m.contains(d,e))&&(a.type=f.origType,c=f.handler.apply(this,arguments),a.type=b),c}}}),k.submitBubbles||(m.event.special.submit={setup:function(){return m.nodeName(this,"form")?!1:void m.event.add(this,"click._submit keypress._submit",function(a){var b=a.target,c=m.nodeName(b,"input")||m.nodeName(b,"button")?b.form:void 0;c&&!m._data(c,"submitBubbles")&&(m.event.add(c,"submit._submit",function(a){a._submit_bubble=!0}),m._data(c,"submitBubbles",!0))})},postDispatch:function(a){a._submit_bubble&&(delete a._submit_bubble,this.parentNode&&!a.isTrigger&&m.event.simulate("submit",this.parentNode,a,!0))},teardown:function(){return m.nodeName(this,"form")?!1:void m.event.remove(this,"._submit")}}),k.changeBubbles||(m.event.special.change={setup:function(){return X.test(this.nodeName)?(("checkbox"===this.type||"radio"===this.type)&&(m.event.add(this,"propertychange._change",function(a){"checked"===a.originalEvent.propertyName&&(this._just_changed=!0)}),m.event.add(this,"click._change",function(a){this._just_changed&&!a.isTrigger&&(this._just_changed=!1),m.event.simulate("change",this,a,!0)})),!1):void m.event.add(this,"beforeactivate._change",function(a){var b=a.target;X.test(b.nodeName)&&!m._data(b,"changeBubbles")&&(m.event.add(b,"change._change",function(a){!this.parentNode||a.isSimulated||a.isTrigger||m.event.simulate("change",this.parentNode,a,!0)}),m._data(b,"changeBubbles",!0))})},handle:function(a){var b=a.target;return this!==b||a.isSimulated||a.isTrigger||"radio"!==b.type&&"checkbox"!==b.type?a.handleObj.handler.apply(this,arguments):void 0},teardown:function(){return 
m.event.remove(this,"._change"),!X.test(this.nodeName)}}),k.focusinBubbles||m.each({focus:"focusin",blur:"focusout"},function(a,b){var c=function(a){m.event.simulate(b,a.target,m.event.fix(a),!0)};m.event.special[b]={setup:function(){var d=this.ownerDocument||this,e=m._data(d,b);e||d.addEventListener(a,c,!0),m._data(d,b,(e||0)+1)},teardown:function(){var d=this.ownerDocument||this,e=m._data(d,b)-1;e?m._data(d,b,e):(d.removeEventListener(a,c,!0),m._removeData(d,b))}}}),m.fn.extend({on:function(a,b,c,d,e){var f,g;if("object"==typeof a){"string"!=typeof b&&(c=c||b,b=void 0);for(f in a)this.on(f,b,c,a[f],e);return this}if(null==c&&null==d?(d=b,c=b=void 0):null==d&&("string"==typeof b?(d=c,c=void 0):(d=c,c=b,b=void 0)),d===!1)d=bb;else if(!d)return this;return 1===e&&(g=d,d=function(a){return m().off(a),g.apply(this,arguments)},d.guid=g.guid||(g.guid=m.guid++)),this.each(function(){m.event.add(this,a,d,c,b)})},one:function(a,b,c,d){return this.on(a,b,c,d,1)},off:function(a,b,c){var d,e;if(a&&a.preventDefault&&a.handleObj)return d=a.handleObj,m(a.delegateTarget).off(d.namespace?d.origType+"."+d.namespace:d.origType,d.selector,d.handler),this;if("object"==typeof a){for(e in a)this.off(e,b,a[e]);return this}return(b===!1||"function"==typeof b)&&(c=b,b=void 0),c===!1&&(c=bb),this.each(function(){m.event.remove(this,a,c,b)})},trigger:function(a,b){return this.each(function(){m.event.trigger(a,b,this)})},triggerHandler:function(a,b){var c=this[0];return c?m.event.trigger(a,b,c,!0):void 0}});function db(a){var b=eb.split("|"),c=a.createDocumentFragment();if(c.createElement)while(b.length)c.createElement(b.pop());return c}var eb="abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",fb=/ jQuery\d+="(?:null|\d+)"/g,gb=new RegExp("<(?:"+eb+")[\\s/>]","i"),hb=/^\s+/,ib=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,jb=/<([\w:]+)/,kb=/<tbody/i,lb=/<|&#?\w+;/,mb=/<(?:script|style|link)/i,nb=/checked\s*(?:[^=]|=\s*.checked.)/i,ob=/^$|\/(?:java|ecma)script/i,pb=/^true\/(.*)/,qb=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g,rb={option:[1,"<select multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</fieldset>"],area:[1,"<map>","</map>"],param:[1,"<object>","</object>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:k.htmlSerialize?[0,"",""]:[1,"X<div>","</div>"]},sb=db(y),tb=sb.appendChild(y.createElement("div"));rb.optgroup=rb.option,rb.tbody=rb.tfoot=rb.colgroup=rb.caption=rb.thead,rb.th=rb.td;function ub(a,b){var c,d,e=0,f=typeof a.getElementsByTagName!==K?a.getElementsByTagName(b||"*"):typeof a.querySelectorAll!==K?a.querySelectorAll(b||"*"):void 0;if(!f)for(f=[],c=a.childNodes||a;null!=(d=c[e]);e++)!b||m.nodeName(d,b)?f.push(d):m.merge(f,ub(d,b));return void 0===b||b&&m.nodeName(a,b)?m.merge([a],f):f}function vb(a){W.test(a.type)&&(a.defaultChecked=a.checked)}function wb(a,b){return m.nodeName(a,"table")&&m.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function xb(a){return a.type=(null!==m.find.attr(a,"type"))+"/"+a.type,a}function yb(a){var b=pb.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function zb(a,b){for(var c,d=0;null!=(c=a[d]);d++)m._data(c,"globalEval",!b||m._data(b[d],"globalEval"))}function 
Ab(a,b){if(1===b.nodeType&&m.hasData(a)){var c,d,e,f=m._data(a),g=m._data(b,f),h=f.events;if(h){delete g.handle,g.events={};for(c in h)for(d=0,e=h[c].length;e>d;d++)m.event.add(b,c,h[c][d])}g.data&&(g.data=m.extend({},g.data))}}function Bb(a,b){var c,d,e;if(1===b.nodeType){if(c=b.nodeName.toLowerCase(),!k.noCloneEvent&&b[m.expando]){e=m._data(b);for(d in e.events)m.removeEvent(b,d,e.handle);b.removeAttribute(m.expando)}"script"===c&&b.text!==a.text?(xb(b).text=a.text,yb(b)):"object"===c?(b.parentNode&&(b.outerHTML=a.outerHTML),k.html5Clone&&a.innerHTML&&!m.trim(b.innerHTML)&&(b.innerHTML=a.innerHTML)):"input"===c&&W.test(a.type)?(b.defaultChecked=b.checked=a.checked,b.value!==a.value&&(b.value=a.value)):"option"===c?b.defaultSelected=b.selected=a.defaultSelected:("input"===c||"textarea"===c)&&(b.defaultValue=a.defaultValue)}}m.extend({clone:function(a,b,c){var d,e,f,g,h,i=m.contains(a.ownerDocument,a);if(k.html5Clone||m.isXMLDoc(a)||!gb.test("<"+a.nodeName+">")?f=a.cloneNode(!0):(tb.innerHTML=a.outerHTML,tb.removeChild(f=tb.firstChild)),!(k.noCloneEvent&&k.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||m.isXMLDoc(a)))for(d=ub(f),h=ub(a),g=0;null!=(e=h[g]);++g)d[g]&&Bb(e,d[g]);if(b)if(c)for(h=h||ub(a),d=d||ub(f),g=0;null!=(e=h[g]);g++)Ab(e,d[g]);else Ab(a,f);return d=ub(f,"script"),d.length>0&&zb(d,!i&&ub(a,"script")),d=h=e=null,f},buildFragment:function(a,b,c,d){for(var e,f,g,h,i,j,l,n=a.length,o=db(b),p=[],q=0;n>q;q++)if(f=a[q],f||0===f)if("object"===m.type(f))m.merge(p,f.nodeType?[f]:f);else if(lb.test(f)){h=h||o.appendChild(b.createElement("div")),i=(jb.exec(f)||["",""])[1].toLowerCase(),l=rb[i]||rb._default,h.innerHTML=l[1]+f.replace(ib,"<$1></$2>")+l[2],e=l[0];while(e--)h=h.lastChild;if(!k.leadingWhitespace&&hb.test(f)&&p.push(b.createTextNode(hb.exec(f)[0])),!k.tbody){f="table"!==i||kb.test(f)?"<table>"!==l[1]||kb.test(f)?0:h:h.firstChild,e=f&&f.childNodes.length;while(e--)m.nodeName(j=f.childNodes[e],"tbody")&&!j.childNodes.length&&f.removeChild(j)}m.merge(p,h.childNodes),h.textContent="";while(h.firstChild)h.removeChild(h.firstChild);h=o.lastChild}else p.push(b.createTextNode(f));h&&o.removeChild(h),k.appendChecked||m.grep(ub(p,"input"),vb),q=0;while(f=p[q++])if((!d||-1===m.inArray(f,d))&&(g=m.contains(f.ownerDocument,f),h=ub(o.appendChild(f),"script"),g&&zb(h),c)){e=0;while(f=h[e++])ob.test(f.type||"")&&c.push(f)}return h=null,o},cleanData:function(a,b){for(var d,e,f,g,h=0,i=m.expando,j=m.cache,l=k.deleteExpando,n=m.event.special;null!=(d=a[h]);h++)if((b||m.acceptData(d))&&(f=d[i],g=f&&j[f])){if(g.events)for(e in g.events)n[e]?m.event.remove(d,e):m.removeEvent(d,e,g.handle);j[f]&&(delete j[f],l?delete d[i]:typeof d.removeAttribute!==K?d.removeAttribute(i):d[i]=null,c.push(f))}}}),m.fn.extend({text:function(a){return V(this,function(a){return void 0===a?m.text(this):this.empty().append((this[0]&&this[0].ownerDocument||y).createTextNode(a))},null,a,arguments.length)},append:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=wb(this,a);b.appendChild(a)}})},prepend:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=wb(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},remove:function(a,b){for(var 
c,d=a?m.filter(a,this):this,e=0;null!=(c=d[e]);e++)b||1!==c.nodeType||m.cleanData(ub(c)),c.parentNode&&(b&&m.contains(c.ownerDocument,c)&&zb(ub(c,"script")),c.parentNode.removeChild(c));return this},empty:function(){for(var a,b=0;null!=(a=this[b]);b++){1===a.nodeType&&m.cleanData(ub(a,!1));while(a.firstChild)a.removeChild(a.firstChild);a.options&&m.nodeName(a,"select")&&(a.options.length=0)}return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return m.clone(this,a,b)})},html:function(a){return V(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a)return 1===b.nodeType?b.innerHTML.replace(fb,""):void 0;if(!("string"!=typeof a||mb.test(a)||!k.htmlSerialize&&gb.test(a)||!k.leadingWhitespace&&hb.test(a)||rb[(jb.exec(a)||["",""])[1].toLowerCase()])){a=a.replace(ib,"<$1></$2>");try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(m.cleanData(ub(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=arguments[0];return this.domManip(arguments,function(b){a=this.parentNode,m.cleanData(ub(this)),a&&a.replaceChild(b,this)}),a&&(a.length||a.nodeType)?this:this.remove()},detach:function(a){return this.remove(a,!0)},domManip:function(a,b){a=e.apply([],a);var c,d,f,g,h,i,j=0,l=this.length,n=this,o=l-1,p=a[0],q=m.isFunction(p);if(q||l>1&&"string"==typeof p&&!k.checkClone&&nb.test(p))return this.each(function(c){var d=n.eq(c);q&&(a[0]=p.call(this,c,d.html())),d.domManip(a,b)});if(l&&(i=m.buildFragment(a,this[0].ownerDocument,!1,this),c=i.firstChild,1===i.childNodes.length&&(i=c),c)){for(g=m.map(ub(i,"script"),xb),f=g.length;l>j;j++)d=i,j!==o&&(d=m.clone(d,!0,!0),f&&m.merge(g,ub(d,"script"))),b.call(this[j],d,j);if(f)for(h=g[g.length-1].ownerDocument,m.map(g,yb),j=0;f>j;j++)d=g[j],ob.test(d.type||"")&&!m._data(d,"globalEval")&&m.contains(h,d)&&(d.src?m._evalUrl&&m._evalUrl(d.src):m.globalEval((d.text||d.textContent||d.innerHTML||"").replace(qb,"")));i=c=null}return this}}),m.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){m.fn[a]=function(a){for(var c,d=0,e=[],g=m(a),h=g.length-1;h>=d;d++)c=d===h?this:this.clone(!0),m(g[d])[b](c),f.apply(e,c.get());return this.pushStack(e)}});var Cb,Db={};function Eb(b,c){var d,e=m(c.createElement(b)).appendTo(c.body),f=a.getDefaultComputedStyle&&(d=a.getDefaultComputedStyle(e[0]))?d.display:m.css(e[0],"display");return e.detach(),f}function Fb(a){var b=y,c=Db[a];return c||(c=Eb(a,b),"none"!==c&&c||(Cb=(Cb||m("<iframe frameborder='0' width='0' height='0'/>")).appendTo(b.documentElement),b=(Cb[0].contentWindow||Cb[0].contentDocument).document,b.write(),b.close(),c=Eb(a,b),Cb.detach()),Db[a]=c),c}!function(){var a;k.shrinkWrapBlocks=function(){if(null!=a)return a;a=!1;var b,c,d;return c=y.getElementsByTagName("body")[0],c&&c.style?(b=y.createElement("div"),d=y.createElement("div"),d.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(d).appendChild(b),typeof b.style.zoom!==K&&(b.style.cssText="-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:1px;width:1px;zoom:1",b.appendChild(y.createElement("div")).style.width="5px",a=3!==b.offsetWidth),c.removeChild(d),a):void 0}}();var Gb=/^margin/,Hb=new RegExp("^("+S+")(?!px)[a-z%]+$","i"),Ib,Jb,Kb=/^(top|right|bottom|left)$/;a.getComputedStyle?(Ib=function(a){return 
a.ownerDocument.defaultView.getComputedStyle(a,null)},Jb=function(a,b,c){var d,e,f,g,h=a.style;return c=c||Ib(a),g=c?c.getPropertyValue(b)||c[b]:void 0,c&&(""!==g||m.contains(a.ownerDocument,a)||(g=m.style(a,b)),Hb.test(g)&&Gb.test(b)&&(d=h.width,e=h.minWidth,f=h.maxWidth,h.minWidth=h.maxWidth=h.width=g,g=c.width,h.width=d,h.minWidth=e,h.maxWidth=f)),void 0===g?g:g+""}):y.documentElement.currentStyle&&(Ib=function(a){return a.currentStyle},Jb=function(a,b,c){var d,e,f,g,h=a.style;return c=c||Ib(a),g=c?c[b]:void 0,null==g&&h&&h[b]&&(g=h[b]),Hb.test(g)&&!Kb.test(b)&&(d=h.left,e=a.runtimeStyle,f=e&&e.left,f&&(e.left=a.currentStyle.left),h.left="fontSize"===b?"1em":g,g=h.pixelLeft+"px",h.left=d,f&&(e.left=f)),void 0===g?g:g+""||"auto"});function Lb(a,b){return{get:function(){var c=a();if(null!=c)return c?void delete this.get:(this.get=b).apply(this,arguments)}}}!function(){var b,c,d,e,f,g,h;if(b=y.createElement("div"),b.innerHTML="  <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",d=b.getElementsByTagName("a")[0],c=d&&d.style){c.cssText="float:left;opacity:.5",k.opacity="0.5"===c.opacity,k.cssFloat=!!c.cssFloat,b.style.backgroundClip="content-box",b.cloneNode(!0).style.backgroundClip="",k.clearCloneStyle="content-box"===b.style.backgroundClip,k.boxSizing=""===c.boxSizing||""===c.MozBoxSizing||""===c.WebkitBoxSizing,m.extend(k,{reliableHiddenOffsets:function(){return null==g&&i(),g},boxSizingReliable:function(){return null==f&&i(),f},pixelPosition:function(){return null==e&&i(),e},reliableMarginRight:function(){return null==h&&i(),h}});function i(){var b,c,d,i;c=y.getElementsByTagName("body")[0],c&&c.style&&(b=y.createElement("div"),d=y.createElement("div"),d.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(d).appendChild(b),b.style.cssText="-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;display:block;margin-top:1%;top:1%;border:1px;padding:1px;width:4px;position:absolute",e=f=!1,h=!0,a.getComputedStyle&&(e="1%"!==(a.getComputedStyle(b,null)||{}).top,f="4px"===(a.getComputedStyle(b,null)||{width:"4px"}).width,i=b.appendChild(y.createElement("div")),i.style.cssText=b.style.cssText="-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:0",i.style.marginRight=i.style.width="0",b.style.width="1px",h=!parseFloat((a.getComputedStyle(i,null)||{}).marginRight)),b.innerHTML="<table><tr><td></td><td>t</td></tr></table>",i=b.getElementsByTagName("td"),i[0].style.cssText="margin:0;border:0;padding:0;display:none",g=0===i[0].offsetHeight,g&&(i[0].style.display="",i[1].style.display="none",g=0===i[0].offsetHeight),c.removeChild(d))}}}(),m.swap=function(a,b,c,d){var e,f,g={};for(f in b)g[f]=a.style[f],a.style[f]=b[f];e=c.apply(a,d||[]);for(f in b)a.style[f]=g[f];return e};var Mb=/alpha\([^)]*\)/i,Nb=/opacity\s*=\s*([^)]*)/,Ob=/^(none|table(?!-c[ea]).+)/,Pb=new RegExp("^("+S+")(.*)$","i"),Qb=new RegExp("^([+-])=("+S+")","i"),Rb={position:"absolute",visibility:"hidden",display:"block"},Sb={letterSpacing:"0",fontWeight:"400"},Tb=["Webkit","O","Moz","ms"];function Ub(a,b){if(b in a)return b;var c=b.charAt(0).toUpperCase()+b.slice(1),d=b,e=Tb.length;while(e--)if(b=Tb[e]+c,b in a)return b;return d}function Vb(a,b){for(var 
c,d,e,f=[],g=0,h=a.length;h>g;g++)d=a[g],d.style&&(f[g]=m._data(d,"olddisplay"),c=d.style.display,b?(f[g]||"none"!==c||(d.style.display=""),""===d.style.display&&U(d)&&(f[g]=m._data(d,"olddisplay",Fb(d.nodeName)))):(e=U(d),(c&&"none"!==c||!e)&&m._data(d,"olddisplay",e?c:m.css(d,"display"))));for(g=0;h>g;g++)d=a[g],d.style&&(b&&"none"!==d.style.display&&""!==d.style.display||(d.style.display=b?f[g]||"":"none"));return a}function Wb(a,b,c){var d=Pb.exec(b);return d?Math.max(0,d[1]-(c||0))+(d[2]||"px"):b}function Xb(a,b,c,d,e){for(var f=c===(d?"border":"content")?4:"width"===b?1:0,g=0;4>f;f+=2)"margin"===c&&(g+=m.css(a,c+T[f],!0,e)),d?("content"===c&&(g-=m.css(a,"padding"+T[f],!0,e)),"margin"!==c&&(g-=m.css(a,"border"+T[f]+"Width",!0,e))):(g+=m.css(a,"padding"+T[f],!0,e),"padding"!==c&&(g+=m.css(a,"border"+T[f]+"Width",!0,e)));return g}function Yb(a,b,c){var d=!0,e="width"===b?a.offsetWidth:a.offsetHeight,f=Ib(a),g=k.boxSizing&&"border-box"===m.css(a,"boxSizing",!1,f);if(0>=e||null==e){if(e=Jb(a,b,f),(0>e||null==e)&&(e=a.style[b]),Hb.test(e))return e;d=g&&(k.boxSizingReliable()||e===a.style[b]),e=parseFloat(e)||0}return e+Xb(a,b,c||(g?"border":"content"),d,f)+"px"}m.extend({cssHooks:{opacity:{get:function(a,b){if(b){var c=Jb(a,"opacity");return""===c?"1":c}}}},cssNumber:{columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":k.cssFloat?"cssFloat":"styleFloat"},style:function(a,b,c,d){if(a&&3!==a.nodeType&&8!==a.nodeType&&a.style){var e,f,g,h=m.camelCase(b),i=a.style;if(b=m.cssProps[h]||(m.cssProps[h]=Ub(i,h)),g=m.cssHooks[b]||m.cssHooks[h],void 0===c)return g&&"get"in g&&void 0!==(e=g.get(a,!1,d))?e:i[b];if(f=typeof c,"string"===f&&(e=Qb.exec(c))&&(c=(e[1]+1)*e[2]+parseFloat(m.css(a,b)),f="number"),null!=c&&c===c&&("number"!==f||m.cssNumber[h]||(c+="px"),k.clearCloneStyle||""!==c||0!==b.indexOf("background")||(i[b]="inherit"),!(g&&"set"in g&&void 0===(c=g.set(a,c,d)))))try{i[b]=c}catch(j){}}},css:function(a,b,c,d){var e,f,g,h=m.camelCase(b);return b=m.cssProps[h]||(m.cssProps[h]=Ub(a.style,h)),g=m.cssHooks[b]||m.cssHooks[h],g&&"get"in g&&(f=g.get(a,!0,c)),void 0===f&&(f=Jb(a,b,d)),"normal"===f&&b in Sb&&(f=Sb[b]),""===c||c?(e=parseFloat(f),c===!0||m.isNumeric(e)?e||0:f):f}}),m.each(["height","width"],function(a,b){m.cssHooks[b]={get:function(a,c,d){return c?Ob.test(m.css(a,"display"))&&0===a.offsetWidth?m.swap(a,Rb,function(){return Yb(a,b,d)}):Yb(a,b,d):void 0},set:function(a,c,d){var e=d&&Ib(a);return Wb(a,c,d?Xb(a,b,d,k.boxSizing&&"border-box"===m.css(a,"boxSizing",!1,e),e):0)}}}),k.opacity||(m.cssHooks.opacity={get:function(a,b){return Nb.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||"")?.01*parseFloat(RegExp.$1)+"":b?"1":""},set:function(a,b){var c=a.style,d=a.currentStyle,e=m.isNumeric(b)?"alpha(opacity="+100*b+")":"",f=d&&d.filter||c.filter||"";c.zoom=1,(b>=1||""===b)&&""===m.trim(f.replace(Mb,""))&&c.removeAttribute&&(c.removeAttribute("filter"),""===b||d&&!d.filter)||(c.filter=Mb.test(f)?f.replace(Mb,e):f+" "+e)}}),m.cssHooks.marginRight=Lb(k.reliableMarginRight,function(a,b){return b?m.swap(a,{display:"inline-block"},Jb,[a,"marginRight"]):void 0}),m.each({margin:"",padding:"",border:"Width"},function(a,b){m.cssHooks[a+b]={expand:function(c){for(var d=0,e={},f="string"==typeof c?c.split(" "):[c];4>d;d++)e[a+T[d]+b]=f[d]||f[d-2]||f[0];return e}},Gb.test(a)||(m.cssHooks[a+b].set=Wb)}),m.fn.extend({css:function(a,b){return V(this,function(a,b,c){var 
d,e,f={},g=0;if(m.isArray(b)){for(d=Ib(a),e=b.length;e>g;g++)f[b[g]]=m.css(a,b[g],!1,d);return f}return void 0!==c?m.style(a,b,c):m.css(a,b)},a,b,arguments.length>1)},show:function(){return Vb(this,!0)},hide:function(){return Vb(this)},toggle:function(a){return"boolean"==typeof a?a?this.show():this.hide():this.each(function(){U(this)?m(this).show():m(this).hide()})}});function Zb(a,b,c,d,e){return new Zb.prototype.init(a,b,c,d,e)}m.Tween=Zb,Zb.prototype={constructor:Zb,init:function(a,b,c,d,e,f){this.elem=a,this.prop=c,this.easing=e||"swing",this.options=b,this.start=this.now=this.cur(),this.end=d,this.unit=f||(m.cssNumber[c]?"":"px")
-},cur:function(){var a=Zb.propHooks[this.prop];return a&&a.get?a.get(this):Zb.propHooks._default.get(this)},run:function(a){var b,c=Zb.propHooks[this.prop];return this.pos=b=this.options.duration?m.easing[this.easing](a,this.options.duration*a,0,1,this.options.duration):a,this.now=(this.end-this.start)*b+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),c&&c.set?c.set(this):Zb.propHooks._default.set(this),this}},Zb.prototype.init.prototype=Zb.prototype,Zb.propHooks={_default:{get:function(a){var b;return null==a.elem[a.prop]||a.elem.style&&null!=a.elem.style[a.prop]?(b=m.css(a.elem,a.prop,""),b&&"auto"!==b?b:0):a.elem[a.prop]},set:function(a){m.fx.step[a.prop]?m.fx.step[a.prop](a):a.elem.style&&(null!=a.elem.style[m.cssProps[a.prop]]||m.cssHooks[a.prop])?m.style(a.elem,a.prop,a.now+a.unit):a.elem[a.prop]=a.now}}},Zb.propHooks.scrollTop=Zb.propHooks.scrollLeft={set:function(a){a.elem.nodeType&&a.elem.parentNode&&(a.elem[a.prop]=a.now)}},m.easing={linear:function(a){return a},swing:function(a){return.5-Math.cos(a*Math.PI)/2}},m.fx=Zb.prototype.init,m.fx.step={};var $b,_b,ac=/^(?:toggle|show|hide)$/,bc=new RegExp("^(?:([+-])=|)("+S+")([a-z%]*)$","i"),cc=/queueHooks$/,dc=[ic],ec={"*":[function(a,b){var c=this.createTween(a,b),d=c.cur(),e=bc.exec(b),f=e&&e[3]||(m.cssNumber[a]?"":"px"),g=(m.cssNumber[a]||"px"!==f&&+d)&&bc.exec(m.css(c.elem,a)),h=1,i=20;if(g&&g[3]!==f){f=f||g[3],e=e||[],g=+d||1;do h=h||".5",g/=h,m.style(c.elem,a,g+f);while(h!==(h=c.cur()/d)&&1!==h&&--i)}return e&&(g=c.start=+g||+d||0,c.unit=f,c.end=e[1]?g+(e[1]+1)*e[2]:+e[2]),c}]};function fc(){return setTimeout(function(){$b=void 0}),$b=m.now()}function gc(a,b){var c,d={height:a},e=0;for(b=b?1:0;4>e;e+=2-b)c=T[e],d["margin"+c]=d["padding"+c]=a;return b&&(d.opacity=d.width=a),d}function hc(a,b,c){for(var d,e=(ec[b]||[]).concat(ec["*"]),f=0,g=e.length;g>f;f++)if(d=e[f].call(c,b,a))return d}function ic(a,b,c){var d,e,f,g,h,i,j,l,n=this,o={},p=a.style,q=a.nodeType&&U(a),r=m._data(a,"fxshow");c.queue||(h=m._queueHooks(a,"fx"),null==h.unqueued&&(h.unqueued=0,i=h.empty.fire,h.empty.fire=function(){h.unqueued||i()}),h.unqueued++,n.always(function(){n.always(function(){h.unqueued--,m.queue(a,"fx").length||h.empty.fire()})})),1===a.nodeType&&("height"in b||"width"in b)&&(c.overflow=[p.overflow,p.overflowX,p.overflowY],j=m.css(a,"display"),l="none"===j?m._data(a,"olddisplay")||Fb(a.nodeName):j,"inline"===l&&"none"===m.css(a,"float")&&(k.inlineBlockNeedsLayout&&"inline"!==Fb(a.nodeName)?p.zoom=1:p.display="inline-block")),c.overflow&&(p.overflow="hidden",k.shrinkWrapBlocks()||n.always(function(){p.overflow=c.overflow[0],p.overflowX=c.overflow[1],p.overflowY=c.overflow[2]}));for(d in b)if(e=b[d],ac.exec(e)){if(delete b[d],f=f||"toggle"===e,e===(q?"hide":"show")){if("show"!==e||!r||void 0===r[d])continue;q=!0}o[d]=r&&r[d]||m.style(a,d)}else j=void 0;if(m.isEmptyObject(o))"inline"===("none"===j?Fb(a.nodeName):j)&&(p.display=j);else{r?"hidden"in r&&(q=r.hidden):r=m._data(a,"fxshow",{}),f&&(r.hidden=!q),q?m(a).show():n.done(function(){m(a).hide()}),n.done(function(){var b;m._removeData(a,"fxshow");for(b in o)m.style(a,b,o[b])});for(d in o)g=hc(q?r[d]:0,d,n),d in r||(r[d]=g.start,q&&(g.end=g.start,g.start="width"===d||"height"===d?1:0))}}function jc(a,b){var c,d,e,f,g;for(c in a)if(d=m.camelCase(c),e=b[d],f=a[c],m.isArray(f)&&(e=f[1],f=a[c]=f[0]),c!==d&&(a[d]=f,delete a[c]),g=m.cssHooks[d],g&&"expand"in g){f=g.expand(f),delete a[d];for(c in f)c in a||(a[c]=f[c],b[c]=e)}else b[d]=e}function kc(a,b,c){var 
d,e,f=0,g=dc.length,h=m.Deferred().always(function(){delete i.elem}),i=function(){if(e)return!1;for(var b=$b||fc(),c=Math.max(0,j.startTime+j.duration-b),d=c/j.duration||0,f=1-d,g=0,i=j.tweens.length;i>g;g++)j.tweens[g].run(f);return h.notifyWith(a,[j,f,c]),1>f&&i?c:(h.resolveWith(a,[j]),!1)},j=h.promise({elem:a,props:m.extend({},b),opts:m.extend(!0,{specialEasing:{}},c),originalProperties:b,originalOptions:c,startTime:$b||fc(),duration:c.duration,tweens:[],createTween:function(b,c){var d=m.Tween(a,j.opts,b,c,j.opts.specialEasing[b]||j.opts.easing);return j.tweens.push(d),d},stop:function(b){var c=0,d=b?j.tweens.length:0;if(e)return this;for(e=!0;d>c;c++)j.tweens[c].run(1);return b?h.resolveWith(a,[j,b]):h.rejectWith(a,[j,b]),this}}),k=j.props;for(jc(k,j.opts.specialEasing);g>f;f++)if(d=dc[f].call(j,a,k,j.opts))return d;return m.map(k,hc,j),m.isFunction(j.opts.start)&&j.opts.start.call(a,j),m.fx.timer(m.extend(i,{elem:a,anim:j,queue:j.opts.queue})),j.progress(j.opts.progress).done(j.opts.done,j.opts.complete).fail(j.opts.fail).always(j.opts.always)}m.Animation=m.extend(kc,{tweener:function(a,b){m.isFunction(a)?(b=a,a=["*"]):a=a.split(" ");for(var c,d=0,e=a.length;e>d;d++)c=a[d],ec[c]=ec[c]||[],ec[c].unshift(b)},prefilter:function(a,b){b?dc.unshift(a):dc.push(a)}}),m.speed=function(a,b,c){var d=a&&"object"==typeof a?m.extend({},a):{complete:c||!c&&b||m.isFunction(a)&&a,duration:a,easing:c&&b||b&&!m.isFunction(b)&&b};return d.duration=m.fx.off?0:"number"==typeof d.duration?d.duration:d.duration in m.fx.speeds?m.fx.speeds[d.duration]:m.fx.speeds._default,(null==d.queue||d.queue===!0)&&(d.queue="fx"),d.old=d.complete,d.complete=function(){m.isFunction(d.old)&&d.old.call(this),d.queue&&m.dequeue(this,d.queue)},d},m.fn.extend({fadeTo:function(a,b,c,d){return this.filter(U).css("opacity",0).show().end().animate({opacity:b},a,c,d)},animate:function(a,b,c,d){var e=m.isEmptyObject(a),f=m.speed(b,c,d),g=function(){var b=kc(this,m.extend({},a),f);(e||m._data(this,"finish"))&&b.stop(!0)};return g.finish=g,e||f.queue===!1?this.each(g):this.queue(f.queue,g)},stop:function(a,b,c){var d=function(a){var b=a.stop;delete a.stop,b(c)};return"string"!=typeof a&&(c=b,b=a,a=void 0),b&&a!==!1&&this.queue(a||"fx",[]),this.each(function(){var b=!0,e=null!=a&&a+"queueHooks",f=m.timers,g=m._data(this);if(e)g[e]&&g[e].stop&&d(g[e]);else for(e in g)g[e]&&g[e].stop&&cc.test(e)&&d(g[e]);for(e=f.length;e--;)f[e].elem!==this||null!=a&&f[e].queue!==a||(f[e].anim.stop(c),b=!1,f.splice(e,1));(b||!c)&&m.dequeue(this,a)})},finish:function(a){return a!==!1&&(a=a||"fx"),this.each(function(){var b,c=m._data(this),d=c[a+"queue"],e=c[a+"queueHooks"],f=m.timers,g=d?d.length:0;for(c.finish=!0,m.queue(this,a,[]),e&&e.stop&&e.stop.call(this,!0),b=f.length;b--;)f[b].elem===this&&f[b].queue===a&&(f[b].anim.stop(!0),f.splice(b,1));for(b=0;g>b;b++)d[b]&&d[b].finish&&d[b].finish.call(this);delete c.finish})}}),m.each(["toggle","show","hide"],function(a,b){var c=m.fn[b];m.fn[b]=function(a,d,e){return null==a||"boolean"==typeof a?c.apply(this,arguments):this.animate(gc(b,!0),a,d,e)}}),m.each({slideDown:gc("show"),slideUp:gc("hide"),slideToggle:gc("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(a,b){m.fn[a]=function(a,c,d){return this.animate(b,a,c,d)}}),m.timers=[],m.fx.tick=function(){var a,b=m.timers,c=0;for($b=m.now();c<b.length;c++)a=b[c],a()||b[c]!==a||b.splice(c--,1);b.length||m.fx.stop(),$b=void 
0},m.fx.timer=function(a){m.timers.push(a),a()?m.fx.start():m.timers.pop()},m.fx.interval=13,m.fx.start=function(){_b||(_b=setInterval(m.fx.tick,m.fx.interval))},m.fx.stop=function(){clearInterval(_b),_b=null},m.fx.speeds={slow:600,fast:200,_default:400},m.fn.delay=function(a,b){return a=m.fx?m.fx.speeds[a]||a:a,b=b||"fx",this.queue(b,function(b,c){var d=setTimeout(b,a);c.stop=function(){clearTimeout(d)}})},function(){var a,b,c,d,e;b=y.createElement("div"),b.setAttribute("className","t"),b.innerHTML="  <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",d=b.getElementsByTagName("a")[0],c=y.createElement("select"),e=c.appendChild(y.createElement("option")),a=b.getElementsByTagName("input")[0],d.style.cssText="top:1px",k.getSetAttribute="t"!==b.className,k.style=/top/.test(d.getAttribute("style")),k.hrefNormalized="/a"===d.getAttribute("href"),k.checkOn=!!a.value,k.optSelected=e.selected,k.enctype=!!y.createElement("form").enctype,c.disabled=!0,k.optDisabled=!e.disabled,a=y.createElement("input"),a.setAttribute("value",""),k.input=""===a.getAttribute("value"),a.value="t",a.setAttribute("type","radio"),k.radioValue="t"===a.value}();var lc=/\r/g;m.fn.extend({val:function(a){var b,c,d,e=this[0];{if(arguments.length)return d=m.isFunction(a),this.each(function(c){var e;1===this.nodeType&&(e=d?a.call(this,c,m(this).val()):a,null==e?e="":"number"==typeof e?e+="":m.isArray(e)&&(e=m.map(e,function(a){return null==a?"":a+""})),b=m.valHooks[this.type]||m.valHooks[this.nodeName.toLowerCase()],b&&"set"in b&&void 0!==b.set(this,e,"value")||(this.value=e))});if(e)return b=m.valHooks[e.type]||m.valHooks[e.nodeName.toLowerCase()],b&&"get"in b&&void 0!==(c=b.get(e,"value"))?c:(c=e.value,"string"==typeof c?c.replace(lc,""):null==c?"":c)}}}),m.extend({valHooks:{option:{get:function(a){var b=m.find.attr(a,"value");return null!=b?b:m.trim(m.text(a))}},select:{get:function(a){for(var b,c,d=a.options,e=a.selectedIndex,f="select-one"===a.type||0>e,g=f?null:[],h=f?e+1:d.length,i=0>e?h:f?e:0;h>i;i++)if(c=d[i],!(!c.selected&&i!==e||(k.optDisabled?c.disabled:null!==c.getAttribute("disabled"))||c.parentNode.disabled&&m.nodeName(c.parentNode,"optgroup"))){if(b=m(c).val(),f)return b;g.push(b)}return g},set:function(a,b){var c,d,e=a.options,f=m.makeArray(b),g=e.length;while(g--)if(d=e[g],m.inArray(m.valHooks.option.get(d),f)>=0)try{d.selected=c=!0}catch(h){d.scrollHeight}else d.selected=!1;return c||(a.selectedIndex=-1),e}}}}),m.each(["radio","checkbox"],function(){m.valHooks[this]={set:function(a,b){return m.isArray(b)?a.checked=m.inArray(m(a).val(),b)>=0:void 0}},k.checkOn||(m.valHooks[this].get=function(a){return null===a.getAttribute("value")?"on":a.value})});var mc,nc,oc=m.expr.attrHandle,pc=/^(?:checked|selected)$/i,qc=k.getSetAttribute,rc=k.input;m.fn.extend({attr:function(a,b){return V(this,m.attr,a,b,arguments.length>1)},removeAttr:function(a){return this.each(function(){m.removeAttr(this,a)})}}),m.extend({attr:function(a,b,c){var d,e,f=a.nodeType;if(a&&3!==f&&8!==f&&2!==f)return typeof a.getAttribute===K?m.prop(a,b,c):(1===f&&m.isXMLDoc(a)||(b=b.toLowerCase(),d=m.attrHooks[b]||(m.expr.match.bool.test(b)?nc:mc)),void 0===c?d&&"get"in d&&null!==(e=d.get(a,b))?e:(e=m.find.attr(a,b),null==e?void 0:e):null!==c?d&&"set"in d&&void 0!==(e=d.set(a,c,b))?e:(a.setAttribute(b,c+""),c):void m.removeAttr(a,b))},removeAttr:function(a,b){var 
c,d,e=0,f=b&&b.match(E);if(f&&1===a.nodeType)while(c=f[e++])d=m.propFix[c]||c,m.expr.match.bool.test(c)?rc&&qc||!pc.test(c)?a[d]=!1:a[m.camelCase("default-"+c)]=a[d]=!1:m.attr(a,c,""),a.removeAttribute(qc?c:d)},attrHooks:{type:{set:function(a,b){if(!k.radioValue&&"radio"===b&&m.nodeName(a,"input")){var c=a.value;return a.setAttribute("type",b),c&&(a.value=c),b}}}}}),nc={set:function(a,b,c){return b===!1?m.removeAttr(a,c):rc&&qc||!pc.test(c)?a.setAttribute(!qc&&m.propFix[c]||c,c):a[m.camelCase("default-"+c)]=a[c]=!0,c}},m.each(m.expr.match.bool.source.match(/\w+/g),function(a,b){var c=oc[b]||m.find.attr;oc[b]=rc&&qc||!pc.test(b)?function(a,b,d){var e,f;return d||(f=oc[b],oc[b]=e,e=null!=c(a,b,d)?b.toLowerCase():null,oc[b]=f),e}:function(a,b,c){return c?void 0:a[m.camelCase("default-"+b)]?b.toLowerCase():null}}),rc&&qc||(m.attrHooks.value={set:function(a,b,c){return m.nodeName(a,"input")?void(a.defaultValue=b):mc&&mc.set(a,b,c)}}),qc||(mc={set:function(a,b,c){var d=a.getAttributeNode(c);return d||a.setAttributeNode(d=a.ownerDocument.createAttribute(c)),d.value=b+="","value"===c||b===a.getAttribute(c)?b:void 0}},oc.id=oc.name=oc.coords=function(a,b,c){var d;return c?void 0:(d=a.getAttributeNode(b))&&""!==d.value?d.value:null},m.valHooks.button={get:function(a,b){var c=a.getAttributeNode(b);return c&&c.specified?c.value:void 0},set:mc.set},m.attrHooks.contenteditable={set:function(a,b,c){mc.set(a,""===b?!1:b,c)}},m.each(["width","height"],function(a,b){m.attrHooks[b]={set:function(a,c){return""===c?(a.setAttribute(b,"auto"),c):void 0}}})),k.style||(m.attrHooks.style={get:function(a){return a.style.cssText||void 0},set:function(a,b){return a.style.cssText=b+""}});var sc=/^(?:input|select|textarea|button|object)$/i,tc=/^(?:a|area)$/i;m.fn.extend({prop:function(a,b){return V(this,m.prop,a,b,arguments.length>1)},removeProp:function(a){return a=m.propFix[a]||a,this.each(function(){try{this[a]=void 0,delete this[a]}catch(b){}})}}),m.extend({propFix:{"for":"htmlFor","class":"className"},prop:function(a,b,c){var d,e,f,g=a.nodeType;if(a&&3!==g&&8!==g&&2!==g)return f=1!==g||!m.isXMLDoc(a),f&&(b=m.propFix[b]||b,e=m.propHooks[b]),void 0!==c?e&&"set"in e&&void 0!==(d=e.set(a,c,b))?d:a[b]=c:e&&"get"in e&&null!==(d=e.get(a,b))?d:a[b]},propHooks:{tabIndex:{get:function(a){var b=m.find.attr(a,"tabindex");return b?parseInt(b,10):sc.test(a.nodeName)||tc.test(a.nodeName)&&a.href?0:-1}}}}),k.hrefNormalized||m.each(["href","src"],function(a,b){m.propHooks[b]={get:function(a){return a.getAttribute(b,4)}}}),k.optSelected||(m.propHooks.selected={get:function(a){var b=a.parentNode;return b&&(b.selectedIndex,b.parentNode&&b.parentNode.selectedIndex),null}}),m.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){m.propFix[this.toLowerCase()]=this}),k.enctype||(m.propFix.enctype="encoding");var uc=/[\t\r\n\f]/g;m.fn.extend({addClass:function(a){var b,c,d,e,f,g,h=0,i=this.length,j="string"==typeof a&&a;if(m.isFunction(a))return this.each(function(b){m(this).addClass(a.call(this,b,this.className))});if(j)for(b=(a||"").match(E)||[];i>h;h++)if(c=this[h],d=1===c.nodeType&&(c.className?(" "+c.className+" ").replace(uc," "):" ")){f=0;while(e=b[f++])d.indexOf(" "+e+" ")<0&&(d+=e+" ");g=m.trim(d),c.className!==g&&(c.className=g)}return this},removeClass:function(a){var b,c,d,e,f,g,h=0,i=this.length,j=0===arguments.length||"string"==typeof a&&a;if(m.isFunction(a))return 
this.each(function(b){m(this).removeClass(a.call(this,b,this.className))});if(j)for(b=(a||"").match(E)||[];i>h;h++)if(c=this[h],d=1===c.nodeType&&(c.className?(" "+c.className+" ").replace(uc," "):"")){f=0;while(e=b[f++])while(d.indexOf(" "+e+" ")>=0)d=d.replace(" "+e+" "," ");g=a?m.trim(d):"",c.className!==g&&(c.className=g)}return this},toggleClass:function(a,b){var c=typeof a;return"boolean"==typeof b&&"string"===c?b?this.addClass(a):this.removeClass(a):this.each(m.isFunction(a)?function(c){m(this).toggleClass(a.call(this,c,this.className,b),b)}:function(){if("string"===c){var b,d=0,e=m(this),f=a.match(E)||[];while(b=f[d++])e.hasClass(b)?e.removeClass(b):e.addClass(b)}else(c===K||"boolean"===c)&&(this.className&&m._data(this,"__className__",this.className),this.className=this.className||a===!1?"":m._data(this,"__className__")||"")})},hasClass:function(a){for(var b=" "+a+" ",c=0,d=this.length;d>c;c++)if(1===this[c].nodeType&&(" "+this[c].className+" ").replace(uc," ").indexOf(b)>=0)return!0;return!1}}),m.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(a,b){m.fn[b]=function(a,c){return arguments.length>0?this.on(b,null,a,c):this.trigger(b)}}),m.fn.extend({hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)},bind:function(a,b,c){return this.on(a,null,b,c)},unbind:function(a,b){return this.off(a,null,b)},delegate:function(a,b,c,d){return this.on(b,a,c,d)},undelegate:function(a,b,c){return 1===arguments.length?this.off(a,"**"):this.off(b,a||"**",c)}});var vc=m.now(),wc=/\?/,xc=/(,)|(\[|{)|(}|])|"(?:[^"\\\r\n]|\\["\\\/bfnrt]|\\u[\da-fA-F]{4})*"\s*:?|true|false|null|-?(?!0\d)\d+(?:\.\d+|)(?:[eE][+-]?\d+|)/g;m.parseJSON=function(b){if(a.JSON&&a.JSON.parse)return a.JSON.parse(b+"");var c,d=null,e=m.trim(b+"");return e&&!m.trim(e.replace(xc,function(a,b,e,f){return c&&b&&(d=0),0===d?a:(c=e||b,d+=!f-!e,"")}))?Function("return "+e)():m.error("Invalid JSON: "+b)},m.parseXML=function(b){var c,d;if(!b||"string"!=typeof b)return null;try{a.DOMParser?(d=new DOMParser,c=d.parseFromString(b,"text/xml")):(c=new ActiveXObject("Microsoft.XMLDOM"),c.async="false",c.loadXML(b))}catch(e){c=void 0}return c&&c.documentElement&&!c.getElementsByTagName("parsererror").length||m.error("Invalid XML: "+b),c};var yc,zc,Ac=/#.*$/,Bc=/([?&])_=[^&]*/,Cc=/^(.*?):[ \t]*([^\r\n]*)\r?$/gm,Dc=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,Ec=/^(?:GET|HEAD)$/,Fc=/^\/\//,Gc=/^([\w.+-]+:)(?:\/\/(?:[^\/?#]*@|)([^\/?#:]*)(?::(\d+)|)|)/,Hc={},Ic={},Jc="*/".concat("*");try{zc=location.href}catch(Kc){zc=y.createElement("a"),zc.href="",zc=zc.href}yc=Gc.exec(zc.toLowerCase())||[];function Lc(a){return function(b,c){"string"!=typeof b&&(c=b,b="*");var d,e=0,f=b.toLowerCase().match(E)||[];if(m.isFunction(c))while(d=f[e++])"+"===d.charAt(0)?(d=d.slice(1)||"*",(a[d]=a[d]||[]).unshift(c)):(a[d]=a[d]||[]).push(c)}}function Mc(a,b,c,d){var e={},f=a===Ic;function g(h){var i;return e[h]=!0,m.each(a[h]||[],function(a,h){var j=h(b,c,d);return"string"!=typeof j||f||e[j]?f?!(i=j):void 0:(b.dataTypes.unshift(j),g(j),!1)}),i}return g(b.dataTypes[0])||!e["*"]&&g("*")}function Nc(a,b){var c,d,e=m.ajaxSettings.flatOptions||{};for(d in b)void 0!==b[d]&&((e[d]?a:c||(c={}))[d]=b[d]);return c&&m.extend(!0,a,c),a}function Oc(a,b,c){var d,e,f,g,h=a.contents,i=a.dataTypes;while("*"===i[0])i.shift(),void 
0===e&&(e=a.mimeType||b.getResponseHeader("Content-Type"));if(e)for(g in h)if(h[g]&&h[g].test(e)){i.unshift(g);break}if(i[0]in c)f=i[0];else{for(g in c){if(!i[0]||a.converters[g+" "+i[0]]){f=g;break}d||(d=g)}f=f||d}return f?(f!==i[0]&&i.unshift(f),c[f]):void 0}function Pc(a,b,c,d){var e,f,g,h,i,j={},k=a.dataTypes.slice();if(k[1])for(g in a.converters)j[g.toLowerCase()]=a.converters[g];f=k.shift();while(f)if(a.responseFields[f]&&(c[a.responseFields[f]]=b),!i&&d&&a.dataFilter&&(b=a.dataFilter(b,a.dataType)),i=f,f=k.shift())if("*"===f)f=i;else if("*"!==i&&i!==f){if(g=j[i+" "+f]||j["* "+f],!g)for(e in j)if(h=e.split(" "),h[1]===f&&(g=j[i+" "+h[0]]||j["* "+h[0]])){g===!0?g=j[e]:j[e]!==!0&&(f=h[0],k.unshift(h[1]));break}if(g!==!0)if(g&&a["throws"])b=g(b);else try{b=g(b)}catch(l){return{state:"parsererror",error:g?l:"No conversion from "+i+" to "+f}}}return{state:"success",data:b}}m.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:zc,type:"GET",isLocal:Dc.test(yc[1]),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":Jc,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":m.parseJSON,"text xml":m.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(a,b){return b?Nc(Nc(a,m.ajaxSettings),b):Nc(m.ajaxSettings,a)},ajaxPrefilter:Lc(Hc),ajaxTransport:Lc(Ic),ajax:function(a,b){"object"==typeof a&&(b=a,a=void 0),b=b||{};var c,d,e,f,g,h,i,j,k=m.ajaxSetup({},b),l=k.context||k,n=k.context&&(l.nodeType||l.jquery)?m(l):m.event,o=m.Deferred(),p=m.Callbacks("once memory"),q=k.statusCode||{},r={},s={},t=0,u="canceled",v={readyState:0,getResponseHeader:function(a){var b;if(2===t){if(!j){j={};while(b=Cc.exec(f))j[b[1].toLowerCase()]=b[2]}b=j[a.toLowerCase()]}return null==b?null:b},getAllResponseHeaders:function(){return 2===t?f:null},setRequestHeader:function(a,b){var c=a.toLowerCase();return t||(a=s[c]=s[c]||a,r[a]=b),this},overrideMimeType:function(a){return t||(k.mimeType=a),this},statusCode:function(a){var b;if(a)if(2>t)for(b in a)q[b]=[q[b],a[b]];else v.always(a[v.status]);return this},abort:function(a){var b=a||u;return i&&i.abort(b),x(0,b),this}};if(o.promise(v).complete=p.add,v.success=v.done,v.error=v.fail,k.url=((a||k.url||zc)+"").replace(Ac,"").replace(Fc,yc[1]+"//"),k.type=b.method||b.type||k.method||k.type,k.dataTypes=m.trim(k.dataType||"*").toLowerCase().match(E)||[""],null==k.crossDomain&&(c=Gc.exec(k.url.toLowerCase()),k.crossDomain=!(!c||c[1]===yc[1]&&c[2]===yc[2]&&(c[3]||("http:"===c[1]?"80":"443"))===(yc[3]||("http:"===yc[1]?"80":"443")))),k.data&&k.processData&&"string"!=typeof k.data&&(k.data=m.param(k.data,k.traditional)),Mc(Hc,k,b,v),2===t)return v;h=k.global,h&&0===m.active++&&m.event.trigger("ajaxStart"),k.type=k.type.toUpperCase(),k.hasContent=!Ec.test(k.type),e=k.url,k.hasContent||(k.data&&(e=k.url+=(wc.test(e)?"&":"?")+k.data,delete 
k.data),k.cache===!1&&(k.url=Bc.test(e)?e.replace(Bc,"$1_="+vc++):e+(wc.test(e)?"&":"?")+"_="+vc++)),k.ifModified&&(m.lastModified[e]&&v.setRequestHeader("If-Modified-Since",m.lastModified[e]),m.etag[e]&&v.setRequestHeader("If-None-Match",m.etag[e])),(k.data&&k.hasContent&&k.contentType!==!1||b.contentType)&&v.setRequestHeader("Content-Type",k.contentType),v.setRequestHeader("Accept",k.dataTypes[0]&&k.accepts[k.dataTypes[0]]?k.accepts[k.dataTypes[0]]+("*"!==k.dataTypes[0]?", "+Jc+"; q=0.01":""):k.accepts["*"]);for(d in k.headers)v.setRequestHeader(d,k.headers[d]);if(k.beforeSend&&(k.beforeSend.call(l,v,k)===!1||2===t))return v.abort();u="abort";for(d in{success:1,error:1,complete:1})v[d](k[d]);if(i=Mc(Ic,k,b,v)){v.readyState=1,h&&n.trigger("ajaxSend",[v,k]),k.async&&k.timeout>0&&(g=setTimeout(function(){v.abort("timeout")},k.timeout));try{t=1,i.send(r,x)}catch(w){if(!(2>t))throw w;x(-1,w)}}else x(-1,"No Transport");function x(a,b,c,d){var j,r,s,u,w,x=b;2!==t&&(t=2,g&&clearTimeout(g),i=void 0,f=d||"",v.readyState=a>0?4:0,j=a>=200&&300>a||304===a,c&&(u=Oc(k,v,c)),u=Pc(k,u,v,j),j?(k.ifModified&&(w=v.getResponseHeader("Last-Modified"),w&&(m.lastModified[e]=w),w=v.getResponseHeader("etag"),w&&(m.etag[e]=w)),204===a||"HEAD"===k.type?x="nocontent":304===a?x="notmodified":(x=u.state,r=u.data,s=u.error,j=!s)):(s=x,(a||!x)&&(x="error",0>a&&(a=0))),v.status=a,v.statusText=(b||x)+"",j?o.resolveWith(l,[r,x,v]):o.rejectWith(l,[v,x,s]),v.statusCode(q),q=void 0,h&&n.trigger(j?"ajaxSuccess":"ajaxError",[v,k,j?r:s]),p.fireWith(l,[v,x]),h&&(n.trigger("ajaxComplete",[v,k]),--m.active||m.event.trigger("ajaxStop")))}return v},getJSON:function(a,b,c){return m.get(a,b,c,"json")},getScript:function(a,b){return m.get(a,void 0,b,"script")}}),m.each(["get","post"],function(a,b){m[b]=function(a,c,d,e){return m.isFunction(c)&&(e=e||d,d=c,c=void 0),m.ajax({url:a,type:b,dataType:e,data:c,success:d})}}),m.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(a,b){m.fn[b]=function(a){return this.on(b,a)}}),m._evalUrl=function(a){return m.ajax({url:a,type:"GET",dataType:"script",async:!1,global:!1,"throws":!0})},m.fn.extend({wrapAll:function(a){if(m.isFunction(a))return this.each(function(b){m(this).wrapAll(a.call(this,b))});if(this[0]){var b=m(a,this[0].ownerDocument).eq(0).clone(!0);this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstChild&&1===a.firstChild.nodeType)a=a.firstChild;return a}).append(this)}return this},wrapInner:function(a){return this.each(m.isFunction(a)?function(b){m(this).wrapInner(a.call(this,b))}:function(){var b=m(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=m.isFunction(a);return this.each(function(c){m(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){m.nodeName(this,"body")||m(this).replaceWith(this.childNodes)}).end()}}),m.expr.filters.hidden=function(a){return a.offsetWidth<=0&&a.offsetHeight<=0||!k.reliableHiddenOffsets()&&"none"===(a.style&&a.style.display||m.css(a,"display"))},m.expr.filters.visible=function(a){return!m.expr.filters.hidden(a)};var Qc=/%20/g,Rc=/\[\]$/,Sc=/\r?\n/g,Tc=/^(?:submit|button|image|reset|file)$/i,Uc=/^(?:input|select|textarea|keygen)/i;function Vc(a,b,c,d){var e;if(m.isArray(b))m.each(b,function(b,e){c||Rc.test(a)?d(a,e):Vc(a+"["+("object"==typeof e?b:"")+"]",e,c,d)});else if(c||"object"!==m.type(b))d(a,b);else for(e in b)Vc(a+"["+e+"]",b[e],c,d)}m.param=function(a,b){var 
c,d=[],e=function(a,b){b=m.isFunction(b)?b():null==b?"":b,d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(b)};if(void 0===b&&(b=m.ajaxSettings&&m.ajaxSettings.traditional),m.isArray(a)||a.jquery&&!m.isPlainObject(a))m.each(a,function(){e(this.name,this.value)});else for(c in a)Vc(c,a[c],b,e);return d.join("&").replace(Qc,"+")},m.fn.extend({serialize:function(){return m.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var a=m.prop(this,"elements");return a?m.makeArray(a):this}).filter(function(){var a=this.type;return this.name&&!m(this).is(":disabled")&&Uc.test(this.nodeName)&&!Tc.test(a)&&(this.checked||!W.test(a))}).map(function(a,b){var c=m(this).val();return null==c?null:m.isArray(c)?m.map(c,function(a){return{name:b.name,value:a.replace(Sc,"\r\n")}}):{name:b.name,value:c.replace(Sc,"\r\n")}}).get()}}),m.ajaxSettings.xhr=void 0!==a.ActiveXObject?function(){return!this.isLocal&&/^(get|post|head|put|delete|options)$/i.test(this.type)&&Zc()||$c()}:Zc;var Wc=0,Xc={},Yc=m.ajaxSettings.xhr();a.ActiveXObject&&m(a).on("unload",function(){for(var a in Xc)Xc[a](void 0,!0)}),k.cors=!!Yc&&"withCredentials"in Yc,Yc=k.ajax=!!Yc,Yc&&m.ajaxTransport(function(a){if(!a.crossDomain||k.cors){var b;return{send:function(c,d){var e,f=a.xhr(),g=++Wc;if(f.open(a.type,a.url,a.async,a.username,a.password),a.xhrFields)for(e in a.xhrFields)f[e]=a.xhrFields[e];a.mimeType&&f.overrideMimeType&&f.overrideMimeType(a.mimeType),a.crossDomain||c["X-Requested-With"]||(c["X-Requested-With"]="XMLHttpRequest");for(e in c)void 0!==c[e]&&f.setRequestHeader(e,c[e]+"");f.send(a.hasContent&&a.data||null),b=function(c,e){var h,i,j;if(b&&(e||4===f.readyState))if(delete Xc[g],b=void 0,f.onreadystatechange=m.noop,e)4!==f.readyState&&f.abort();else{j={},h=f.status,"string"==typeof f.responseText&&(j.text=f.responseText);try{i=f.statusText}catch(k){i=""}h||!a.isLocal||a.crossDomain?1223===h&&(h=204):h=j.text?200:404}j&&d(h,i,j,f.getAllResponseHeaders())},a.async?4===f.readyState?setTimeout(b):f.onreadystatechange=Xc[g]=b:b()},abort:function(){b&&b(void 0,!0)}}}});function Zc(){try{return new a.XMLHttpRequest}catch(b){}}function $c(){try{return new a.ActiveXObject("Microsoft.XMLHTTP")}catch(b){}}m.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/(?:java|ecma)script/},converters:{"text script":function(a){return m.globalEval(a),a}}}),m.ajaxPrefilter("script",function(a){void 0===a.cache&&(a.cache=!1),a.crossDomain&&(a.type="GET",a.global=!1)}),m.ajaxTransport("script",function(a){if(a.crossDomain){var b,c=y.head||m("head")[0]||y.documentElement;return{send:function(d,e){b=y.createElement("script"),b.async=!0,a.scriptCharset&&(b.charset=a.scriptCharset),b.src=a.url,b.onload=b.onreadystatechange=function(a,c){(c||!b.readyState||/loaded|complete/.test(b.readyState))&&(b.onload=b.onreadystatechange=null,b.parentNode&&b.parentNode.removeChild(b),b=null,c||e(200,"success"))},c.insertBefore(b,c.firstChild)},abort:function(){b&&b.onload(void 0,!0)}}}});var _c=[],ad=/(=)\?(?=&|$)|\?\?/;m.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var a=_c.pop()||m.expando+"_"+vc++;return this[a]=!0,a}}),m.ajaxPrefilter("json jsonp",function(b,c,d){var e,f,g,h=b.jsonp!==!1&&(ad.test(b.url)?"url":"string"==typeof b.data&&!(b.contentType||"").indexOf("application/x-www-form-urlencoded")&&ad.test(b.data)&&"data");return 
h||"jsonp"===b.dataTypes[0]?(e=b.jsonpCallback=m.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,h?b[h]=b[h].replace(ad,"$1"+e):b.jsonp!==!1&&(b.url+=(wc.test(b.url)?"&":"?")+b.jsonp+"="+e),b.converters["script json"]=function(){return g||m.error(e+" was not called"),g[0]},b.dataTypes[0]="json",f=a[e],a[e]=function(){g=arguments},d.always(function(){a[e]=f,b[e]&&(b.jsonpCallback=c.jsonpCallback,_c.push(e)),g&&m.isFunction(f)&&f(g[0]),g=f=void 0}),"script"):void 0}),m.parseHTML=function(a,b,c){if(!a||"string"!=typeof a)return null;"boolean"==typeof b&&(c=b,b=!1),b=b||y;var d=u.exec(a),e=!c&&[];return d?[b.createElement(d[1])]:(d=m.buildFragment([a],b,e),e&&e.length&&m(e).remove(),m.merge([],d.childNodes))};var bd=m.fn.load;m.fn.load=function(a,b,c){if("string"!=typeof a&&bd)return bd.apply(this,arguments);var d,e,f,g=this,h=a.indexOf(" ");return h>=0&&(d=m.trim(a.slice(h,a.length)),a=a.slice(0,h)),m.isFunction(b)?(c=b,b=void 0):b&&"object"==typeof b&&(f="POST"),g.length>0&&m.ajax({url:a,type:f,dataType:"html",data:b}).done(function(a){e=arguments,g.html(d?m("<div>").append(m.parseHTML(a)).find(d):a)}).complete(c&&function(a,b){g.each(c,e||[a.responseText,b,a])}),this},m.expr.filters.animated=function(a){return m.grep(m.timers,function(b){return a===b.elem}).length};var cd=a.document.documentElement;function dd(a){return m.isWindow(a)?a:9===a.nodeType?a.defaultView||a.parentWindow:!1}m.offset={setOffset:function(a,b,c){var d,e,f,g,h,i,j,k=m.css(a,"position"),l=m(a),n={};"static"===k&&(a.style.position="relative"),h=l.offset(),f=m.css(a,"top"),i=m.css(a,"left"),j=("absolute"===k||"fixed"===k)&&m.inArray("auto",[f,i])>-1,j?(d=l.position(),g=d.top,e=d.left):(g=parseFloat(f)||0,e=parseFloat(i)||0),m.isFunction(b)&&(b=b.call(a,c,h)),null!=b.top&&(n.top=b.top-h.top+g),null!=b.left&&(n.left=b.left-h.left+e),"using"in b?b.using.call(a,n):l.css(n)}},m.fn.extend({offset:function(a){if(arguments.length)return void 0===a?this:this.each(function(b){m.offset.setOffset(this,a,b)});var b,c,d={top:0,left:0},e=this[0],f=e&&e.ownerDocument;if(f)return b=f.documentElement,m.contains(b,e)?(typeof e.getBoundingClientRect!==K&&(d=e.getBoundingClientRect()),c=dd(f),{top:d.top+(c.pageYOffset||b.scrollTop)-(b.clientTop||0),left:d.left+(c.pageXOffset||b.scrollLeft)-(b.clientLeft||0)}):d},position:function(){if(this[0]){var a,b,c={top:0,left:0},d=this[0];return"fixed"===m.css(d,"position")?b=d.getBoundingClientRect():(a=this.offsetParent(),b=this.offset(),m.nodeName(a[0],"html")||(c=a.offset()),c.top+=m.css(a[0],"borderTopWidth",!0),c.left+=m.css(a[0],"borderLeftWidth",!0)),{top:b.top-c.top-m.css(d,"marginTop",!0),left:b.left-c.left-m.css(d,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var a=this.offsetParent||cd;while(a&&!m.nodeName(a,"html")&&"static"===m.css(a,"position"))a=a.offsetParent;return a||cd})}}),m.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(a,b){var c=/Y/.test(b);m.fn[a]=function(d){return V(this,function(a,d,e){var f=dd(a);return void 0===e?f?b in f?f[b]:f.document.documentElement[d]:a[d]:void(f?f.scrollTo(c?m(f).scrollLeft():e,c?e:m(f).scrollTop()):a[d]=e)},a,d,arguments.length,null)}}),m.each(["top","left"],function(a,b){m.cssHooks[b]=Lb(k.pixelPosition,function(a,c){return c?(c=Jb(a,b),Hb.test(c)?m(a).position()[b]+"px":c):void 0})}),m.each({Height:"height",Width:"width"},function(a,b){m.each({padding:"inner"+a,content:b,"":"outer"+a},function(c,d){m.fn[d]=function(d,e){var f=arguments.length&&(c||"boolean"!=typeof 
d),g=c||(d===!0||e===!0?"margin":"border");return V(this,function(b,c,d){var e;return m.isWindow(b)?b.document.documentElement["client"+a]:9===b.nodeType?(e=b.documentElement,Math.max(b.body["scroll"+a],e["scroll"+a],b.body["offset"+a],e["offset"+a],e["client"+a])):void 0===d?m.css(b,c,g):m.style(b,c,d,g)},b,f?d:void 0,f,null)}})}),m.fn.size=function(){return this.length},m.fn.andSelf=m.fn.addBack,"function"==typeof define&&define.amd&&define("jquery",[],function(){return m});var ed=a.jQuery,fd=a.$;return m.noConflict=function(b){return a.$===m&&(a.$=fd),b&&a.jQuery===m&&(a.jQuery=ed),m},typeof b===K&&(a.jQuery=a.$=m),m});
diff --git a/publish/blog/aurora-0-6-0-incubating-released/index.html b/publish/blog/aurora-0-6-0-incubating-released/index.html
deleted file mode 100644
index 4395943..0000000
--- a/publish/blog/aurora-0-6-0-incubating-released/index.html
+++ /dev/null
@@ -1,157 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Apache Aurora logo"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <div class="container-fluid">
-
-<div class="row">
-<div class="col-md-3 buffer">
-	<div class="meta">
-		<span class="author">
-			
-			<span class="author_contact">
-			  <p><strong>Bill Farner</strong></p>
-			  <p><a href="http://twitter.com/wfarner">@wfarner</a></p>
-			</span>
-		</span>
-		<p><em>Posted December  8, 2014</em></p>
-	</div>
-	
-	<div class="share">
-		<span class="social-share-button"><a href="https://twitter.com/share" class="twitter-share-button" data-via="apachemesos">Tweet</a></span>
-		
-		<span><script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script></span>
-		
-		<script src="//platform.linkedin.com/in.js" type="text/javascript">
-		 lang: en_US
-		</script>
-		<script type="IN/Share" data-counter="right"></script>
-	</div>
-</div>
-
-<div class="post col-md-9">
-	<h1>Aurora 0.6.0-incubating Released</h1>
-	
-	<p>The latest Apache Aurora release, 0.6.0-incubating, is now available for download. This version marks the second Aurora release since becoming part of the <a href="http://incubator.apache.org">Apache Incubator</a>, and includes the following features and improvements:</p>
-
-<ul>
-<li>Introduced highly-available, scheduler-driven job updates in beta (<a href="https://issues.apache.org/jira/browse/AURORA-610">AURORA-610</a>)</li>
-<li>Improvements to the Aurora web UI, including visualization of job updates</li>
-<li>Added automatic service registration in ZooKeeper (<a href="https://issues.apache.org/jira/browse/AURORA-587">AURORA-587</a>)</li>
-<li>Updates to Aurora client version two, with deprecation of v1 coming in a future release</li>
-<li>Lots of new  <a href="http://aurora.incubator.apache.org/documentation/latest/">documentation</a>. Documentation now includes pages for <a href="http://aurora.incubator.apache.org/documentation/latest/deploying-aurora-scheduler/">deploying the Aurora scheduler</a>, <a href="http://aurora.incubator.apache.org/documentation/latest/cron-jobs/">cron jobs</a>, <a href="http://aurora.incubator.apache.org/documentation/latest/sla/">SLA measurement</a>, <a href="http://aurora.incubator.apache.org/documentation/latest/storage/">storage</a>, and <a href="http://aurora.incubator.apache.org/documentation/latest/storage-config/">storage configuration and measurement</a>.</li>
-</ul>
-
-<p>Full release notes are available in the release <a href="https://git-wip-us.apache.org/repos/asf?p=incubator-aurora.git&amp;f=CHANGELOG&amp;hb=0.6.0-rc2">CHANGELOG</a>.</p>
-
-<h2 id="highly-available,-scheduler-driven-updates">Highly-available, scheduler-driven updates</h2>
-
-<p>Rolling updates of services are a crucial feature in Aurora. As such, we
-want to take great care when changing their behavior. Previously, Aurora
-delegated this functionality to the client (or any other API client, for that
-matter). In this version, the scheduler can take over the
-responsibility of orchestrating application updates. Further details were discussed on the <a href="http://mail-archives.apache.org/mod_mbox/incubator-aurora-dev/201407.mbox/%3CCAGRA8uMxwVDokp_iHXhNru2gd-x_nM%2BDYAurpfAO6wuX7%3DnHFw%40mail.gmail.com%3E">Aurora mailing list</a>.</p>
-
-<h2 id="aurora-web-ui-improvements">Aurora Web UI Improvements</h2>
-
-<p>Since the scheduler can now orchestrate job updates, it has awareness of the progress and outcome of updates.  This means you can see a progress bar for in-flight updates, and the history of updates for your jobs.  Additionally, the performance of the UI was improved, especially for large roles and jobs <a href="https://issues.apache.org/jira/browse/AURORA-458">AURORA-458</a>.</p>
-
-<h2 id="service-announcement-and-management">Service Announcement and Management</h2>
-
-<p>Job configurations can now supply an <a href="http://aurora.incubator.apache.org/documentation/latest/configuration-reference/#announcer-objects"><code>announce</code> parameter</a>, which provides a way to opt in to registration in a service discovery system. This has been implemented in the Aurora executor, and will automatically announce tasks via ZooKeeper.</p>
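-
-<p>As a rough sketch (illustrative only; see the linked configuration reference for the authoritative parameter names), opting in might look like adding an <code>Announcer</code> object to a job definition:</p>
-<pre class="highlight text"># Hypothetical snippet in Aurora's Python-based job DSL;
-# 'Announcer' and 'primary_port' per the configuration reference linked above.
-jobs = [Service(
-  # ... cluster, role, environment, and task definition ...
-  announce = Announcer(primary_port = 'http')
-)]
-</pre>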
-
-<h2 id="aurora-client-improvements">Aurora Client Improvements</h2>
-
-<p>Progress was made on features for <a href="http://aurora.incubator.apache.org/documentation/latest/clientv2/">v2</a> of the <a href="http://aurora.incubator.apache.org/documentation/latest/client-commands/">Aurora client</a>. Both version 1 and version 2 are supported in 0.6.0, with version 1 to be removed in 0.7.0.</p>
-
-<h2 id="improved-project-documentation">Improved Project Documentation</h2>
-
-<p>New documentation pages include:</p>
-
-<ul>
-<li><a href="http://aurora.incubator.apache.org/documentation/latest/deploying-aurora-scheduler/">Deploying the Aurora scheduler</a></li>
-<li><a href="http://aurora.incubator.apache.org/documentation/latest/cron-jobs/">Cron jobs</a></li>
-<li><a href="http://aurora.incubator.apache.org/documentation/latest/sla/">SLA measurement</a></li>
-<li><a href="http://aurora.incubator.apache.org/documentation/latest/storage/">Storage</a></li>
-<li><a href="http://aurora.incubator.apache.org/documentation/latest/storage-config/">Storage configuration and measurement</a></li>
-<li><a href="http://aurora.incubator.apache.org/documentation/latest/monitoring/">Monitoring</a></li>
-</ul>
-
-<h2 id="getting-involved">Getting Involved</h2>
-
-<p>We encourage you to try out this release and let us know what you think. If you run into any issues, please let us know on the <a href="https://aurora.apache.org/community">user mailing list and IRC</a>. The community also holds weekly IRC meetings at 11AM Pacific every Monday that you are welcome to join.</p>
-
-<h2 id="thanks">Thanks</h2>
-
-<p>Thanks to the 16 contributors who made 0.6.0-incubating possible:</p>
-
-<p>Bill Farner, Maxim Khutornenko, Kevin Sweeney, Mark Chu-Carroll, Joshua Cohen, Zameer Manji, David McLaughlin, Brian Wickman, Joe Smith, Jake Farrell, Matthew Jeffryes, Dominic Hamon, Bjoern Metzdorf, Joseph Glanville, David Robinson, David Pan.</p>
-
-</div>
-</div>
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/blog/feed.xml b/publish/blog/feed.xml
deleted file mode 100644
index 8a83912..0000000
--- a/publish/blog/feed.xml
+++ /dev/null
@@ -1,78 +0,0 @@
-
-<feed xmlns="http://www.w3.org/2005/Atom">
-  <title>Apache Aurora Blog</title>
-  <id>http://aurora.incubator.apache.org/blog</id>
-  <link href="http://aurora.incubator.apache.org/blog" />
-  <link href="http://aurora.incubator.apache.org/blog/feed.xml" rel="self"/>
-  <updated>2014-12-08T00:00:00Z</updated>
-  
-  <entry>
-    <id>http://aurora.incubator.apache.org/blog/aurora-0-6-0-incubating-released/</id>
-    <link href="/blog/aurora-0-6-0-incubating-released/" />
-    <title>
-      Aurora 0.6.0-incubating Released
-    </title>
-    <updated>2014-12-08T00:00:00Z</updated>
-    <author>
-      <name>Bill Farner</name>
-    </author>
-    <content type="html">
-      &lt;p&gt;The latest Apache Aurora release, 0.6.0-incubating, is now available for download. This version marks the second Aurora release since becoming part of the &lt;a href=&quot;http://incubator.apache.org&quot;&gt;Apache Incubator&lt;/a&gt;, and includes the following features and improvements:&lt;/p&gt;
-
-&lt;ul&gt;
-&lt;li&gt;Introduced highly-available, scheduler-driven job updates in beta (&lt;a href=&quot;https://issues.apache.org/jira/browse/AURORA-610&quot;&gt;AURORA-610&lt;/a&gt;)&lt;/li&gt;
-&lt;li&gt;Improvements to the Aurora web UI, including visualization of job updates&lt;/li&gt;
-&lt;li&gt;Added automatic service registration in ZooKeeper (&lt;a href=&quot;https://issues.apache.org/jira/browse/AURORA-587&quot;&gt;AURORA-587&lt;/a&gt;)&lt;/li&gt;
-&lt;li&gt;Updates to Aurora client version two, with deprecation of v1 coming in a future release&lt;/li&gt;
-&lt;li&gt;Lots of new  &lt;a href=&quot;http://aurora.incubator.apache.org/documentation/latest/&quot;&gt;documentation&lt;/a&gt;. Documentation now includes pages for &lt;a href=&quot;http://aurora.incubator.apache.org/documentation/latest/deploying-aurora-scheduler/&quot;&gt;deploying the Aurora scheduler&lt;/a&gt;, &lt;a href=&quot;http://aurora.incubator.apache.org/documentation/latest/cron-jobs/&quot;&gt;cron jobs&lt;/a&gt;, &lt;a href=&quot;http://aurora.incubator.apache.org/documentation/latest/sla/&quot;&gt;SLA measurement&lt;/a&gt;, &lt;a href=&quot;http://aurora.incubator.apache.org/documentation/latest/storage/&quot;&gt;storage&lt;/a&gt;, and &lt;a href=&quot;http://aurora.incubator.apache.org/documentation/latest/storage-config/&quot;&gt;storage configuration and measurement&lt;/a&gt;.&lt;/li&gt;
-&lt;/ul&gt;
-
-&lt;p&gt;Full release notes are available in the release &lt;a href=&quot;https://git-wip-us.apache.org/repos/asf?p=incubator-aurora.git&amp;amp;f=CHANGELOG&amp;amp;hb=0.6.0-rc2&quot;&gt;CHANGELOG&lt;/a&gt;.&lt;/p&gt;
-
-&lt;h2 id=&quot;highly-available,-scheduler-driven-updates&quot;&gt;Highly-available, scheduler-driven updates&lt;/h2&gt;
-
-&lt;p&gt;Rolling updates of services are a crucial feature in Aurora. As such, we
-want to take great care when changing their behavior. Previously, Aurora
-delegated this functionality to the client (or any other API client, for that
-matter). In this version, the scheduler can take over the
-responsibility of orchestrating application updates. Further details were discussed on the &lt;a href=&quot;http://mail-archives.apache.org/mod_mbox/incubator-aurora-dev/201407.mbox/%3CCAGRA8uMxwVDokp_iHXhNru2gd-x_nM%2BDYAurpfAO6wuX7%3DnHFw%40mail.gmail.com%3E&quot;&gt;Aurora mailing list&lt;/a&gt;.&lt;/p&gt;
-
-&lt;h2 id=&quot;aurora-web-ui-improvements&quot;&gt;Aurora Web UI Improvements&lt;/h2&gt;
-
-&lt;p&gt;Since the scheduler can now orchestrate job updates, it has awareness of the progress and outcome of updates.  This means you can see a progress bar for in-flight updates, and the history of updates for your jobs.  Additionally, the performance of the UI was improved, especially for large roles and jobs &lt;a href=&quot;https://issues.apache.org/jira/browse/AURORA-458&quot;&gt;AURORA-458&lt;/a&gt;.&lt;/p&gt;
-
-&lt;h2 id=&quot;service-announcement-and-management&quot;&gt;Service Announcement and Management&lt;/h2&gt;
-
-&lt;p&gt;Job configurations can now supply an &lt;a href=&quot;http://aurora.incubator.apache.org/documentation/latest/configuration-reference/#announcer-objects&quot;&gt;&lt;code&gt;announce&lt;/code&gt; parameter&lt;/a&gt;, which provides a way to opt in to registration in a service discovery system. This has been implemented in the Aurora executor, and will automatically announce tasks via ZooKeeper.&lt;/p&gt;
-
-&lt;h2 id=&quot;aurora-client-improvements&quot;&gt;Aurora Client Improvements&lt;/h2&gt;
-
-&lt;p&gt;Progress was made on features for &lt;a href=&quot;http://aurora.incubator.apache.org/documentation/latest/clientv2/&quot;&gt;v2&lt;/a&gt; of the &lt;a href=&quot;http://aurora.incubator.apache.org/documentation/latest/client-commands/&quot;&gt;Aurora client&lt;/a&gt;. Both version 1 and version 2 are supported in 0.6.0, with version 1 to be removed in 0.7.0.&lt;/p&gt;
-
-&lt;h2 id=&quot;improved-project-documentation&quot;&gt;Improved Project Documentation&lt;/h2&gt;
-
-&lt;p&gt;New documentation pages include:&lt;/p&gt;
-
-&lt;ul&gt;
-&lt;li&gt;&lt;a href=&quot;http://aurora.incubator.apache.org/documentation/latest/deploying-aurora-scheduler/&quot;&gt;Deploying the Aurora scheduler&lt;/a&gt;&lt;/li&gt;
-&lt;li&gt;&lt;a href=&quot;http://aurora.incubator.apache.org/documentation/latest/cron-jobs/&quot;&gt;Cron jobs&lt;/a&gt;&lt;/li&gt;
-&lt;li&gt;&lt;a href=&quot;http://aurora.incubator.apache.org/documentation/latest/sla/&quot;&gt;SLA measurement&lt;/a&gt;&lt;/li&gt;
-&lt;li&gt;&lt;a href=&quot;http://aurora.incubator.apache.org/documentation/latest/storage/&quot;&gt;Storage&lt;/a&gt;&lt;/li&gt;
-&lt;li&gt;&lt;a href=&quot;http://aurora.incubator.apache.org/documentation/latest/storage-config/&quot;&gt;Storage configuration and measurement&lt;/a&gt;&lt;/li&gt;
-&lt;li&gt;&lt;a href=&quot;http://aurora.incubator.apache.org/documentation/latest/monitoring/&quot;&gt;Monitoring&lt;/a&gt;&lt;/li&gt;
-&lt;/ul&gt;
-
-&lt;h2 id=&quot;getting-involved&quot;&gt;Getting Involved&lt;/h2&gt;
-
-&lt;p&gt;We encourage you to try out this release and let us know what you think. If you run into any issues, please let us know on the &lt;a href=&quot;https://aurora.apache.org/community&quot;&gt;user mailing list and IRC&lt;/a&gt;. The community also holds weekly IRC meetings at 11AM Pacific every Monday that you are welcome to join.&lt;/p&gt;
-
-&lt;h2 id=&quot;thanks&quot;&gt;Thanks&lt;/h2&gt;
-
-&lt;p&gt;Thanks to the 16 contributors who made 0.6.0-incubating possible:&lt;/p&gt;
-
-&lt;p&gt;Bill Farner, Maxim Khutornenko, Kevin Sweeney, Mark Chu-Carroll, Joshua Cohen, Zameer Manji, David McLaughlin, Brian Wickman, Joe Smith, Jake Farrell, Matthew Jeffryes, Dominic Hamon, Bjoern Metzdorf, Joseph Glanville, David Robinson, David Pan.&lt;/p&gt;
-
-	</content>
-  </entry>
-  
-</feed>
\ No newline at end of file
diff --git a/publish/blog/index.html b/publish/blog/index.html
deleted file mode 100644
index d8a8975..0000000
--- a/publish/blog/index.html
+++ /dev/null
@@ -1,83 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Apache Aurora logo"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <div class="row-fluid">
-	<div class="col-md-3">
-		<h4>Apache Aurora Blog</h4>
-		<p>The Aurora blog is a place for release announcements and for project committers to highlight features of the software.</p>
-	</div>
-	<div class="col-md-9">
-		
-		  <article>
-		  	<h2><a href="/blog/aurora-0-6-0-incubating-released/">Aurora 0.6.0-incubating Released</a></h2>
-			<p><em>Posted by Bill Farner, December  8, 2014</em></p>
-		  </article>
-		
-	</div>
-</div>
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/community/index.html b/publish/community/index.html
deleted file mode 100644
index efd1594..0000000
--- a/publish/community/index.html
+++ /dev/null
@@ -1,104 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Apache Aurora logo"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h1 id="community">Community</h1>
-
-<div class="row-fluid">
-        <div class="col-md-4">
-        <h3>Contributing to Aurora</h3>
-        <h4 name="reportbugs">Report or track a bug</h4>
-        <p>Bugs can be reported on our <a href="http://issues.apache.org/jira/browse/AURORA">JIRA issue tracker</a>. In order to create a new issue, you&rsquo;ll need to register for an account.</p>
-
-        <h4 name="contribute">Submit a core patch</h4>
-        <p>Please follow our <a href="/docs/howtocontribute/">contribution guidelines</a> when submitting a patch. We welcome them!</p>
-    </div>
-    <div class="col-md-4">
-        <h3>Stay in Touch</h3>
-        <h4>Mailing lists</h4>
-        <p><strong>Developers</strong> - <a href="mailto:dev-subscribe@aurora.incubator.apache.org">Subscribe</a> | <a href="mailto:dev-unsubscribe@aurora.incubator.apache.org">Unsubscribe</a> | <a href="http://mail-archives.apache.org/mod_mbox/incubator-aurora-dev/">Archive</a><br />
-
-            <strong>Issues</strong> - <a href="mailto:issues-subscribe@aurora.incubator.apache.org">Subscribe</a> | <a href="mailto:issues-unsubscribe@aurora.incubator.apache.org">Unsubscribe</a> | <a href="http://mail-archives.apache.org/mod_mbox/incubator-aurora-issues/">Archive</a><br />
-
-            <strong>Commits</strong> - <a href="mailto:commits-subscribe@aurora.incubator.apache.org">Subscribe</a> | <a href="mailto:commits-unsubscribe@aurora.incubator.apache.org">Unsubscribe</a> | <a href="http://mail-archives.apache.org/mod_mbox/incubator-aurora-commits/">Archive</a><br />
-
-        <strong>Reviews</strong> - <a href="mailto:reviews-subscribe@aurora.incubator.apache.org">Subscribe</a> | <a href="mailto:reviews-unsubscribe@aurora.incubator.apache.org">Unsubscribe</a> | <a href="http://mail-archives.apache.org/mod_mbox/incubator-aurora-reviews/">Archive</a></p>
-
-        <h4 name="ircchannel">IRC</h4>
-        <p>Developers and users hang out in <code>#aurora</code> on <code>irc.freenode.net</code>. If you&rsquo;re new to IRC, we suggest trying a <a href="http://webchat.freenode.net/?channels=#aurora">web-based client</a>.</p>
-        
-        <h4>Join our weekly IRC meetings</h4>
-        <p>We hold weekly IRC meetings as an opportunity for users and developers to engage.  Meetings are every Monday at <a href="http://www.timeanddate.com/worldclock/fixedtime.html?msg=Aurora+Weekly+Meeting&iso=20140421T11&p1=224&ah=1">11AM Pacific Time</a>.</p>
-        </div>
-    <div class="col-md-4">
-        <h3>Follow the Project</h3>     
-        <a class="twitter-timeline" href="https://twitter.com/ApacheAurora" data-widget-id="512693636127920129">Tweets by @ApacheMesos</a>
-        <script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+"://platform.twitter.com/widgets.js";fjs.parentNode.insertBefore(js,fjs);}}(document,"script","twitter-wjs");</script>
-    </div>
-</div>
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/developers/index.html b/publish/developers/index.html
deleted file mode 100644
index d342218..0000000
--- a/publish/developers/index.html
+++ /dev/null
@@ -1,84 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Apache Aurora logo"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h1 id="developers">Developers</h1>
-
-<div class="row-fluid">
-  <h4 name="reportbugs">Report or track a bug</h4>
-  <p>New bugs can be reported on our <a href="http://issues.apache.org/jira/browse/AURORA">JIRA issue tracker</a>. In order to create a new issue, you&rsquo;ll need to sign up for an account.</p>
-
-  <h4 name="contribute">Contribute a core patch</h4>
-  <p>Follow our <a href="/docs/howtocontribute/">contribution guidelines</a> when submitting a patch.</p>
-
-  <h4 name="ircchannel">IRC Channel</h4>
-    <p>Many of the Aurora developers and users chat in the #aurora channel on irc.freenode.net.</p>
-    
-    <p>If you are new to IRC, you can use a <a href="http://webchat.freenode.net/?channels=#aurora">web-based client</a>.</p>
-</div>
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/docs/gettingstarted/index.html b/publish/docs/gettingstarted/index.html
deleted file mode 100644
index bb5d516..0000000
--- a/publish/docs/gettingstarted/index.html
+++ /dev/null
@@ -1,73 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Apache Aurora logo"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h1 id="getting-started">Getting Started</h1>
-
-<p>The Aurora &lsquo;Getting Started&rsquo; guide is <a href="https://github.com/apache/incubator-aurora/blob/master/README.md">currently available on GitHub</a> while the project moves its documentation to the Apache Software Foundation.</p>
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/docs/howtocontribute/index.html b/publish/docs/howtocontribute/index.html
deleted file mode 100644
index 7bcf57f..0000000
--- a/publish/docs/howtocontribute/index.html
+++ /dev/null
@@ -1,115 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Apache Aurora logo"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h1 id="how-to-contribute">How To Contribute</h1>
-
-<h2 id="getting-your-reviewboard-account">Getting your ReviewBoard Account</h2>
-
-<p>Go to <a href="https://reviews.apache.org">https://reviews.apache.org</a> and create an account.</p>
-
-<h2 id="setting-up-your-reviewboard-environment">Setting up your ReviewBoard Environment</h2>
-
-<p>Run <code>./rbt status</code>. The first time this runs it will bootstrap and you will be asked to login.
-Subsequent runs will cache your login credentials.</p>
-
-<h2 id="submitting-a-patch-for-review">Submitting a Patch for Review</h2>
-
-<p>Post a review with <code>rbt</code>, fill out the fields in your browser and hit Publish.</p>
-<pre class="highlight text">./rbt post -o -g
-</pre>
-<h2 id="updating-an-existing-review">Updating an Existing Review</h2>
-
-<p>Incorporate review feedback, make some more commits, update your existing review, fill out the
-fields in your browser and hit Publish.</p>
-<pre class="highlight text">./rbt post -o -r &lt;RB_ID&gt;
-</pre>
-<h2 id="merging-your-own-review-(committers)">Merging Your Own Review (Committers)</h2>
-
-<p>Once you have shipits from the right committers, merge your changes in a single commit and mark
-the review as submitted. The typical workflow is:</p>
-<pre class="highlight text">git checkout master
-git pull origin master
-./rbt patch -c &lt;RB_ID&gt;  # Verify the automatically-generated commit message looks sane,
-                        # editing if necessary.
-git show master         # Verify everything looks sane
-git push origin master
-./rbt close &lt;RB_ID&gt;
-</pre>
-<p>Note that even if you&rsquo;re developing on feature branches, you will not use <code>git merge</code>: each
-commit will be an atomic change accompanied by a ReviewBoard entry.</p>
-
-<h2 id="merging-someone-else&#39;s-review">Merging Someone Else&rsquo;s Review</h2>
-
-<p>Sometimes you&rsquo;ll need to merge someone else&rsquo;s RB. The typical workflow for this is:</p>
-<pre class="highlight text">git checkout master
-git pull origin master
-./rbt patch -c &lt;RB_ID&gt;
-git show master  # Verify everything looks sane, author is correct
-git push origin master
-</pre>
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/documentation/latest/client-cluster-configuration/index.html b/publish/documentation/latest/client-cluster-configuration/index.html
deleted file mode 100644
index c8f303d..0000000
--- a/publish/documentation/latest/client-cluster-configuration/index.html
+++ /dev/null
@@ -1,183 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Apache Aurora logo"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h1 id="client-cluster-configuration">Client Cluster Configuration</h1>
-
-<p>A cluster configuration file is used by the Aurora client to describe the Aurora clusters with
-which it can communicate. Ultimately this allows client users to reference clusters with short names
-like us-east and eu. The following properties may be set:</p>
-
-<table><thead>
-<tr>
-<th style="text-align: left"><strong>Property</strong></th>
-<th style="text-align: left"><strong>Type</strong></th>
-<th style="text-align: left"><strong>Description</strong></th>
-</tr>
-</thead><tbody>
-<tr>
-<td style="text-align: left"><strong>name</strong></td>
-<td style="text-align: left">String</td>
-<td style="text-align: left">Cluster name (Required)</td>
-</tr>
-<tr>
-<td style="text-align: left"><strong>slave_root</strong></td>
-<td style="text-align: left">String</td>
-<td style="text-align: left">Path to mesos slave work dir (Required)</td>
-</tr>
-<tr>
-<td style="text-align: left"><strong>slave<em>run</em>directory</strong></td>
-<td style="text-align: left">String</td>
-<td style="text-align: left">Name of mesos slave run dir (Required)</td>
-</tr>
-<tr>
-<td style="text-align: left"><strong>zk</strong></td>
-<td style="text-align: left">String</td>
-<td style="text-align: left">Hostname of ZooKeeper instance used to resolve Aurora schedulers.</td>
-</tr>
-<tr>
-<td style="text-align: left"><strong>zk_port</strong></td>
-<td style="text-align: left">Integer</td>
-<td style="text-align: left">Port of ZooKeeper instance used to locate Aurora schedulers (Default: 2181)</td>
-</tr>
-<tr>
-<td style="text-align: left"><strong>scheduler<em>zk</em>path</strong></td>
-<td style="text-align: left">String</td>
-<td style="text-align: left">ZooKeeper path under which scheduler instances are registered.</td>
-</tr>
-<tr>
-<td style="text-align: left"><strong>scheduler_uri</strong></td>
-<td style="text-align: left">String</td>
-<td style="text-align: left">URI of Aurora scheduler instance.</td>
-</tr>
-<tr>
-<td style="text-align: left"><strong>proxy_url</strong></td>
-<td style="text-align: left">String</td>
-<td style="text-align: left">Used by the client to format URLs for display.</td>
-</tr>
-<tr>
-<td style="text-align: left"><strong>auth_mechanism</strong></td>
-<td style="text-align: left">String</td>
-<td style="text-align: left">The authentication mechanism to use when communicating with the scheduler. (Default: UNAUTHENTICATED)</td>
-</tr>
-</tbody></table>
-
-<h4 id="name">name</h4>
-
-<p>The name of the Aurora cluster represented by this entry. This name will be the <code>cluster</code> portion of
-any job keys identifying jobs running within the cluster.</p>
-
-<h4 id="slave_root">slave_root</h4>
-
-<p>The path on the mesos slaves where executing tasks can be found. It is used in combination with the
-<code>slave_run_directory</code> property by <code>aurora task run</code> and <code>aurora task ssh</code> to change into the sandbox
-directory after connecting to the host. This value should match the value passed to <code>mesos-slave</code>
-as <code>-work_dir</code>.</p>
-
-<h4 id="slaverundirectory">slave<em>run</em>directory</h4>
-
-<p>The name of the directory where the task run can be found. This is used in combination with the
-<code>slave_root</code> property by <code>aurora task run</code> and <code>aurora task ssh</code> to change into the sandbox
-directory after connecting to the host. This should almost always be set to <code>latest</code>.</p>
-
-<h4 id="zk">zk</h4>
-
-<p>The hostname of the ZooKeeper instance used to resolve the Aurora scheduler. Aurora uses ZooKeeper
-to elect a leader. The client will connect to this ZooKeeper instance to determine the current
-leader. This host should match the host passed to the scheduler as <code>-zk_endpoints</code>.</p>
-
-<h4 id="zk_port">zk_port</h4>
-
-<p>The port on which the ZooKeeper instance is running. If not set this will default to the standard
-ZooKeeper port of 2181. This port should match the port in the host passed to the scheduler as
-<code>-zk_endpoints</code>.</p>
-
-<h4 id="schedulerzkpath">scheduler<em>zk</em>path</h4>
-
-<p>The path on the ZooKeeper instance under which the Aurora serverset is registered. This value should
-match the value passed to the scheduler as <code>-serverset_path</code>.</p>
-
-<h4 id="scheduler_uri">scheduler_uri</h4>
-
-<p>The URI of the scheduler. This would be used in place of the ZooKeeper related configuration above
-in circumstances where direct communication with a single scheduler is needed (e.g. testing
-environments). It is strongly advised to <strong>never</strong> use this property for production deploys.</p>
-
-<h4 id="proxy_url">proxy_url</h4>
-
-<p>If <code>proxy_url</code> is set, its value will be used as the base URL instead of the hostname of the
-leading scheduler. In that scenario the value for <code>proxy_url</code> would be, for example, the
-URL of your VIP in a load balancer or a round-robin DNS name.</p>
-
-<h4 id="auth_mechanism">auth_mechanism</h4>
-
-<p>The identifier of an authentication mechanism that the client should use when communicating with the
-scheduler. Support for values other than <code>UNAUTHENTICATED</code> is currently not available; however, work
-is <a href="https://issues.apache.org/jira/browse/AURORA-720">in progress</a> to add federated authentication
-to Aurora, which will allow for more robust configuration of client/scheduler authentication.</p>
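-
-<p>Putting the properties above together, a minimal cluster entry might look like the following sketch (illustrative values only; clusters are typically defined as a JSON list, e.g. in a <code>clusters.json</code> file read by the client):</p>
-<pre class="highlight text">[{
-  "name": "devcluster",
-  "zk": "192.168.33.7",
-  "scheduler_zk_path": "/aurora/scheduler",
-  "slave_root": "/var/lib/mesos",
-  "slave_run_directory": "latest",
-  "auth_mechanism": "UNAUTHENTICATED"
-}]
-</pre>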
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/documentation/latest/client-commands/index.html b/publish/documentation/latest/client-commands/index.html
deleted file mode 100644
index 707e937..0000000
--- a/publish/documentation/latest/client-commands/index.html
+++ /dev/null
@@ -1,586 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Transparent Apache Aurora logo with dark background"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h1 id="aurora-client-commands">Aurora Client Commands</h1>
-
-<p>The most up-to-date reference is in the client itself: use the
-<code>aurora help</code> subcommand (for example, <code>aurora help</code> or
-<code>aurora help create</code>) to find the latest information on parameters and
-functionality. Note that <code>aurora help open</code> does not work, due to underlying issues with
-reflection.</p>
-
-<ul>
-<li><a href="#introduction">Introduction</a></li>
-<li><a href="#cluster-configuration">Cluster Configuration</a></li>
-<li><a href="#job-keys">Job Keys</a></li>
-<li><a href="#modifying-aurora-client-commands">Modifying Aurora Client Commands</a></li>
-<li><a href="#regular-jobs">Regular Jobs</a>
-
-<ul>
-<li><a href="#creating-and-running-a-job">Creating and Running a Job</a></li>
-<li><a href="#running-a-command-on-a-running-job">Running a Command On a Running Job</a></li>
-<li><a href="#killing-a-job">Killing a Job</a></li>
-<li><a href="#updating-a-job">Updating a Job</a>
-
-<ul>
-<li><a href="#user-content-asynchronous-job-updates-beta">Asynchronous job updates (beta)</a></li>
-</ul></li>
-<li><a href="#renaming-a-job">Renaming a Job</a></li>
-<li><a href="#restarting-jobs">Restarting Jobs</a></li>
-</ul></li>
-<li><a href="#cron-jobs">Cron Jobs</a></li>
-<li><a href="#comparing-jobs">Comparing Jobs</a></li>
-<li><a href="#viewingexamining-jobs">Viewing/Examining Jobs</a>
-
-<ul>
-<li><a href="#listing-jobs">Listing Jobs</a></li>
-<li><a href="#inspecting-a-job">Inspecting a Job</a></li>
-<li><a href="#versions">Versions</a></li>
-<li><a href="#checking-your-quota">Checking Your Quota</a></li>
-<li><a href="#finding-a-job-on-web-ui">Finding a Job on Web UI</a></li>
-<li><a href="#getting-job-status">Getting Job Status</a></li>
-<li><a href="#opening-the-web-ui">Opening the Web UI</a></li>
-<li><a href="#sshing-to-a-specific-task-machine">SSHing to a Specific Task Machine</a></li>
-<li><a href="#templating-command-arguments">Templating Command Arguments</a></li>
-</ul></li>
-</ul>
-
-<h2 id="introduction">Introduction</h2>
-
-<p>Once you have written an <code>.aurora</code> configuration file that describes
-your Job and its parameters and functionality, you interact with Aurora
-using Aurora Client commands. This document describes all of these commands
-and how and when to use them. All Aurora Client commands start with
-<code>aurora</code>, followed by the name of the specific command and its
-arguments.</p>
-
-<p><em>Job keys</em> are a very common argument to Aurora commands, as well as the
-gateway to useful information about a Job. Before using Aurora, you
-should read the next section which describes them in detail. The section
-after that briefly describes how you can modify the behavior of certain
-Aurora Client commands, linking to a detailed document about how to do
-that.</p>
-
-<p>This is followed by the Regular Jobs section, which describes the basic
-Client commands for creating, running, and manipulating Aurora Jobs.
-After that are sections on Comparing Jobs and Viewing/Examining Jobs. In
-other words, various commands for getting information and metadata about
-Aurora Jobs.</p>
-
-<h2 id="cluster-configuration">Cluster Configuration</h2>
-
-<p>The client must be able to find a configuration file that specifies available clusters. This file
-declares shorthand names for clusters, which are in turn referenced by job configuration files
-and client commands.</p>
-
-<p>The client will load at most two configuration files, making both of their defined clusters
-available. The first is intended to be a system-installed cluster, using the path specified in
-the environment variable <code>AURORA_CONFIG_ROOT</code>, defaulting to <code>/etc/aurora/clusters.json</code> if the
-environment variable is not set. The second is a user-installed file, located at
-<code>~/.aurora/clusters.json</code>.</p>
-
-<p>A cluster configuration is formatted as JSON.  The simplest cluster configuration is one that
-communicates with a single (non-leader-elected) scheduler.  For example:</p>
-<pre class="highlight javascript"><span class="p">[{</span>
-  <span class="s2">&quot;name&quot;</span><span class="err">:</span> <span class="s2">&quot;example&quot;</span><span class="p">,</span>
-  <span class="s2">&quot;scheduler_uri&quot;</span><span class="err">:</span> <span class="s2">&quot;localhost:55555&quot;</span><span class="p">,</span>
-<span class="p">}]</span>
-</pre>
-<p>A configuration for a leader-elected scheduler would contain something like:</p>
-<pre class="highlight javascript"><span class="p">[{</span>
-  <span class="s2">&quot;name&quot;</span><span class="err">:</span> <span class="s2">&quot;example&quot;</span><span class="p">,</span>
-  <span class="s2">&quot;zk&quot;</span><span class="err">:</span> <span class="s2">&quot;192.168.33.7&quot;</span><span class="p">,</span>
-  <span class="s2">&quot;scheduler_zk_path&quot;</span><span class="err">:</span> <span class="s2">&quot;/aurora/scheduler&quot;</span>
-<span class="p">}]</span>
-</pre>
-<p>For more details on cluster configuration see the
-<a href="/documentation/latest/client-cluster-configuration/">Client Cluster Configuration</a> documentation.</p>
-
-<h2 id="job-keys">Job Keys</h2>
-
-<p>A job key is a unique system-wide identifier for an Aurora-managed
-Job, for example <code>cluster1/web-team/test/experiment204</code>. It is a 4-tuple
-consisting of, in order, <em>cluster</em>, <em>role</em>, <em>environment</em>, and
-<em>jobname</em>, separated by slashes. Cluster is the name of an Aurora
-cluster. Role is the Unix service account under which the Job
-runs. Environment is a namespace component like <code>devel</code>, <code>test</code>,
-<code>prod</code>, or <code>stagingN</code>. Jobname is the Job&rsquo;s name.</p>
-
-<p>The combination of all four values uniquely specifies the Job. If any
-one value is different from that of another job key, the two job keys
-refer to different Jobs. For example, job key
-<code>cluster1/tyg/prod/workhorse</code> is different from
-<code>cluster1/tyg/prod/workcamel</code> is different from
-<code>cluster2/tyg/prod/workhorse</code> is different from
-<code>cluster2/foo/prod/workhorse</code> is different from
-<code>cluster1/tyg/test/workhorse</code>.</p>
-
-<p>Role names are user accounts existing on the slave machines. If you don&rsquo;t know what accounts
-are available, contact your sysadmin.</p>
-
-<p>Environment names are namespaces; you can count on <code>prod</code>, <code>devel</code> and <code>test</code> existing.</p>
-
-<h2 id="modifying-aurora-client-commands">Modifying Aurora Client Commands</h2>
-
-<p>For certain Aurora Client commands, you can define hook methods that run
-either before or after an action that takes place during the command&rsquo;s
-execution, as well as based on whether the action finished successfully or failed
-during execution. Basically, a hook is code that lets you extend the
-command&rsquo;s actions. The hook executes on the client side, specifically on
-the machine executing Aurora commands.</p>
-
-<p>Hooks can be associated with these Aurora Client commands.</p>
-
-<ul>
-<li><code>cancel_update</code></li>
-<li><code>create</code></li>
-<li><code>kill</code></li>
-<li><code>restart</code></li>
-<li><code>update</code></li>
-</ul>
-
-<p>The process for writing and activating them is complex enough
-that we explain it in a dedicated document, <a href="/documentation/latest/hooks/">Hooks for Aurora Client API</a>.</p>
-
-<h2 id="regular-jobs">Regular Jobs</h2>
-
-<p>This section covers Aurora commands related to running, killing,
-renaming, updating, and restarting a basic Aurora Job.</p>
-
-<h3 id="creating-and-running-a-job">Creating and Running a Job</h3>
-<pre class="highlight text">aurora create &lt;job key&gt; &lt;configuration file&gt;
-</pre>
-<p>Creates and then runs a Job with the specified job key based on a <code>.aurora</code> configuration file.
-The configuration file may also contain and activate hook definitions.</p>
-
-<p><code>create</code> can take four named parameters:</p>
-
-<ul>
-<li><code>-E NAME=VALUE</code> Bind a Thermos mustache variable name to a
-value. Multiple flags specify multiple values. Defaults to <code>[]</code>.</li>
-<li><code>-o, --open_browser</code> Open a browser window to the scheduler UI Job
-page after a job changing operation happens. When <code>False</code>, the Job
-URL prints on the console and the user has to copy/paste it
-manually. Defaults to <code>False</code>. Does not work when running in Vagrant.</li>
-<li><code>-j, --json</code> If specified, configuration argument is read as a
-string in JSON format. Defaults to False.</li>
-<li><code>--wait_until=STATE</code> Block the client until all the Tasks have
-transitioned into the requested state. Possible values are: <code>PENDING</code>,
-<code>RUNNING</code>, <code>FINISHED</code>. Default: <code>PENDING</code></li>
-</ul>
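-
-<p>For example, a hypothetical invocation that binds a mustache variable named
-<code>profile</code> and blocks until all Tasks are running might look like:</p>
-<pre class="highlight text">aurora create --wait_until=RUNNING -E profile=devel devcluster/www-data/devel/hello hello_world.aurora
-</pre>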
-
-<h3 id="running-a-command-on-a-running-job">Running a Command On a Running Job</h3>
-<pre class="highlight text">aurora run &lt;job_key&gt; &lt;cmd&gt;
-</pre>
-<p>Runs a shell command on all machines currently hosting shards of a
-single Job.</p>
-
-<p><code>run</code> supports the same command line wildcards used to populate a Job&rsquo;s
-commands; i.e. anything in the <code>{{mesos.*}}</code> and <code>{{thermos.*}}</code>
-namespaces.</p>
-
-<p><code>run</code> can take three named parameters:</p>
-
-<ul>
-<li><code>-t NUM_THREADS</code>, <code>--threads=NUM_THREADS</code> The number of threads to
-use, defaulting to <code>1</code>.</li>
-<li><code>--user=SSH_USER</code> ssh as this user instead of the given role value.
-Defaults to None.</li>
-<li><code>-e, --executor_sandbox</code>  Run the command in the executor sandbox
-instead of the Task sandbox. Defaults to False.</li>
-</ul>
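-
-<p>For example, to check disk usage in every Task sandbox of a hypothetical job, four
-machines at a time:</p>
-<pre class="highlight text">aurora run -t4 devcluster/www-data/prod/hello &#39;du -sh .&#39;
-</pre>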
-
-<h3 id="killing-a-job">Killing a Job</h3>
-<pre class="highlight text">aurora kill &lt;job key&gt; &lt;configuration file&gt;
-</pre>
-<p>Kills all Tasks associated with the specified Job, blocking until all
-are terminated. Defaults to killing all shards in the Job.</p>
-
-<p>The <code>&lt;configuration file&gt;</code> argument for <code>kill</code> is optional. Use it only
-if it contains hook definitions and activations that affect the
-kill command.</p>
-
-<p><code>kill</code> can take two named parameters:</p>
-
-<ul>
-<li><code>-o, --open_browser</code> Open a browser window to the scheduler UI Job
-page after a job changing operation happens. When <code>False</code>, the Job
-URL prints on the console and the user has to copy/paste it
-manually. Defaults to <code>False</code>. Does not work when running in Vagrant.</li>
-<li><code>--shards=SHARDS</code> A list of shard ids to act on. Can either be a
-comma-separated list (e.g. 0,1,2) or a range (e.g. 0-2) or  any
-combination of the two (e.g. 0-2,5,7-9). Defaults to acting on all
-shards.</li>
-</ul>
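-
-<p>For example, to kill only shards 0 through 2 of a hypothetical job:</p>
-<pre class="highlight text">aurora kill --shards=0-2 devcluster/www-data/prod/hello
-</pre>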
-
-<h3 id="updating-a-job">Updating a Job</h3>
-<pre class="highlight text">aurora update [--shards=ids] &lt;job key&gt; &lt;configuration file&gt;
-aurora cancel_update &lt;job key&gt; &lt;configuration file&gt;
-</pre>
-<p>Given a running job, does a rolling update to reflect a new
-configuration version. Only updates Tasks in the Job with a changed
-configuration. You can further restrict the Tasks operated on by
-using <code>--shards</code> and specifying a comma-separated list of job shard ids.</p>
-
-<p>You may want to run <code>aurora diff</code> beforehand to validate which Tasks
-have different configurations.</p>
-
-<p>A Job being updated is locked to ensure the update finishes without
-disruption. If the update terminates abnormally, the lock may stay
-around and cause subsequent update attempts to fail.
-<code>aurora cancel_update</code> unlocks the Job specified by
-its <code>job_key</code> argument. Be sure you don&rsquo;t issue <code>cancel_update</code> when
-another user is working with the specified Job.</p>
-
-<p>The <code>&lt;configuration file&gt;</code> argument for <code>cancel_update</code> is optional. Use
-it only if it contains hook definitions and activations that affect the
-<code>cancel_update</code> command. The <code>&lt;configuration file&gt;</code> argument for
-<code>update</code> is required, but in addition to a new configuration it can be
-used to define and activate hooks for <code>update</code>.</p>
-
-<p><code>update</code> can take four named parameters:</p>
-
-<ul>
-<li><code>--shards=SHARDS</code> A list of shard ids to update. Can either be a
-comma-separated list (e.g. 0,1,2) or a range (e.g. 0-2) or  any
-combination of the two (e.g. 0-2,5,7-9). If not  set, all shards are
-acted on. Defaults to None.</li>
-<li><code>-E NAME=VALUE</code> Binds a Thermos mustache variable name to a value.
-Use multiple flags to specify multiple values. Defaults to <code>[]</code>.</li>
-<li><code>-j, --json</code> If specified, configuration is read in JSON format.
-Defaults to <code>False</code>.</li>
-<li><code>--updater_health_check_interval_seconds=HEALTH_CHECK_INTERVAL_SECONDS</code>
-Time interval between subsequent shard status checks. Defaults to <code>3</code>.</li>
-</ul>
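-
-<p>For example, rolling a revised configuration out to shards 0-2 and 5 of a
-hypothetical job might look like:</p>
-<pre class="highlight text">aurora update --shards=0-2,5 devcluster/www-data/prod/hello hello_world.aurora
-</pre>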
-
-<h4 id="asynchronous-job-updates-(beta)">Asynchronous job updates (beta)</h4>
-
-<p>As of 0.6.0, Aurora will coordinate updates (and rollbacks) within the
-scheduler. Performing updates this way also allows the scheduler to display
-update progress and job update history in the browser.</p>
-
-<p>There are several sub-commands to manage job updates:</p>
-<pre class="highlight text">aurora2 beta-update start &lt;job key&gt; &lt;configuration file
-aurora2 beta-update status &lt;job key&gt;
-aurora2 beta-update pause &lt;job key&gt;
-aurora2 beta-update resume &lt;job key&gt;
-aurora2 beta-update abort &lt;job key&gt;
-aurora2 beta-update list &lt;cluster&gt;
-</pre>
-<p>When you <code>start</code> a job update, the command will return once it has sent the
-instructions to the scheduler.  At that point, you may view detailed
-progress for the update with the <code>status</code> subcommand, in addition to viewing
-graphical progress in the web browser.  You may also get a full listing of
-in-progress updates in a cluster with <code>list</code>.</p>
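-
-<p>A hypothetical session that starts an update and then checks on its progress might
-look like:</p>
-<pre class="highlight text">aurora2 beta-update start devcluster/www-data/prod/hello hello_world.aurora
-aurora2 beta-update status devcluster/www-data/prod/hello
-</pre>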
-
-<p>Once an update has been started, you can <code>pause</code> to keep the update but halt
-progress.  This can be useful for doing things like debugging a partially-updated
-job to determine whether you would like to proceed.  You can <code>resume</code> to
-proceed.</p>
-
-<p>You may <code>abort</code> a job update regardless of the state it is in. This will
-instruct the scheduler to completely abandon the job update and leave the job
-in the current (possibly partially-updated) state.</p>
-
-<h3 id="renaming-a-job">Renaming a Job</h3>
-
-<p>Renaming is a tricky operation as downstream clients must be informed of
-the new name. A conservative approach
-to renaming suitable for production services is:</p>
-
-<ol>
-<li> Modify the Aurora configuration file to change the role,
-environment, and/or name as appropriate to the standardized naming
-scheme.</li>
-<li><p>Check that only these naming components have changed
-with <code>aurora diff</code>.</p>
-<pre class="highlight text">aurora diff &lt;job_key&gt; &lt;job_configuration&gt;
-</pre></li>
-<li><p>Create the (identical) job at the new key. You may need to request a
-temporary quota increase.</p>
-<pre class="highlight text">aurora create &lt;new_job_key&gt; &lt;job_configuration&gt;
-</pre></li>
-<li><p>Migrate all clients over to the new job key. Update all links and
-dashboards. Ensure that both job keys run identical versions of the
-code while in this state.</p></li>
-<li><p>After verifying that all clients have successfully moved over, kill
-the old job.</p>
-<pre class="highlight text">aurora kill &lt;old_job_key&gt;
-</pre></li>
-<li><p>If you received a temporary quota increase, be sure to let the
-powers that be know you no longer need the additional capacity.</p></li>
-</ol>
-
-<h3 id="restarting-jobs">Restarting Jobs</h3>
-
-<p><code>restart</code> restarts all shards of the Job identified by the given job key:</p>
-<pre class="highlight text">aurora restart &lt;job_key&gt; &lt;configuration file&gt;
-</pre>
-<p>Restarts are controlled on the client side, so aborting
-the <code>restart</code> command halts the restart operation.</p>
-
-<p><code>restart</code> does a rolling restart. You almost always want to do this, but
-not if all shards of a service are misbehaving and are
-completely dysfunctional. To avoid a rolling restart, use
-the <code>--shards</code> option described below.</p>
-
-<p><strong>Note</strong>: <code>restart</code> only applies its command line arguments; it neither
-uses nor is affected by <code>update.config</code>. Restarting
-does <strong><em>not</em></strong> involve a configuration change. To update the
-configuration, use <code>update.config</code>.</p>
-
-<p>The <code>&lt;configuration file&gt;</code> argument for restart is optional. Use it only
-if it contains hook definitions and activations that affect the
-<code>restart</code> command.</p>
-
-<p>In addition to the required job key argument, there are eight
-<code>restart</code> specific optional arguments:</p>
-
-<ul>
-<li><code>--updater_health_check_interval_seconds</code>: Defaults to <code>3</code>, the time
-interval between subsequent shard status checks.</li>
-<li><code>--shards=SHARDS</code>: Defaults to None, which restarts all shards.
-Otherwise, only the specified-by-id shards restart. They can be
-comma-separated <code>(0, 8, 9)</code>, a range <code>(3-5)</code> or a
-combination <code>(0, 3-5, 8, 9-11)</code>.</li>
-<li><code>--batch_size</code>: Defaults to <code>1</code>, the number of shards to be started
-in one iteration. So, for example, for value 3, it tries to restart
-the first three shards specified by <code>--shards</code> simultaneously, then
-the next three, and so on.</li>
-<li><code>--max_per_shard_failures=MAX_PER_SHARD_FAILURES</code>: Defaults to <code>0</code>,
-the maximum number of restarts per shard during restart. When
-exceeded, it increments the total failure count.</li>
-<li><code>--max_total_failures=MAX_TOTAL_FAILURES</code>: Defaults to <code>0</code>, the
-maximum total number of shard failures tolerated during restart.</li>
-<li><code>-o, --open_browser</code> Open a browser window to the scheduler UI Job
-page after a job changing operation happens. When <code>False</code>, the Job
-url prints on the console and the user has to copy/paste it
-manually. Defaults to <code>False</code>. Does not work when running in Vagrant.</li>
-<li><code>--restart_threshold</code>: Defaults to <code>60</code>, the maximum number of
-seconds a shard may take to move into the <code>RUNNING</code> state before
-it&rsquo;s considered a failure.</li>
-<li><code>--watch_secs</code>: Defaults to <code>45</code>, the minimum number of seconds a
-shard must remain in the <code>RUNNING</code> state before it is considered a success.</li>
-</ul>
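-
-<p>For example, restarting shards 0-4 of a hypothetical job two at a time, tolerating at
-most one failed restart attempt per shard, might look like:</p>
-<pre class="highlight text">aurora restart --shards=0-4 --batch_size=2 --max_per_shard_failures=1 devcluster/www-data/prod/hello
-</pre>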
-
-<h2 id="cron-jobs">Cron Jobs</h2>
-
-<p>You will see various commands and options relating to cron jobs in
-<code>aurora help</code> and similar. Ignore them, as they&rsquo;re not yet implemented.
-You might be able to use them without causing an error, but nothing happens
-if you do.</p>
-
-<h2 id="comparing-jobs">Comparing Jobs</h2>
-<pre class="highlight text">aurora diff &lt;job_key&gt; config
-</pre>
-<p>Compares a job configuration against a running job. By default the diff
-is determined using <code>diff</code>, though you may choose an alternate
- diff program by specifying the <code>DIFF_VIEWER</code> environment variable.</p>
-
-<p>There are two named parameters:</p>
-
-<ul>
-<li><code>-E NAME=VALUE</code> Bind a Thermos mustache variable name to a
-value. Multiple flags may be used to specify multiple values.
-Defaults to <code>[]</code>.</li>
-<li><code>-j, --json</code> Read the configuration argument in JSON format.
-Defaults to <code>False</code>.</li>
-</ul>
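-
-<p>For example, diffing a hypothetical job with an alternate viewer such as <code>vimdiff</code>:</p>
-<pre class="highlight text">DIFF_VIEWER=vimdiff aurora diff devcluster/www-data/prod/hello hello_world.aurora
-</pre>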
-
-<h2 id="viewing/examining-jobs">Viewing/Examining Jobs</h2>
-
-<p>Above we discussed creating, killing, and updating Jobs. Here we discuss
-how to view and examine Jobs.</p>
-
-<h3 id="listing-jobs">Listing Jobs</h3>
-<pre class="highlight text">aurora list_jobs
-Usage: aurora list_jobs cluster/role
-</pre>
-<p>Lists all Jobs registered with the Aurora scheduler in the named cluster for the named role.</p>
-
-<p>It has two named parameters:</p>
-
-<ul>
-<li><code>--pretty</code>: Displays job information in prettyprinted format.
-Defaults to <code>False</code>.</li>
-<li><code>-c</code>, <code>--show-cron</code>: Shows cron schedule for jobs. Defaults to
-<code>False</code>. Do not use, as it&rsquo;s not yet implemented.</li>
-</ul>
-
-<h3 id="inspecting-a-job">Inspecting a Job</h3>
-<pre class="highlight text">aurora inspect &lt;job_key&gt; config
-</pre>
-<p><code>inspect</code> verifies that its specified job can be parsed from a
-configuration file, and displays the parsed configuration. It has four
-named parameters:</p>
-
-<ul>
-<li><code>--local</code>: Inspect the configuration that the  <code>spawn</code> command would
-create, defaulting to <code>False</code>.</li>
-<li><code>--raw</code>: Shows the raw configuration. Defaults to <code>False</code>.</li>
-<li><code>-j</code>, <code>--json</code>: If specified, configuration is read in JSON format.
-Defaults to <code>False</code>.</li>
-<li><code>-E NAME=VALUE</code>: Bind a Thermos Mustache variable name to a value.
-You can use multiple flags to specify multiple values. Defaults
-to <code>[]</code></li>
-</ul>
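-
-<p>For example, to print the raw parsed form of a hypothetical job configuration:</p>
-<pre class="highlight text">aurora inspect --raw devcluster/www-data/prod/hello hello_world.aurora
-</pre>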
-
-<h3 id="versions">Versions</h3>
-<pre class="highlight text">aurora version
-</pre>
-<p>Lists client build information and what Aurora API version it supports.</p>
-
-<h3 id="checking-your-quota">Checking Your Quota</h3>
-<pre class="highlight text">aurora get_quota --cluster=CLUSTER role
-</pre>
-<p>Prints the production quota allocated to the role at the given
-cluster.</p>
-
-<h3 id="finding-a-job-on-web-ui">Finding a Job on Web UI</h3>
-
-<p>When you create a job, part of the output response contains a URL that goes
-to the job&rsquo;s scheduler UI page. For example:</p>
-<pre class="highlight text">vagrant@precise64:~$ aurora create devcluster/www-data/prod/hello /vagrant/examples/jobs/hello_world.aurora
-INFO] Creating job hello
-INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
-INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
-</pre>
-<p>You can go to the scheduler UI page for this job via <code>http://precise64:8081/scheduler/www-data/prod/hello</code>.
-You can go to the overall scheduler UI page by truncating that URL after <code>scheduler</code>: <code>http://precise64:8081/scheduler</code>.</p>
-
-<p>Once you click through to a role page, you see Jobs arranged
-separately by pending jobs, active jobs and finished jobs.
-Jobs are arranged by role, typically a service account for
-production jobs and user accounts for test or development jobs.</p>
-
-<h3 id="getting-job-status">Getting Job Status</h3>
-<pre class="highlight text">aurora status &lt;job_key&gt;
-</pre>
-<p>Returns the status of recent tasks associated with the Job specified by
-<code>job_key</code> in its supplied cluster. Typically this includes
-a mix of active tasks (running or assigned) and inactive tasks
-(successful, failed, and lost).</p>
-
-<h3 id="opening-the-web-ui">Opening the Web UI</h3>
-
-<p>Use the Job&rsquo;s web UI scheduler URL or the <code>aurora status</code> command to find out on which
-machines individual tasks are scheduled. You can open the web UI via the
-<code>open</code> command line command if invoked from your machine:</p>
-<pre class="highlight text">aurora open [&lt;cluster&gt;[/&lt;role&gt;[/&lt;env&gt;/&lt;job_name&gt;]]]
-</pre>
-<p>If only the cluster is specified, it goes directly to that cluster&rsquo;s
-scheduler main page. If the role is specified, it goes to the top-level
-role page. If the full job key is specified, it goes directly to the job
-page where you can inspect individual tasks.</p>
-
-<h3 id="sshing-to-a-specific-task-machine">SSHing to a Specific Task Machine</h3>
-<pre class="highlight text">aurora ssh &lt;job_key&gt; &lt;shard number&gt;
-</pre>
-<p>You can have the Aurora client ssh directly to the machine that has been
-assigned a particular Job/shard number. This may be useful for quickly
-diagnosing issues such as performance issues or abnormal behavior on a
-particular machine.</p>
-
-<p>It can take three named parameters:</p>
-
-<ul>
-<li><code>-e</code>, <code>--executor_sandbox</code>:  Run <code>ssh</code> in the executor sandbox
-instead of the  task sandbox. Defaults to <code>False</code>.</li>
-<li><code>--user=SSH_USER</code>: <code>ssh</code> as the given user instead of as the role in
-the <code>job_key</code> argument. Defaults to None.</li>
-<li><code>-L PORT:NAME</code>: Add tunnel from local port <code>PORT</code> to the remote
-named port  <code>NAME</code>. Defaults to <code>[]</code>.</li>
-</ul>
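-
-<p>For example, to ssh into the machine running shard 0 of a hypothetical job while
-tunneling local port 8080 to the task&rsquo;s named <code>http</code> port:</p>
-<pre class="highlight text">aurora ssh -L 8080:http devcluster/www-data/prod/hello 0
-</pre>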
-
-<h3 id="templating-command-arguments">Templating Command Arguments</h3>
-<pre class="highlight text">aurora run [-e] [-t THREADS] &lt;job_key&gt; -- &lt;&lt;command-line&gt;&gt;
-</pre>
-<p>Given a job specification, run the supplied command on all hosts and
-return the output. You may use the standard Mustache templating rules:</p>
-
-<ul>
-<li><code>{{thermos.ports[name]}}</code> substitutes the specific named port of the
-task assigned to this machine</li>
-<li><code>{{mesos.instance}}</code> substitutes the shard id of the job&rsquo;s task
-assigned to this machine</li>
-<li><code>{{thermos.task_id}}</code> substitutes the task id of the job&rsquo;s task
-assigned to this machine</li>
-</ul>
-
-<p>For example, the following type of pattern can be a powerful diagnostic
-tool:</p>
-<pre class="highlight text">aurora run -t5 cluster1/tyg/devel/seizure -- \
-  &#39;curl -s -m1 localhost:{{thermos.ports[http]}}/vars | grep uptime&#39;
-</pre>
-<p>By default, the command runs in the Task&rsquo;s sandbox. The <code>-e</code> option can
-run the command in the executor&rsquo;s sandbox. This is mostly useful for
-Aurora administrators.</p>
-
-<p>You can parallelize the runs by using the <code>-t</code> option.</p>
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/documentation/latest/clientv2/index.html b/publish/documentation/latest/clientv2/index.html
deleted file mode 100644
index 8609e8d..0000000
--- a/publish/documentation/latest/clientv2/index.html
+++ /dev/null
@@ -1,491 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Transparent Apache Aurora logo with dark background"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h1 id="aurora-client-v2">Aurora Client v2</h1>
-
-<h2 id="overview">Overview</h2>
-
-<p>Our goal is to replace the current Aurora command-line client. The
-current client suffers from an early monolithic structure, and a long
-development history of rapid unplanned evolution.</p>
-
-<p>In addition to its internal problems, the current Aurora client is
-confusing for users. There are several different kinds of objects
-manipulated by the Aurora command line, and the difference between
-them is often not clear. (What&rsquo;s the difference between a job and a
-configuration?) For each type of object, there are different commands,
-and it&rsquo;s hard to remember which command should be used for which kind
-of object.</p>
-
-<p>Instead of continuing to let the Aurora client develop and evolve
-randomly, it&rsquo;s time to take a principled look at the Aurora command
-line, and figure out how to make our command line processing make
-sense. At the same time, the code needs to be cleaned up, and divided
-into small comprehensible units based on a plugin architecture.</p>
-
-<p>Doing this now will give us a more intuitive, consistent, and easy to
-use client, as well as a sound platform for future development.</p>
-
-<h2 id="goals">Goals</h2>
-
-<ul>
-<li>A command line tool for interacting with Aurora that is easy for
-users to understand.</li>
-<li>A noun/verb command model.</li>
-<li>A modular source-code architecture.</li>
-<li>Non-disruptive transition for users.</li>
-</ul>
-
-<h2 id="non-goals">Non-Goals</h2>
-
-<ul>
-<li>The most important non-goal is that we&rsquo;re not trying to redesign the
-Aurora scheduler, the Aurora executor, or any of the peripheral tools
-that the Aurora command line interacts with; we only want to create a
-better command line client.</li>
-<li>We do not want to change thermos, mesos, hadoop, etc.</li>
-<li>We do not want to create new objects that users will work with to
-interact with Mesos or Aurora.</li>
-<li>We do not want to change Aurora job configuration files or file formats.</li>
-<li>We do not want to change the Aurora API.</li>
-<li>We don&rsquo;t want to boil the ocean: there are many things that we could
-include in the scope of this project, but we don&rsquo;t want to be
-distracted by re-implementing all of twitter.commons in order to
-create a perfect Aurora client.</li>
-</ul>
-
-<h2 id="background">Background</h2>
-
-<p>Aurora is a system that&rsquo;s used to run and manage services and
-service-like jobs running in a datacenter. Aurora takes care of
-allocating resources in order to schedule and run jobs without
-requiring teams to manage dedicated hardware. The heart of Aurora is
-called the scheduler, and is responsible for finding and assigning
-resources to tasks.</p>
-
-<p>The Aurora scheduler provides a thrift API. The scheduler API is
-low-level and difficult to interact with. Users do not interact
-directly with the Aurora API; instead, they use a command-line tool,
-which provides a collection of easy-to-use commands. This command-line
-tool, in turn, talks to the scheduler API to launch and manage jobs in
-datacenter clusters. The command-line tool is called the Aurora
-client.</p>
-
-<p>The current implementation of the Aurora client is haphazard,
-and really needs to be cleaned up:</p>
-
-<ul>
-<li>The code is monolithic and hard to maintain. It&rsquo;s implemented using
-<code>twitter.common.app</code>, which assumes that all of the command code lives
-in a single source file. To work around this, and allow some
-subdivision, it uses a hack of <code>twitter.common.app</code> to force
-registration of commands from multiple modules. It&rsquo;s hard to
-understand, and hard to modify.</li>
-<li>The current code is very difficult to test. Because of the way it&rsquo;s
-built, there is no consistent way of passing key application data
-around. As a result, each unit test of client operations needs a
-difficult-to-assemble custom setup of mock objects.</li>
-<li>The current code handles errors poorly, and it is difficult to
-fix. Many common errors produce unacceptable results. For example,
-issuing an unknown command generates an error message &ldquo;main takes 0
-parameters but received 1&rdquo;; passing an invalid parameter to other
-commands frequently produces a stack trace.</li>
-<li>The current command line is confusing for users. There are several
-different kinds of objects manipulated by the Aurora command line,
-and the difference between them is often not entirely clear. (What&rsquo;s
-the difference between a job and a configuration?)
-For each type of object, there are different
-commands, and it&rsquo;s frequently not clear just which command should be
-used for which object.</li>
-</ul>
-
-<p>Instead of continuing to let it develop and evolve randomly, it&rsquo;s time
-to take a principled look at the Aurora command line, and figure out
-how to make command line processing make sense. At the same time, the
-code needs to be cleaned up, and divided into small comprehensible
-units based on a plugin architecture.</p>
-
-<h2 id="requirements">Requirements</h2>
-
-<p>Aurora is aimed at engineers who run jobs and services in a
-datacenter. As a result, the requirements for the aurora client are
-all engineering focused:</p>
-
-<ul>
-<li><strong>Consistency</strong>: commands should follow a consistent structure, so that
-users can apply knowledge and intuition gained from working with
-some aurora commands to new commands. This means that when commands
-can re-use the same options, they should; that objects should be
-referred to by consistent syntax throughout the tool.</li>
-<li><strong>Helpfulness</strong>: commands should be structured so that the system can
-generate helpful error messages. If a user just runs &ldquo;aurora&rdquo;, they
-should get a basic usage message. If they try to run an invalid
-command, they should get a message that the command is invalid, not
-a stack dump or &ldquo;command main() takes 0 parameters but received
-2&rdquo;. Commands should not generate extraneous output that obscures the
-key facts that the user needs to know, and the default behavior of
-commands should not generate outputs that will be routinely ignored
-by users.</li>
-<li><strong>Extensibility</strong>: it should be easy to plug in new commands,
-including custom commands, to adapt the Aurora client to new
-environments.</li>
-<li><strong>Script-friendly command output</strong>: every command should at least include
-an option that generates output that&rsquo;s script-friendly. Scripts should be
-able to work with command-output without needing to do screen scraping.</li>
-<li><strong>Scalability</strong>: the tools should be usable for any foreseeable size
-of Aurora datacenters and machine clusters.</li>
-</ul>
-
-<h2 id="design-overview">Design Overview</h2>
-
-<p>The Aurora client will be reimplemented using a noun-verb model,
-similar to the cmdlet model used by Monad/Windows Powershell. Users
-will work by providing a noun for the type of object being operated
-on, and a verb for the specific operation being performed on the
-object, followed by parameters. For example, to create a job, the user
-would execute: &ldquo;<code>aurora job create smfd/mchucarroll/devel/jobname
-job.aurora</code>&rdquo;. The noun is <code>job</code> and the verb is <code>create</code>.</p>
-
-<p>The client will be implemented following that noun-verb
-convention. Each noun will be a separate component, which can be
-registered into the command-line framework. Each verb will be
-implemented by a class that registers with the appropriate noun. Nouns
-and verbs will each provide methods that add their command line
-options and parameters to the options parser, using the Python
-argparse library.</p>
-
-<h2 id="detailed-design">Detailed Design</h2>
-
-<h3 id="interface">Interface</h3>
-
-<p>In this section, we&rsquo;ll walk through the types of objects that the
-client can manipulate, and the operations that need to be provided for
-each object. These form the primary interface that engineers will use
-to interact with Aurora.</p>
-
-<p>In the command-line, each of the object types will have an Aurora
-subcommand. The commands to manipulate the object type will follow the
-type. For example, here are several commands in the old syntax
-contrasted against the new noun/verb syntax.</p>
-
-<ul>
-<li>Get quota for a role:
-
-<ul>
-<li>Noun/Verb syntax:  <code>aurora quota get west/www-data</code></li>
-<li>Old syntax: <code>aurora get_quota --cluster=smf1 www-data</code></li>
-</ul></li>
-<li>Create job:
-
-<ul>
-<li>Noun/Verb syntax: <code>aurora job create west/www-data/test/job job.aurora</code></li>
-<li>Old syntax: <code>aurora create west/www-data/test/job job.aurora</code></li>
-</ul></li>
-<li>Schedule a job to run at a specific interval:
-
-<ul>
-<li>Noun/verb: <code>aurora cron schedule east/www-data/test/job job.aurora</code></li>
-<li>Old: <code>aurora create east/www-data/test/job job.aurora</code></li>
-</ul></li>
-</ul>
-
-<p>As you can see in these examples, the new syntax is more consistent:
-you always specify the cluster where a command executes as part of an
-identifier, where in the old syntax, it was sometimes part of the
-jobkey and sometimes specified with a <code>--cluster</code> option.</p>
-
-<p>The new syntax is also more clear and explicit: even without knowing
-much about Aurora, it&rsquo;s clear what objects each command is acting on,
-where in the old syntax, commands like &ldquo;create&rdquo; are unclear.</p>
-
-<h3 id="the-job-noun">The Job Noun</h3>
-
-<p>A job is a configured program ready to run in Aurora. A job is,
-conceptually, a task factory: when a job is submitted to the Aurora
-scheduler, it creates a collection of tasks. The job contains a
-complete description of everything it needs to create a collection of
-tasks. (Note that this subsumes &ldquo;service&rdquo; commands. A service is just
-a task whose configuration sets the is_service flag, so we don&rsquo;t have
-separate commands for working with services.) Jobs are specified using
-<code>cluster/role/env/name</code> jobkey syntax.</p>
-
-<ul>
-<li><code>aurora job create *jobkey* *config*</code>:  submits a job to a cluster, launching the task(s) specified by the job config.</li>
-<li><code>aurora job status *jobkey*</code>: query job status. Prints information about the job,
-whether it&rsquo;s running, etc., to standard out. If jobkey includes
-globs, it should list all jobs that match the glob</li>
-<li><code>aurora job kill *jobkey*/*instanceids*</code>: kill/stop some of a job&rsquo;s instances. This stops the job&rsquo;s tasks; if the job
-has service tasks, they&rsquo;ll be disabled, so that they won&rsquo;t restart.</li>
-<li><code>aurora job killall *jobkey*</code>: kill all of the instances of a job. This
-is distinct from the <em>kill</em> command as a safety measure: omitting the
-instances from a kill command shouldn&rsquo;t result in destroying the entire job.</li>
-<li><code>aurora job restart *jobkey*</code>: conceptually, this will kill a job, and then
-launch it again. If the job does not exist, then fail with an error
-message.  In fact, the underlying implementation does the
-kill/relaunch on a rolling basis - so it&rsquo;s not an immediate kill of
-all shards/instances, followed by a delay as all instances relaunch,
-but rather a controlled gradual process.</li>
-<li><code>aurora job list *jobkey*</code>: list all jobs that match the jobkey spec that are
-registered with the scheduler. This will include both jobs that are
-currently running, and jobs that are scheduled to run at a later
-time. The job key can be partial: if it specifies cluster, all jobs
-on the cluster will be listed; cluster/role, all jobs running on the cluster under the role will be listed, etc.</li>
-</ul>
-
-<h2 id="the-schedule-noun-(cron)">The Schedule Noun (Cron)</h2>
-
-<p>Note (3/21/2014): The &ldquo;cron&rdquo; noun is <em>not</em> implemented yet.</p>
-
-<p>Cron is a scheduler adjunct that periodically runs a job on a
-schedule. The cron commands all manipulate cron schedule entries. The
-schedules are specified as a part of the job configuration.</p>
-
-<ul>
-<li><code>aurora cron schedule jobkey config</code>: schedule a job to run by cron. If a cron job already exists,
-this replaces its template with the new one.</li>
-<li><code>aurora cron deschedule jobkey</code>: removes a job&rsquo;s entry from the cron schedule.</li>
-<li><code>aurora cron status jobkey</code>: query for a scheduled job&rsquo;s status.</li>
-</ul>
-
-<h2 id="the-quota-noun">The Quota Noun</h2>
-
-<p>A quota is a data object maintained by the scheduler that specifies the maximum
-resources that may be consumed by jobs owned by a particular role. In the future,
-we may add new quota types. At some point, we&rsquo;ll also probably add an administrators
-command to set quotas.</p>
-
-<ul>
-<li><code>aurora quota get *cluster/role*</code></li>
-</ul>
-
-<h2 id="implementation">Implementation</h2>
-
-<p>The current command line is monolithic. Every command on an Aurora
-object is a top-level command in the Aurora client. In the
-restructured command line, each of the primary object types
-manipulated by Aurora should have its own sub-command.</p>
-
-<ul>
-<li>Advantages of this approach:
-
-<ul>
-<li>Easier to detangle the command-line processing. The top-level
-command-processing will be a small set of subcommand
-processors. Option processing for each subcommand can be offloaded
-to a separate module.</li>
-<li>The aurora top-level help command will be much more
-comprehensible. Instead of giving a huge list of every possible
-command, it will present the list of top-level object types, and
-then users can request help on the commands for a specific type
-of object.</li>
-<li>The sub-commands can be separated into distinct command-line
-tools when appropriate.</li>
-</ul></li>
-</ul>
-
-<h3 id="command-structure-and-options-processing">Command Structure and Options Processing</h3>
-
-<p>The implementation will closely follow the model of Pants goals. Pants goals use
-a static registration system to add new subcommands. In pants, each
-goal command is an implementation of a command interface, and provides
-implementations of methods to register options and parameters, and to
-actually execute the command. In this design, commands are modular and
-easy to implement, debug, and combine in different ways.</p>
-
-<p>For the Aurora client, we plan to use a two-level variation of the
-basic concept from pants. At the top-level we will have nouns. A noun
-will define some common command-line parameters required by all of its
-verbs, and will provide a registration hook for attaching verbs. Nouns
-will be implemented as a subclass of a basic Noun type.</p>
-
-<p>Each verb will, similarly, be implemented as a subclass of Verb. Verbs
-will be able to specify command-line options and parameters.</p>
-
-<p>Both <code>Noun</code> and <code>Verb</code> will be subclasses of a common base-class <code>AuroraCommand</code>:</p>
-<pre class="highlight text">class AuroraCommand(object):
-  def get_options(self):
-    &quot;&quot;&quot;Gets the set of command-line options objects for this command.
-    The result is a list of CommandOption objects.
-    &quot;&quot;&quot;
-    pass
-
-  @property
-  def help(self):
-    &quot;&quot;&quot;Returns the help message for this command&quot;&quot;&quot;
-
-  @property
-  def usage(self):
-    &quot;&quot;&quot;Returns a short usage description of the command&quot;&quot;&quot;
-
-  @property
-  def name(self):
-    &quot;&quot;&quot;Returns the command name&quot;&quot;&quot;
-</pre>
-<p>A command-line tool will be implemented as an instance of a <code>CommandLine</code>:</p>
-<pre class="highlight text">class CommandLine(object):
-  &quot;&quot;&quot;The top-level object implementing a command-line application.&quot;&quot;&quot;
-
-  @property
-  def name(self):
-    &quot;&quot;&quot;Returns the name of this command-line tool&quot;&quot;&quot;
-
-  def print_out(self, str):
-    print(str)
-
-  def print_err(self, str):
-    print(str, file=sys.stderr)
-
-  def register_noun(self, noun):
-    &quot;&quot;&quot;Adds a noun to the application&quot;&quot;&quot;
-
-  def register_plugin(self, plugin):
-     &quot;&quot;&quot;Adds a configuration plugin to the system&quot;&quot;&quot;
-</pre>
-<p>Nouns are registered into a command-line using the <code>register_noun</code>
-method. They are weakly coupled to the application, making it easy to
-use a single noun in several different command-line tools. Nouns allow
-the registration of verbs using the <code>register_verb</code> method.</p>
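-
-<p>As a minimal sketch of how this wiring might look (the <code>JobNoun</code>,
-<code>CreateJobVerb</code>, and <code>KillJobVerb</code> names here are illustrative, not part of
-the actual API):</p>
-<pre class="highlight text">class JobNoun(Noun):
-  @property
-  def name(self):
-    return &#39;job&#39;
-
-  def __init__(self):
-    super(JobNoun, self).__init__()
-    # Attach each verb that operates on jobs.
-    self.register_verb(CreateJobVerb())
-    self.register_verb(KillJobVerb())
-
-# Assemble a command-line tool from its nouns.
-cmdline = CommandLine()
-cmdline.register_noun(JobNoun())
-</pre>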
-
-<p>When commands execute, they&rsquo;re given an instance of a <em>context object</em>.
-The context object must be an instance of a subclass of <code>AuroraCommandContext</code>.
-Options, parameters, and IO are all accessed using the context object. The context
-is created dynamically by the noun object owning the verb being executed. Developers
-are strongly encouraged to implement custom contexts for their nouns, and move functionality
-shared by the noun&rsquo;s verbs into the context object. The context interface is:</p>
-<pre class="highlight text">class Context(object):
-  class Error(Exception): pass
-
-  class ArgumentException(Error): pass
-
-  class CommandError(Error): pass
-
-  @classmethod
-  def exit(cls, code, msg):
-    &quot;&quot;&quot;Exit the application with an error message&quot;&quot;&quot;
-    raise cls.CommandError(code, msg)
-
-  def print_out(self, msg, indent=0):
-    &quot;&quot;&quot;Prints a message to standard out, with an indent&quot;&quot;&quot;
-
-  def print_err(self, msg, indent=0):
-    &quot;&quot;&quot;Prints a message to standard err, with an indent&quot;&quot;&quot;
-</pre>
-<p>In addition to nouns and verbs, there&rsquo;s one more kind of registerable
-component, called a <em>configuration plugin</em>. These objects add a set of
-command-line options that can be passed to <em>all</em> of the commands
-implemented in the tool. Before the command is executed, the
-configuration plugin will be invoked, and will process its
-command-line arguments. This is useful for general configuration
-changes, like establishing a secure tunnel to talk to machines in a
-datacenter. (A useful way to think of a plugin is as something like an
-aspect that can be woven in to aurora to provide environment-specific
-configuration.) A configuration plugin is implemented as an instance
-of class <code>ConfigurationPlugin</code>, and registered with the
-<code>register_plugin</code> method of the <code>CommandLine</code> object. The interface of
-a plugin is:</p>
-<pre class="highlight text">class ConfigurationPlugin(object):
-  &quot;&quot;&quot;A component that can be plugged in to a command-line.&quot;&quot;&quot;
-
-  @abstractmethod
-  def get_options(self):
-    &quot;&quot;&quot;Return the set of options processed by this plugin&quot;&quot;&quot;
-
-  @abstractmethod
-  def execute(self, context):
-    &quot;&quot;&quot;Run the context/command line initialization code for this plugin.&quot;&quot;&quot;
-</pre>
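-<p>A plugin for the tunneling example above might look roughly like the following sketch
-(the <code>open_tunnel</code> helper and the exact <code>CommandOption</code> signature are
-hypothetical):</p>
-<pre class="highlight text">class TunnelPlugin(ConfigurationPlugin):
-  &quot;&quot;&quot;Optionally routes scheduler traffic through a tunnel host.&quot;&quot;&quot;
-
-  def get_options(self):
-    return [CommandOption(&#39;--tunnel-host&#39;, default=None,
-                          help=&#39;Host to tunnel scheduler traffic through&#39;)]
-
-  def execute(self, context):
-    # Only set up the tunnel when the option was supplied.
-    if context.options.tunnel_host:
-      open_tunnel(context.options.tunnel_host)
-</pre>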
-<h3 id="command-execution">Command Execution</h3>
-
-<p>The options process and command execution is built as a facade over Python&rsquo;s
-standard argparse. All of the actual argument processing is done by the
-argparse library.</p>
-
-<p>Once the options are processed, the framework will start to execute the command. Command execution consists of:</p>
-
-<h1 id="create-a-context-object.-the-framework-will-use-the-argparse-options-to-identify">Create a context object. The framework will use the argparse options to identify</h1>
-
-<p>which noun is being invoked, and will call that noun&rsquo;s <code>create_context</code> method.
-  The argparse options object will be stored in the context.</p>
-
-<h1 id="execute-any-configuration-plugins.-before-any-command-is-invoked,-the-framework">Execute any configuration plugins. Before any command is invoked, the framework</h1>
-
-<p>will first iterate over all of the registered configuration plugins. For each
-  plugin, it will invoke the <code>execute</code> method.</p>
-
-<h1 id="the-noun-will-use-the-context-to-find-out-what-verb-is-being-invoked,-and-it-will">The noun will use the context to find out what verb is being invoked, and it will</h1>
-
-<p>then call that verb&rsquo;s <code>execute</code> method.</p>
-
-<h1 id="the-command-will-exit.-its-return-code-will-be-whatever-was-returned-by-the-verb&#39;s">The command will exit. Its return code will be whatever was returned by the verb&rsquo;s</h1>
-
-<p><code>execute</code> method.</p>
-
-<p>Commands are expected to return a code from a list of standard exit codes,
-which can be found in <code>src/main/python/apache/aurora/client/cli/__init__.py</code>.</p>
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/documentation/latest/committers/index.html b/publish/documentation/latest/committers/index.html
deleted file mode 100644
index bda3db4..0000000
--- a/publish/documentation/latest/committers/index.html
+++ /dev/null
@@ -1,141 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Transparent Apache Aurora logo with dark background"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h2 id="setting-up-your-email-account">Setting up your email account</h2>
-
-<p>Once your Apache ID has been set up you can configure your account and add ssh keys and setup an
-email forwarding address at</p>
-
-<p><a href="http://id.apache.org">http://id.apache.org</a></p>
-
-<p>Additional instructions for setting up your new committer email can be found at</p>
-
-<p><a href="http://www.apache.org/dev/user-email.html">http://www.apache.org/dev/user-email.html</a></p>
-
-<p>The recommended setup is to configure all services (mailing lists, JIRA, ReviewBoard) to send
-emails to your @apache.org email address.</p>
-
-<h2 id="creating-a-gpg-key-for-releases">Creating a gpg key for releases</h2>
-
-<p>In order to create a release candidate you will need a gpg key published to an external key server
-and that key will need to be added to our KEYS file as well.</p>
-
-<ol>
-<li><p>Create a key:</p>
-<pre class="highlight text">       gpg --gen-key
-</pre></li>
-<li><p>Add your gpg key to the Apache Aurora KEYS file:</p>
-<pre class="highlight text">       git clone https://git-wip-us.apache.org/repos/asf/incubator-aurora.git
-       (gpg --list-sigs &lt;KEY ID&gt; &amp;&amp; gpg --armor --export &lt;KEY ID&gt;) &gt;&gt; KEYS
-       git add KEYS &amp;&amp; git commit -m &quot;Adding gpg key for &lt;APACHE ID&gt;&quot;
-       ./rbt post -o -g
-</pre></li>
-<li><p>Publish the key to an external key server:</p>
-<pre class="highlight text">       gpg --keyserver pgp.mit.edu --send-keys &lt;KEY ID&gt;
-</pre></li>
-<li><p>Upload the updated KEYS file to the Apache Aurora svn dist locations listed below:</p>
-<pre class="highlight text">       https://dist.apache.org/repos/dist/dev/incubator/aurora/KEYS
-       https://dist.apache.org/repos/dist/release/incubator/aurora/KEYS
-</pre></li>
-<li><p>Add your key to git config for use with the release scripts:</p>
-<pre class="highlight text">       git config --global user.signingkey &lt;KEY ID&gt;
-</pre></li>
-</ol>
-
-<h2 id="creating-a-release">Creating a release</h2>
-
-<p>The following will guide you through the steps to create a release candidate, vote, and finally an
-official Apache Aurora release. Before starting, your gpg key should be in the KEYS file and you
-must have access to commit to the dist.apache.org repositories.</p>
-
-<ol>
-<li><p>Ensure that all issues resolved for this release candidate are tagged with the correct Fix
-Version in Jira; the changelog script will use this to generate the CHANGELOG in step #2.</p></li>
-<li><p>Create a release candidate. This will automatically update the CHANGELOG and commit it, create a
-branch, and update the current version within the trunk. To create a minor version update and publish
-it, run</p>
-<pre class="highlight text">       ./build-support/release/release-candidate -l m -p
-</pre></li>
-<li><p>Update, if necessary, the draft email created from the <code>release-candidate</code> script in step #2 and
-send the [VOTE] email to the dev@ and private@ mailing lists. You can verify the release signature
-and checksums by running</p>
-<pre class="highlight text">        ./build-support/release/verify-release-candidate
-</pre></li>
-<li><p>Wait for the vote to complete. If the vote fails, address any issues, go back to step #1, and
-run again, this time using the -r flag to increment the release candidate version. This will
-automatically clean up the release candidate rc0 branch and source distribution.</p>
-<pre class="highlight text">       ./build-support/release/release-candidate -l m -r 1 -p
-</pre></li>
-<li><p>Once the vote has successfully passed, create the release</p>
-<pre class="highlight text">       ./build-support/release/release
-</pre></li>
-<li><p>Update the draft email created from the <code>release</code> script in step #5 to include the Apache IDs for
-all binding votes and send the [RESULT][VOTE] email to the dev@ and private@ mailing lists.</p></li>
-</ol>
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/documentation/latest/configuration-reference/index.html b/publish/documentation/latest/configuration-reference/index.html
deleted file mode 100644
index d0e4473..0000000
--- a/publish/documentation/latest/configuration-reference/index.html
+++ /dev/null
@@ -1,879 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Transparent Apache Aurora logo with dark background"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h1 id="aurora-+-thermos-configuration-reference">Aurora + Thermos Configuration Reference</h1>
-
-<ul>
-<li><a href="#aurora--thermos-configuration-reference">Aurora + Thermos Configuration Reference</a></li>
-<li><a href="#introduction">Introduction</a></li>
-<li><a href="#process-schema">Process Schema</a>
-
-<ul>
-<li><a href="#process-objects">Process Objects</a>
-
-<ul>
-<li><a href="#name">name</a></li>
-<li><a href="#cmdline">cmdline</a></li>
-<li><a href="#max_failures">max_failures</a></li>
-<li><a href="#daemon">daemon</a></li>
-<li><a href="#ephemeral">ephemeral</a></li>
-<li><a href="#min_duration">min_duration</a></li>
-<li><a href="#final">final</a></li>
-</ul></li>
-</ul></li>
-<li><a href="#task-schema">Task Schema</a>
-
-<ul>
-<li><a href="#task-object">Task Object</a>
-
-<ul>
-<li><a href="#name-1">name</a></li>
-<li><a href="#processes">processes</a></li>
-<li><a href="#constraints">constraints</a></li>
-<li><a href="#resources">resources</a></li>
-<li><a href="#max_failures-1">max_failures</a></li>
-<li><a href="#max_concurrency">max_concurrency</a></li>
-<li><a href="#finalization_wait">finalization_wait</a></li>
-</ul></li>
-<li><a href="#constraint-object">Constraint Object</a></li>
-<li><a href="#resource-object">Resource Object</a></li>
-</ul></li>
-<li><a href="#job-schema">Job Schema</a>
-
-<ul>
-<li><a href="#job-objects">Job Objects</a></li>
-<li><a href="#services">Services</a></li>
-<li><a href="#updateconfig-objects">UpdateConfig Objects</a></li>
-<li><a href="#healthcheckconfig-objects">HealthCheckConfig Objects</a></li>
-<li><a href="#announcer-objects">Announcer Objects</a></li>
-</ul></li>
-<li><a href="#specifying-scheduling-constraints">Specifying Scheduling Constraints</a></li>
-<li><a href="#template-namespaces">Template Namespaces</a>
-
-<ul>
-<li><a href="#mesos-namespace">mesos Namespace</a></li>
-<li><a href="#thermos-namespace">thermos Namespace</a></li>
-</ul></li>
-<li><a href="#basic-examples">Basic Examples</a>
-
-<ul>
-<li><a href="#hello_worldaurora">hello_world.aurora</a></li>
-<li><a href="#environment-tailoring">Environment Tailoring</a>
-
-<ul>
-<li><a href="#hello_world_productionizedaurora">hello<em>world</em>productionized.aurora</a></li>
-</ul></li>
-</ul></li>
-</ul>
-
-<h1 id="introduction">Introduction</h1>
-
-<p>Don&rsquo;t know where to start? The Aurora configuration schema is very
-powerful, and configurations can become quite complex for advanced use
-cases.</p>
-
-<p>For examples of simple configurations to get something up and running
-quickly, check out the <a href="/documentation/latest/tutorial/">Tutorial</a>. When you feel comfortable with the basics, move
-on to the <a href="/documentation/latest/configuration-tutorial/">Configuration Tutorial</a> for more in-depth coverage of
-configuration design.</p>
-
-<p>For additional basic configuration examples, see <a href="#basic-examples">the end of this document</a>.</p>
-
-<h1 id="process-schema">Process Schema</h1>
-
-<p>Process objects consist of required <code>name</code> and <code>cmdline</code> attributes. You can customize Process
-behavior with its optional attributes. Remember, Processes are handled by Thermos.</p>
-
-<h3 id="process-objects">Process Objects</h3>
-
-<table><thead>
-<tr>
-<th><strong>Attribute Name</strong></th>
-<th style="text-align: center"><strong>Type</strong></th>
-<th><strong>Description</strong></th>
-</tr>
-</thead><tbody>
-<tr>
-<td><strong>name</strong></td>
-<td style="text-align: center">String</td>
-<td>Process name (Required)</td>
-</tr>
-<tr>
-<td><strong>cmdline</strong></td>
-<td style="text-align: center">String</td>
-<td>Command line (Required)</td>
-</tr>
-<tr>
-<td><strong>max_failures</strong></td>
-<td style="text-align: center">Integer</td>
-<td>Maximum process failures (Default: 1)</td>
-</tr>
-<tr>
-<td><strong>daemon</strong></td>
-<td style="text-align: center">Boolean</td>
-<td>When True, this is a daemon process. (Default: False)</td>
-</tr>
-<tr>
-<td><strong>ephemeral</strong></td>
-<td style="text-align: center">Boolean</td>
-<td>When True, this is an ephemeral process. (Default: False)</td>
-</tr>
-<tr>
-<td><strong>min_duration</strong></td>
-<td style="text-align: center">Integer</td>
-<td>Minimum duration between process restarts in seconds. (Default: 15)</td>
-</tr>
-<tr>
-<td><strong>final</strong></td>
-<td style="text-align: center">Boolean</td>
-<td>When True, this process is a finalizing one that should run last. (Default: False)</td>
-</tr>
-</tbody></table>
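-
-<p>As a quick reference, here is a sketch of a <code>Process</code> with every optional
-attribute spelled out at its default value from the table above; the name and
-command line are illustrative:</p>
-<pre class="highlight text">    example = Process(
-      name = &#39;example&#39;,        # any valid UNIX filename string
-      cmdline = &#39;echo hello&#39;,  # runs in a bash subshell
-      max_failures = 1,
-      daemon = False,
-      ephemeral = False,
-      min_duration = 15,       # seconds between runs
-      final = False)
-</pre>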
-
-<h4 id="name">name</h4>
-
-<p>The name is any valid UNIX filename string (specifically no
-slashes, NULLs or leading periods). Within a Task object, each Process name
-must be unique.</p>
-
-<h4 id="cmdline">cmdline</h4>
-
-<p>The command line run by the process. The command line is invoked in a bash
-subshell, so it can involve full-blown bash scripts. However, nothing is
-supplied for command-line arguments, so <code>$*</code> is unspecified.</p>
-
-<h4 id="max_failures">max_failures</h4>
-
-<p>The maximum number of failures (non-zero exit statuses) this process can
-have before being marked permanently failed and not retried. If a
-process permanently fails, Thermos looks at the failure limit of the task
-containing the process (usually 1) to determine if the task has
-failed as well.</p>
-
-<p>Setting <code>max_failures</code> to 0 makes the process retry
-indefinitely until it achieves a successful (zero) exit status.
-It retries at most once every <code>min_duration</code> seconds to prevent
-an effective denial of service attack on the coordinating Thermos scheduler.</p>
-
-<h4 id="daemon">daemon</h4>
-
-<p>By default, Thermos processes are non-daemon. If <code>daemon</code> is set to True, a
-successful (zero) exit status does not prevent future process runs.
-Instead, the process reinvokes after <code>min_duration</code> seconds.
-However, the maximum failure limit still applies. A combination of
-<code>daemon=True</code> and <code>max_failures=0</code> causes a process to retry
-indefinitely regardless of exit status. This should be avoided
-for very short-lived processes because of the accumulation of
-checkpointed state for each process run. When running in Mesos
-specifically, <code>max_failures</code> is capped at 100.</p>
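-
-<p>For example, a minimal sketch of a poller that is re-run after every exit,
-successful or not; the name and command line are hypothetical:</p>
-<pre class="highlight text">    poller = Process(
-      name = &#39;poller&#39;,
-      cmdline = &#39;./poll_feed.sh&#39;,  # hypothetical script
-      daemon = True,       # re-run after successful exits too
-      max_failures = 0,    # never mark permanently failed
-      min_duration = 30)   # wait at least 30 seconds between runs
-</pre>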
-
-<h4 id="ephemeral">ephemeral</h4>
-
-<p>By default, Thermos processes are non-ephemeral. If <code>ephemeral</code> is set to
-True, the process&rsquo;s status is not used to determine if its containing task
-has completed. For example, consider a task with a non-ephemeral
-webserver process and an ephemeral logsaver process
-that periodically checkpoints its log files to a centralized data store.
-The task is considered finished once the webserver process has
-completed, regardless of the logsaver&rsquo;s current status.</p>
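-
-<p>A sketch of that example; the command lines are hypothetical and the Task&rsquo;s
-other attributes are omitted for brevity:</p>
-<pre class="highlight text">    webserver = Process(name = &#39;webserver&#39;, cmdline = &#39;./run_server.sh&#39;)
-    logsaver = Process(
-      name = &#39;logsaver&#39;,
-      cmdline = &#39;./checkpoint_logs.sh&#39;,
-      daemon = True,      # keep checkpointing periodically
-      ephemeral = True)   # its status never blocks task completion
-    task = Task(name = &#39;webserver&#39;, processes = [webserver, logsaver])
-</pre>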
-
-<h4 id="min_duration">min_duration</h4>
-
-<p>Processes may succeed or fail multiple times during a single task&rsquo;s
-duration. Each of these is called a <em>process run</em>. <code>min_duration</code> is
-the minimum number of seconds the scheduler waits before running the
-same process.</p>
-
-<h4 id="final">final</h4>
-
-<p>Processes can be grouped into two classes: ordinary processes and
-finalizing processes. By default, Thermos processes are ordinary. They
-run as long as the task is considered healthy (i.e., no failure
-limits have been reached.) But once all regular Thermos processes
-finish or the task reaches a certain failure threshold, it
-moves into a &ldquo;finalization&rdquo; stage and runs all finalizing
-processes. These are typically processes necessary for cleaning up the
-task, such as log checkpointers, or perhaps e-mail notifications that
-the task completed.</p>
-
-<p>Finalizing processes may not depend upon ordinary processes or
-vice-versa; however, finalizing processes may depend upon other
-finalizing processes and are otherwise scheduled like typical
-processes.</p>
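-
-<p>For instance, a finalizing log uploader might look like the following sketch;
-the command line is hypothetical:</p>
-<pre class="highlight text">    checkpoint_logs = Process(
-      name = &#39;checkpoint_logs&#39;,
-      cmdline = &#39;./upload_logs.sh&#39;,  # hypothetical cleanup script
-      final = True)   # runs in the finalization stage, after ordinary processes
-</pre>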
-
-<h1 id="task-schema">Task Schema</h1>
-
-<p>Tasks fundamentally consist of a <code>name</code> and a list of Process objects stored as the
-value of the <code>processes</code> attribute. Processes can be further constrained with
-<code>constraints</code>. By default, <code>name</code>&rsquo;s value inherits from the first Process in the
-<code>processes</code> list, so for simple <code>Task</code> objects with one Process, <code>name</code>
-can be omitted. In Mesos, <code>resources</code> is also required.</p>
-
-<h3 id="task-object">Task Object</h3>
-
-<table><thead>
-<tr>
-<th><strong>param</strong></th>
-<th style="text-align: center"><strong>type</strong></th>
-<th><strong>description</strong></th>
-</tr>
-</thead><tbody>
-<tr>
-<td><code>name</code></td>
-<td style="text-align: center">String</td>
-<td>Process name (Required) (Default: <code>processes[0].name</code>)</td>
-</tr>
-<tr>
-<td><code>processes</code></td>
-<td style="text-align: center">List of <code>Process</code> objects</td>
-<td>List of <code>Process</code> objects bound to this task. (Required)</td>
-</tr>
-<tr>
-<td><code>constraints</code></td>
-<td style="text-align: center">List of <code>Constraint</code> objects</td>
-<td>List of <code>Constraint</code> objects constraining processes.</td>
-</tr>
-<tr>
-<td><code>resources</code></td>
-<td style="text-align: center"><code>Resource</code> object</td>
-<td>Resource footprint. (Required)</td>
-</tr>
-<tr>
-<td><code>max_failures</code></td>
-<td style="text-align: center">Integer</td>
-<td>Maximum process failures before being considered failed (Default: 1)</td>
-</tr>
-<tr>
-<td><code>max_concurrency</code></td>
-<td style="text-align: center">Integer</td>
-<td>Maximum number of concurrent processes (Default: 0, unlimited concurrency.)</td>
-</tr>
-<tr>
-<td><code>finalization_wait</code></td>
-<td style="text-align: center">Integer</td>
-<td>Amount of time allocated for finalizing processes, in seconds. (Default: 30)</td>
-</tr>
-</tbody></table>
-
-<h4 id="name">name</h4>
-
-<p><code>name</code> is a string denoting the name of this task. It defaults to the name of the first Process in
-the list of Processes associated with the <code>processes</code> attribute.</p>
-
-<h4 id="processes">processes</h4>
-
-<p><code>processes</code> is an unordered list of <code>Process</code> objects. To constrain the order
-in which they run, use <code>constraints</code>.</p>
-
-<h5 id="constraints">constraints</h5>
-
-<p>A list of <code>Constraint</code> objects. Currently it supports only one type,
-the <code>order</code> constraint. <code>order</code> is a list of process names
-that should run in the order given. For example,</p>
-<pre class="highlight text">    process = Process(cmdline = &quot;echo hello {{name}}&quot;)
-    task = Task(name = &quot;echoes&quot;,
-                processes = [process(name = &quot;jim&quot;), process(name = &quot;bob&quot;)],
-                constraints = [Constraint(order = [&quot;jim&quot;, &quot;bob&quot;])])
-</pre>
-<p>Constraints can be supplied ad-hoc and in duplicate. Not all
-Processes need be constrained; however, Tasks with cycles are
-rejected by the Thermos scheduler.</p>
-
-<p>Use the <code>order</code> function as shorthand to generate <code>Constraint</code> lists.
-The following:</p>
-<pre class="highlight text">    order(process1, process2)
-</pre>
-<p>is shorthand for</p>
-<pre class="highlight text">    [Constraint(order = [process1.name(), process2.name()])]
-</pre>
-<h4 id="resources">resources</h4>
-
-<p>Takes a <code>Resource</code> object, which specifies the amounts of CPU, memory, and disk space resources
-to allocate to the Task.</p>
-
-<h4 id="max_failures">max_failures</h4>
-
-<p><code>max_failures</code> is the number of times processes that are part of this
-Task can fail before the entire Task is marked for failure.</p>
-
-<p>For example:</p>
-<pre class="highlight text">    template = Process(max_failures=10)
-    task = Task(
-      name = &quot;fail&quot;,
-      processes = [
-         template(name = &quot;failing&quot;, cmdline = &quot;exit 1&quot;),
-         template(name = &quot;succeeding&quot;, cmdline = &quot;exit 0&quot;)
-      ],
-      max_failures=2)
-</pre>
-<p>The <code>failing</code> Process could fail 10 times before being marked as
-permanently failed, and the <code>succeeding</code> Process would succeed on the
-first run. The task would succeed despite only allowing for two failed
-processes. To be more specific, there would be 10 failed process runs
-yet 1 failed process.</p>
-
-<h4 id="max_concurrency">max_concurrency</h4>
-
-<p>For Tasks with a number of expensive but otherwise independent
-processes, you may want to limit the amount of concurrency
-the Thermos scheduler provides rather than artificially constraining
-it via <code>order</code> constraints. For example, a test framework may
-generate a task with 100 test run processes, but wants to run it on
-a machine with only 4 cores. You can limit the amount of parallelism to
-4 by setting <code>max_concurrency=4</code> in your task configuration.</p>
-
-<p>For example, the following task spawns 180 Processes (&ldquo;mappers&rdquo;)
-to compute individual elements of a 180 degree sine table, all dependent
-upon one final Process (&ldquo;reducer&rdquo;) to tabulate the results:</p>
-<pre class="highlight text">def make_mapper(id):
-  return Process(
-    name = &quot;mapper%03d&quot; % id,
-    cmdline = &quot;echo &#39;scale=50;s(%d*4*a(1)/180)&#39; | bc -l &gt;
-               temp.sine_table.%03d&quot; % (id, id))
-
-def make_reducer():
-  return Process(name = &quot;reducer&quot;, cmdline = &quot;cat temp.* | nl &gt; sine_table.txt
-                 &amp;&amp; rm -f temp.*&quot;)
-
-processes = map(make_mapper, range(180))
-
-task = Task(
-  name = &quot;mapreduce&quot;,
-  processes = processes + [make_reducer()],
-  constraints = [Constraint(order = [mapper.name(), &#39;reducer&#39;]) for mapper
-                 in processes],
-  max_concurrency = 8)
-</pre>
-<h4 id="finalization_wait">finalization_wait</h4>
-
-<p>Tasks have three active stages: <code>ACTIVE</code>, <code>CLEANING</code>, and <code>FINALIZING</code>. The
-<code>ACTIVE</code> stage is when ordinary processes run. This stage lasts as
-long as Processes are running and the Task is healthy. The moment either
-all Processes have finished successfully or the Task has reached a
-maximum Process failure limit, it goes into the <code>CLEANING</code> stage and sends
-SIGTERMs to all currently running Processes and their process trees.
-Once all Processes have terminated, the Task goes into the <code>FINALIZING</code> stage
-and runs all Processes with the &ldquo;final&rdquo; attribute set to True.</p>
-
-<p>This whole process from the end of <code>ACTIVE</code> stage to the end of <code>FINALIZING</code>
-must happen within <code>finalization_wait</code> seconds. If it does not
-finish during that time, all remaining Processes are sent SIGKILLs
-(or if they depend upon uncompleted Processes, are
-never invoked.)</p>
-
-<p>Client applications with higher priority may force a shorter
-finalization wait (e.g. through parameters to <code>thermos kill</code>), so this
-is mostly a best-effort signal.</p>
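-
-<p>For example, a sketch that gives slow finalizers extra time; <code>main</code> and
-<code>checkpoint_logs</code> are assumed to be defined elsewhere:</p>
-<pre class="highlight text">    task = Task(
-      name = &#39;app&#39;,
-      processes = [main, checkpoint_logs],  # checkpoint_logs has final=True
-      finalization_wait = 120)  # two minutes from CLEANING through FINALIZING
-</pre>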
-
-<h3 id="constraint-object">Constraint Object</h3>
-
-<p>Constraint objects currently support only a single ordering constraint, <code>order</code>,
-which specifies that its processes run sequentially in the order given. By
-default, all processes run in parallel when bound to a <code>Task</code> without
-ordering constraints.</p>
-
-<table><thead>
-<tr>
-<th>param</th>
-<th style="text-align: center">type</th>
-<th>description</th>
-</tr>
-</thead><tbody>
-<tr>
-<td>order</td>
-<td style="text-align: center">List of String</td>
-<td>List of processes by name (String) that should be run serially.</td>
-</tr>
-</tbody></table>
-
-<h3 id="resource-object">Resource Object</h3>
-
-<p>Specifies the amount of CPU, RAM, and disk resources the task needs. See the
-<a href="/documentation/latest/resource-isolation/">Resource Isolation document</a> for suggested values and to understand how
-resources are allocated.</p>
-
-<table><thead>
-<tr>
-<th>param</th>
-<th style="text-align: center">type</th>
-<th>description</th>
-</tr>
-</thead><tbody>
-<tr>
-<td><code>cpu</code></td>
-<td style="text-align: center">Float</td>
-<td>Fractional number of cores required by the task.</td>
-</tr>
-<tr>
-<td><code>ram</code></td>
-<td style="text-align: center">Integer</td>
-<td>Bytes of RAM required by the task.</td>
-</tr>
-<tr>
-<td><code>disk</code></td>
-<td style="text-align: center">Integer</td>
-<td>Bytes of disk required by the task.</td>
-</tr>
-</tbody></table>
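-
-<p>For example, using the <code>MB</code> and <code>GB</code> helpers that appear in the examples
-later in this document:</p>
-<pre class="highlight text">    resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 2 * GB)
-</pre>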
-
-<h1 id="job-schema">Job Schema</h1>
-
-<h3 id="job-objects">Job Objects</h3>
-
-<table><thead>
-<tr>
-<th>name</th>
-<th style="text-align: center">type</th>
-<th>description</th>
-</tr>
-</thead><tbody>
-<tr>
-<td><code>task</code></td>
-<td style="text-align: center">Task</td>
-<td>The Task object to bind to this job. Required.</td>
-</tr>
-<tr>
-<td><code>name</code></td>
-<td style="text-align: center">String</td>
-<td>Job name. (Default: inherited from the task attribute&rsquo;s name)</td>
-</tr>
-<tr>
-<td><code>role</code></td>
-<td style="text-align: center">String</td>
-<td>Job role account. Required.</td>
-</tr>
-<tr>
-<td><code>cluster</code></td>
-<td style="text-align: center">String</td>
-<td>Cluster in which this job is scheduled. Required.</td>
-</tr>
-<tr>
-<td><code>environment</code></td>
-<td style="text-align: center">String</td>
-<td>Job environment, default <code>devel</code>. Must be one of <code>prod</code>, <code>devel</code>, <code>test</code> or <code>staging&lt;number&gt;</code>.</td>
-</tr>
-<tr>
-<td><code>contact</code></td>
-<td style="text-align: center">String</td>
-<td>Best email address to reach the owner of the job. For production jobs, this is usually a team mailing list.</td>
-</tr>
-<tr>
-<td><code>instances</code></td>
-<td style="text-align: center">Integer</td>
-<td>Number of instances (sometimes referred to as replicas or shards) of the task to create. (Default: 1)</td>
-</tr>
-<tr>
-<td><code>cron_schedule</code></td>
-<td style="text-align: center">String</td>
-<td>Cron schedule in cron format. May only be used with non-service jobs. See <a href="/documentation/latest/cron-jobs/">Cron Jobs</a> for more information. Default: None (not a cron job.)</td>
-</tr>
-<tr>
-<td><code>cron_collision_policy</code></td>
-<td style="text-align: center">String</td>
-<td>Policy to use when a cron job is triggered while a previous run is still active. KILL_EXISTING: kill the previous run and schedule the new run. CANCEL_NEW: let the previous run continue and cancel the new run. (Default: KILL_EXISTING)</td>
-</tr>
-<tr>
-<td><code>update_config</code></td>
-<td style="text-align: center"><code>update_config</code> object</td>
-<td>Parameters for controlling the rate and policy of rolling updates.</td>
-</tr>
-<tr>
-<td><code>constraints</code></td>
-<td style="text-align: center">dict</td>
-<td>Scheduling constraints for the tasks. See the section on the <a href="#specifying-scheduling-constraints">constraint specification language</a>.</td>
-</tr>
-<tr>
-<td><code>service</code></td>
-<td style="text-align: center">Boolean</td>
-<td>If True, restart tasks regardless of success or failure. (Default: False)</td>
-</tr>
-<tr>
-<td><code>max_task_failures</code></td>
-<td style="text-align: center">Integer</td>
-<td>Maximum number of failures after which the task is considered to have failed (Default: 1). Set to -1 to allow for infinite failures.</td>
-</tr>
-<tr>
-<td><code>priority</code></td>
-<td style="text-align: center">Integer</td>
-<td>Preemption priority to give the task (Default: 0). Tasks with higher priorities may preempt tasks at lower priorities.</td>
-</tr>
-<tr>
-<td><code>production</code></td>
-<td style="text-align: center">Boolean</td>
-<td>Whether or not this is a production task backed by quota (Default: False). Production jobs may preempt any non-production job, and may only be preempted by production jobs in the same role and of higher priority. To run jobs at this level, the job role must have the appropriate quota.</td>
-</tr>
-<tr>
-<td><code>health_check_config</code></td>
-<td style="text-align: center"><code>heath_check_config</code> object</td>
-<td>Parameters for controlling a task&rsquo;s health checks via HTTP. Only used if a  health port was assigned with a command line wildcard.</td>
-</tr>
-</tbody></table>
-
-<h3 id="services">Services</h3>
-
-<p>Jobs with the <code>service</code> flag set to True are called Services. The <code>Service</code>
-alias can be used as shorthand for <code>Job</code> with <code>service=True</code>.
-Services are differentiated from non-service Jobs in that tasks
-always restart on completion, whether successful or unsuccessful.
-Jobs without the service bit set only restart up to
-<code>max_task_failures</code> times and only if they terminated unsuccessfully
-either due to human error or machine failure.</p>
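-
-<p>A minimal sketch of a <code>Service</code>; the names and the <code>web_task</code> Task are
-illustrative:</p>
-<pre class="highlight text">    web_service = Service(
-      name = &#39;web&#39;,
-      role = &#39;www-data&#39;,
-      cluster = &#39;cluster1&#39;,
-      environment = &#39;prod&#39;,
-      instances = 4,
-      task = web_task)   # equivalent to Job(..., service = True)
-</pre>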
-
-<h3 id="updateconfig-objects">UpdateConfig Objects</h3>
-
-<p>Parameters for controlling the rate and policy of rolling updates.</p>
-
-<table><thead>
-<tr>
-<th>object</th>
-<th style="text-align: center">type</th>
-<th>description</th>
-</tr>
-</thead><tbody>
-<tr>
-<td><code>batch_size</code></td>
-<td style="text-align: center">Integer</td>
-<td>Maximum number of shards to be updated in one iteration (Default: 1)</td>
-</tr>
-<tr>
-<td><code>restart_threshold</code></td>
-<td style="text-align: center">Integer</td>
-<td>Maximum number of seconds a shard may take to move into the <code>RUNNING</code> state before it is considered a failure (Default: 60)</td>
-</tr>
-<tr>
-<td><code>watch_secs</code></td>
-<td style="text-align: center">Integer</td>
-<td>Minimum number of seconds a shard must remain in the <code>RUNNING</code> state before it is considered a success (Default: 45)</td>
-</tr>
-<tr>
-<td><code>max_per_shard_failures</code></td>
-<td style="text-align: center">Integer</td>
-<td>Maximum number of restarts per shard during update. Increments total failure count when this limit is exceeded. (Default: 0)</td>
-</tr>
-<tr>
-<td><code>max_total_failures</code></td>
-<td style="text-align: center">Integer</td>
-<td>Maximum number of shard failures to be tolerated in total during an update. Cannot be greater than or equal to the total number of tasks in a job. (Default: 0)</td>
-</tr>
-</tbody></table>
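-
-<p>For example, a sketch of a conservative policy using the parameters above,
-passed as a Job&rsquo;s <code>update_config</code> attribute:</p>
-<pre class="highlight text">    update_config = UpdateConfig(
-      batch_size = 2,              # update two shards per iteration
-      restart_threshold = 60,
-      watch_secs = 45,
-      max_per_shard_failures = 1,
-      max_total_failures = 0)      # abort the update on any total failure
-</pre>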
-
-<h3 id="healthcheckconfig-objects">HealthCheckConfig Objects</h3>
-
-<p>Parameters for controlling a task&rsquo;s health checks via HTTP.</p>
-
-<table><thead>
-<tr>
-<th>object</th>
-<th style="text-align: center">type</th>
-<th>description</th>
-</tr>
-</thead><tbody>
-<tr>
-<td><code>initial_interval_secs</code></td>
-<td style="text-align: center">Integer</td>
-<td>Initial delay for performing an HTTP health check. (Default: 15)</td>
-</tr>
-<tr>
-<td><code>interval_secs</code></td>
-<td style="text-align: center">Integer</td>
-<td>Interval on which to check the task&rsquo;s health via HTTP. (Default: 10)</td>
-</tr>
-<tr>
-<td><code>timeout_secs</code></td>
-<td style="text-align: center">Integer</td>
-<td>HTTP request timeout. (Default: 1)</td>
-</tr>
-<tr>
-<td><code>max_consecutive_failures</code></td>
-<td style="text-align: center">Integer</td>
-<td>Maximum number of consecutive failures that are tolerated before considering a task unhealthy (Default: 0)</td>
-</tr>
-</tbody></table>
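-
-<p>For example, a sketch that loosens the failure tolerance, passed as a Job&rsquo;s
-<code>health_check_config</code> attribute:</p>
-<pre class="highlight text">    health_check_config = HealthCheckConfig(
-      initial_interval_secs = 30,    # give the task extra time to start up
-      interval_secs = 10,
-      timeout_secs = 1,
-      max_consecutive_failures = 3)  # unhealthy after three failed checks in a row
-</pre>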
-
-<h3 id="announcer-objects">Announcer Objects</h3>
-
-<p>If the <code>announce</code> field in the Job configuration is set, each task will be
-registered in the ServerSet <code>/aurora/role/environment/jobname</code> in the
-zookeeper ensemble configured by the executor.  If no Announcer object is specified,
-no announcement will take place.  For more information about ServerSets, see the <a href="/documentation/latest/user-guide/">User Guide</a>.</p>
-
-<table><thead>
-<tr>
-<th>object</th>
-<th style="text-align: center">type</th>
-<th>description</th>
-</tr>
-</thead><tbody>
-<tr>
-<td><code>primary_port</code></td>
-<td style="text-align: center">String</td>
-<td>Which named port to register as the primary endpoint in the ServerSet (Default: <code>http</code>)</td>
-</tr>
-<tr>
-<td><code>portmap</code></td>
-<td style="text-align: center">dict</td>
-<td>A mapping of additional endpoints to be announced in the ServerSet (Default: <code>{ &#39;aurora&#39;: &#39;{{primary_port}}&#39; }</code>)</td>
-</tr>
-</tbody></table>
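-
-<p>For example, a sketch of an <code>Announcer</code> that aliases an additional
-<code>admin</code> name to the primary port; this object would be assigned to the Job&rsquo;s
-<code>announce</code> field, and the port name is illustrative:</p>
-<pre class="highlight text">    announce = Announcer(
-      primary_port = &#39;http&#39;,
-      portmap = {&#39;admin&#39;: &#39;{{primary_port}}&#39;})
-</pre>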
-
-<h3 id="port-aliasing-with-the-announcer-portmap">Port aliasing with the Announcer <code>portmap</code></h3>
-
-<p>The primary endpoint registered in the ServerSet is the one allocated to the port
-specified by the <code>primary_port</code> in the <code>Announcer</code> object, by default
-the <code>http</code> port.  This port can be referenced from anywhere within a configuration
-as <code>{{thermos.ports[http]}}</code>.</p>
-
-<p>Without the port map, each named port would be allocated a unique port number.
-The <code>portmap</code> allows two different named ports to be aliased together.  The default
-<code>portmap</code> aliases the <code>aurora</code> port (i.e. <code>{{thermos.ports[aurora]}}</code>) to
-the <code>http</code> port.  Even though the two ports can be referenced independently,
-only one port is allocated by Mesos.  Any port referenced in a <code>Process</code> object
-but which is not in the portmap will be allocated dynamically by Mesos and announced as well.</p>
-
-<p>It is possible to use the portmap to alias names to static port numbers, e.g.
-<code>{&#39;http&#39;: 80, &#39;https&#39;: 443, &#39;aurora&#39;: &#39;http&#39;}</code>.  In this case, referencing
-<code>{{thermos.ports[aurora]}}</code> would look up <code>{{thermos.ports[http]}}</code> and then
-find a static port 80.  No port would be requested of or allocated by Mesos.</p>
-
-<p>Static ports should be used cautiously as Aurora does nothing to prevent two
-tasks with the same static port allocations from being co-scheduled.
-External constraints such as slave attributes should be used to enforce such
-guarantees should they be needed.</p>
-
-<h1 id="specifying-scheduling-constraints">Specifying Scheduling Constraints</h1>
-
-<p>Most users will not need to specify constraints explicitly, as the
-scheduler automatically inserts reasonable defaults that attempt to
-ensure reliability without impacting schedulability. For example, the
-scheduler inserts a <code>host: limit:1</code> constraint, ensuring
-that your shards run on different physical machines. Please do not
-set this field unless you are sure of what you are doing.</p>
-
-<p>In the <code>Job</code> object there is a map <code>constraints</code> from String to String
-allowing the user to tailor the schedulability of tasks within the job.</p>
-
-<p>Each slave in the cluster is assigned a set of string-valued
-key/value pairs called attributes. For example, consider the host
-<code>cluster1-aaa-03-sr2</code> and its following attributes (given in key:value
-format): <code>host:cluster1-aaa-03-sr2</code> and <code>rack:aaa</code>.</p>
-
-<p>The constraint map&rsquo;s key value is the attribute name in which we
-constrain Tasks within our Job. The value is how we constrain them.
-There are two types of constraints: <em>limit constraints</em> and <em>value
-constraints</em>.</p>
-
-<table><thead>
-<tr>
-<th>constraint</th>
-<th>description</th>
-</tr>
-</thead><tbody>
-<tr>
-<td>Limit</td>
-<td>A string that specifies a limit for a constraint. Starts with <code>&#39;limit:</code> followed by an Integer and closing single quote, such as <code>&#39;limit:1&#39;</code>.</td>
-</tr>
-<tr>
-<td>Value</td>
-<td>A string that specifies a value for a constraint. To include a list of values, separate the values using commas. To negate the values of a constraint, start with a <code>!</code>.</td>
-</tr>
-</tbody></table>
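-
-<p>For example, a value constraint restricting tasks to particular racks; the
-attribute values are illustrative:</p>
-<pre class="highlight text">constraints = {
-  &#39;rack&#39;: &#39;aaa,bbb&#39;,   # run only on hosts whose rack attribute is aaa or bbb
-}
-</pre>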
-
-<p>You can also control machine diversity using constraints. The below
-constraint ensures that no more than two instances of your job may run
-on a single host. Think of this as a &ldquo;group by&rdquo; limit.</p>
-<pre class="highlight text">constraints = {
-  &#39;host&#39;: &#39;limit:2&#39;,
-}
-</pre>
-<p>Likewise, you can use constraints to control rack diversity, e.g. at
-most one task per rack:</p>
-<pre class="highlight text">constraints = {
-  &#39;rack&#39;: &#39;limit:1&#39;,
-}
-</pre>
-<p>Use these constraints sparingly as they can dramatically reduce Tasks&#39; schedulability.</p>
-
-<h1 id="template-namespaces">Template Namespaces</h1>
-
-<p>Currently, a few Pystachio namespaces have special semantics. Using them
-in your configuration allows you to tailor application behavior
-through environment introspection or to interact in special ways with the
-Aurora client or Aurora-provided services.</p>
-
-<h3 id="mesos-namespace">mesos Namespace</h3>
-
-<p>The <code>mesos</code> namespace contains the <code>instance</code> variable that can be used
-to distinguish between Task replicas.</p>
-
-<table><thead>
-<tr>
-<th>variable name</th>
-<th style="text-align: center">type</th>
-<th>description</th>
-</tr>
-</thead><tbody>
-<tr>
-<td><code>instance</code></td>
-<td style="text-align: center">Integer</td>
-<td>The instance number of the created task. A job with 5 replicas has instance numbers 0, 1, 2, 3, and 4.</td>
-</tr>
-</tbody></table>
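-
-<p>For example, a sketch that uses the instance number to give each replica its
-own output file; the command line is illustrative:</p>
-<pre class="highlight text">    process = Process(
-      name = &#39;backup&#39;,
-      cmdline = &#39;./backup.sh --out backup-{{mesos.instance}}.dat&#39;)
-</pre>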
-
-<h3 id="thermos-namespace">thermos Namespace</h3>
-
-<p>The <code>thermos</code> namespace contains variables that work directly on the
-Thermos platform in addition to Aurora. This namespace is fully
-compatible with Tasks invoked via the <code>thermos</code> CLI.</p>
-
-<table><thead>
-<tr>
-<th style="text-align: center">variable</th>
-<th>type</th>
-<th>description</th>
-</tr>
-</thead><tbody>
-<tr>
-<td style="text-align: center"><code>ports</code></td>
-<td>map of string to Integer</td>
-<td>A map of names to port numbers</td>
-</tr>
-<tr>
-<td style="text-align: center"><code>task_id</code></td>
-<td>string</td>
-<td>The task ID assigned to this task.</td>
-</tr>
-</tbody></table>
-
-<p>The <code>thermos.ports</code> namespace is automatically populated by Aurora when
-invoking tasks on Mesos. When running the <code>thermos</code> command directly,
-these ports must be explicitly mapped with the <code>-P</code> option.</p>
-
-<p>For example, if <code>{{thermos.ports[http]}}</code> is specified in a <code>Process</code>
-configuration, it is automatically extracted and auto-populated by
-Aurora, but must be specified with, for example, <code>thermos -P http:12345</code>
-to map <code>http</code> to port 12345 when running via the CLI.</p>
-
-<h1 id="basic-examples">Basic Examples</h1>
-
-<p>These are provided to give a basic understanding of simple Aurora jobs.</p>
-
-<h3 id="hello_world.aurora">hello_world.aurora</h3>
-
-<p>Put the following in a file named <code>hello_world.aurora</code>, substituting your own
-values for fields such as <code>cluster</code>.</p>
-<pre class="highlight text">import os
-hello_world_process = Process(name = &#39;hello_world&#39;, cmdline = &#39;echo hello world&#39;)
-
-hello_world_task = Task(
-  resources = Resources(cpu = 0.1, ram = 16 * MB, disk = 16 * MB),
-  processes = [hello_world_process])
-
-hello_world_job = Job(
-  cluster = &#39;cluster1&#39;,
-  role = os.getenv(&#39;USER&#39;),
-  task = hello_world_task)
-
-jobs = [hello_world_job]
-</pre>
-<p>Then issue the following commands to create and kill the job, using your own values for the job key.</p>
-<pre class="highlight text">aurora create cluster1/$USER/test/hello_world hello_world.aurora
-
-aurora kill cluster1/$USER/test/hello_world
-</pre>
-<h3 id="environment-tailoring">Environment Tailoring</h3>
-
-<h4 id="helloworldproductionized.aurora">hello<em>world</em>productionized.aurora</h4>
-
-<p>Put the following in a file named <code>hello_world_productionized.aurora</code>, substituting your own
-values for fields such as <code>cluster</code>.</p>
-<pre class="highlight text">include(&#39;hello_world.aurora&#39;)
-
-production_resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 2 * GB)
-staging_resources = Resources(cpu = 0.1, ram = 32 * MB, disk = 512 * MB)
-hello_world_template = hello_world_job(
-    name = &quot;hello_world-{{cluster}}&quot;,
-    task = hello_world_task(resources=production_resources))
-
-jobs = [
-  # production jobs
-  hello_world_template(cluster = &#39;cluster1&#39;, instances = 25),
-  hello_world_template(cluster = &#39;cluster2&#39;, instances = 15),
-
-  # staging jobs
-  hello_world_template(
-    cluster = &#39;local&#39;,
-    instances = 1,
-    task = hello_world_task(resources=staging_resources)),
-]
-</pre>
-<p>Then issue the following commands to create and kill the job, using your own values for the job key.</p>
-<pre class="highlight text">aurora create cluster1/$USER/test/hello_world-cluster1 hello_world_productionized.aurora
-
-aurora kill cluster1/$USER/test/hello_world-cluster1
-</pre>
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/documentation/latest/configuration-tutorial/index.html b/publish/documentation/latest/configuration-tutorial/index.html
deleted file mode 100644
index 3468077..0000000
--- a/publish/documentation/latest/configuration-tutorial/index.html
+++ /dev/null
@@ -1,1218 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Transparent Apache Aurora logo with dark background"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h1 id="aurora-configuration-tutorial">Aurora Configuration Tutorial</h1>
-
-<p>How to write Aurora configuration files, including feature descriptions
-and best practices. When writing a configuration file, make use of
-<code>aurora inspect</code>. It takes the same job key and configuration file
-arguments as <code>aurora create</code> or <code>aurora update</code>. It first ensures the
-configuration parses, then outputs it in human-readable form.</p>
-
-<p>You should read this after going through the general <a href="/documentation/latest/tutorial/">Aurora Tutorial</a>.</p>
-
-<ul>
-<li><a href="#user-content-aurora-configuration-tutorial">Aurora Configuration Tutorial</a>
-
-<ul>
-<li><a href="#user-content-the-basics">The Basics</a>
-
-<ul>
-<li><a href="#user-content-use-bottom-to-top-object-ordering">Use Bottom-To-Top Object Ordering</a></li>
-</ul></li>
-<li><a href="#user-content-an-example-configuration-file">An Example Configuration File</a></li>
-<li><a href="#user-content-defining-process-objects">Defining Process Objects</a></li>
-<li><a href="#user-content-getting-your-code-into-the-sandbox">Getting Your Code Into The Sandbox</a></li>
-<li><a href="#user-content-defining-task-objects">Defining Task Objects</a>
-
-<ul>
-<li><a href="#user-content-sequentialtask-running-processes-in-parallel-or-sequentially">SequentialTask: Running Processes in Parallel or Sequentially</a></li>
-<li><a href="#user-content-simpletask">SimpleTask</a></li>
-<li><a href="#user-content-combining-tasks">Combining tasks</a></li>
-</ul></li>
-<li><a href="#user-content-defining-job-objects">Defining Job Objects</a></li>
-<li><a href="#user-content-the-jobs-list">The jobs List</a></li>
-<li><a href="#user-content-templating">Templating</a>
-
-<ul>
-<li><a href="#user-content-templating-1-binding-in-pystachio">Templating 1: Binding in Pystachio</a></li>
-<li><a href="#user-content-structurals-in-pystachio--aurora">Structurals in Pystachio / Aurora</a>
-
-<ul>
-<li><a href="#user-content-mustaches-within-structurals">Mustaches Within Structurals</a></li>
-</ul></li>
-<li><a href="#user-content-templating-2-structurals-are-factories">Templating 2: Structurals Are Factories</a>
-
-<ul>
-<li><a href="#user-content-a-second-way-of-templating">A Second Way of Templating</a></li>
-</ul></li>
-<li><a href="#user-content-advanced-binding">Advanced Binding</a>
-
-<ul>
-<li><a href="#user-content-bind-syntax">Bind Syntax</a></li>
-<li><a href="#user-content-binding-complex-objects">Binding Complex Objects</a>
-
-<ul>
-<li><a href="#user-content-lists"></a></li>
-<li><a href="#user-content-maps"></a></li>
-<li><a href="#user-content-structurals"></a></li>
-</ul></li>
-</ul></li>
-<li><a href="#user-content-structural-binding">Structural Binding</a></li>
-</ul></li>
-<li><a href="#user-content-configuration-file-writing-tips-and-best-practices">Configuration File Writing Tips And Best Practices</a>
-
-<ul>
-<li><a href="#user-content-use-as-few-aurora-files-as-possible">Use As Few .aurora Files As Possible</a></li>
-<li><a href="#user-content-avoid-boilerplate">Avoid Boilerplate</a></li>
-<li><a href="#user-content-thermos-uses-bash-but-thermos-is-not-bash">Thermos Uses bash, But Thermos Is Not bash</a>
-
-<ul>
-<li><a href="#user-content-bad">Bad</a></li>
-<li><a href="#user-content-good">Good</a></li>
-</ul></li>
-<li><a href="#user-content-rarely-use-functions-in-your-configurations">Rarely Use Functions In Your Configurations</a>
-
-<ul>
-<li><a href="#user-content-bad-1">Bad</a></li>
-<li><a href="#user-content-good-1">Good</a></li>
-</ul></li>
-</ul></li>
-</ul></li>
-</ul>
-
-<h2 id="the-basics">The Basics</h2>
-
-<p>To run a job on Aurora, you must specify a configuration file that tells
-Aurora what it needs to know to schedule the job, what Mesos needs to
-run the tasks the job is made up of, and what Thermos needs to run the
-processes that make up the tasks. This file must have
-a <code>.aurora</code> suffix.</p>
-
-<p>A configuration file defines a collection of objects, along with parameter
-values for their attributes. An Aurora configuration file contains the
-following three types of objects:</p>
-
-<ul>
-<li>Job</li>
-<li>Task</li>
-<li>Process</li>
-</ul>
-
-<p>A configuration also specifies a list of <code>Job</code> objects assigned
-to the variable <code>jobs</code>.</p>
-
-<ul>
-<li>jobs (list of defined Jobs to run)</li>
-</ul>
-
-<p>The <code>.aurora</code> file format is just Python. However, <code>Job</code>, <code>Task</code>,
-<code>Process</code>, and other classes are defined by a type-checked dictionary
-templating library called <em>Pystachio</em>, a powerful tool for
-configuration specification and reuse. Pystachio objects are tailored
-via templates surrounded by {{}}.</p>
-
-<p>When writing your <code>.aurora</code> file, you may use any Pystachio datatypes, as
-well as any objects shown in the <a href="/documentation/latest/configuration-reference/"><em>Aurora+Thermos Configuration
-Reference</em></a>, without <code>import</code> statements - the
-Aurora config loader injects them automatically. Other than that, an <code>.aurora</code>
-file works like any other Python script.</p>
-
-<p><a href="/documentation/latest/configuration-reference/"><em>Aurora+Thermos Configuration Reference</em></a>
-has a full reference of all Aurora/Thermos defined Pystachio objects.</p>
-
-<h3 id="use-bottom-to-top-object-ordering">Use Bottom-To-Top Object Ordering</h3>
-
-<p>A well-structured configuration starts with structural templates (if
-any). Structural templates encapsulate in their attributes all the
-differences between Jobs in the configuration that are not directly
-manipulated at the <code>Job</code> level, but typically at the <code>Process</code> or <code>Task</code>
-level; for example, certain processes may be invoked with slightly
-different settings or input.</p>
-
-<p>After structural templates, define, in order, <code>Process</code>es, <code>Task</code>s, and
-<code>Job</code>s.</p>
-
-<p>Structural template names should be <em>UpperCamelCased</em> and their
-instantiations are typically <em>UPPER_SNAKE_CASED</em>. <code>Process</code>, <code>Task</code>,
-and <code>Job</code> names are typically <em>lower_snake_cased</em>. Indentation is typically 2
-spaces.</p>
-
-<h2 id="an-example-configuration-file">An Example Configuration File</h2>
-
-<p>The following is a typical configuration file. Don&rsquo;t worry if there are
-parts you don&rsquo;t understand yet, but you may want to refer back to this
-as you read about its individual parts. Note that names surrounded by
-curly braces {{}} are template variables, which the system replaces with
-bound values for the variables.</p>
-<pre class="highlight text"># --- templates here ---
-class Profile(Struct):
-  package_version = Default(String, &#39;live&#39;)
-  java_binary = Default(String, &#39;/usr/lib/jvm/java-1.7.0-openjdk/bin/java&#39;)
-  extra_jvm_options = Default(String, &#39;&#39;)
-  parent_environment = Default(String, &#39;prod&#39;)
-  parent_serverset = Default(String,
-                             &#39;/foocorp/service/bird/{{parent_environment}}/bird&#39;)
-
-# --- processes here ---
-main = Process(
-  name = &#39;application&#39;,
-  cmdline = &#39;{{profile.java_binary}} -server -Xmx1792m &#39;
-            &#39;{{profile.extra_jvm_options}} &#39;
-            &#39;-jar application.jar &#39;
-            &#39;-upstreamService {{profile.parent_serverset}}&#39;
-)
-
-# --- tasks ---
-base_task = SequentialTask(
-  name = &#39;application&#39;,
-  processes = [
-    Process(
-      name = &#39;fetch&#39;,
-      cmdline = &#39;curl -O &#39;
-                &#39;https://packages.foocorp.com/{{profile.package_version}}/application.jar&#39;),
-  ]
-)
-
-# not always necessary but often useful to have separate task
-# resource classes
-staging_task = base_task(resources =
-                 Resources(cpu = 1.0,
-                           ram = 2048*MB,
-                           disk = 1*GB))
-production_task = base_task(resources =
-                        Resources(cpu = 4.0,
-                                  ram = 2560*MB,
-                                  disk = 10*GB))
-
-# --- job template ---
-job_template = Job(
-  name = &#39;application&#39;,
-  role = &#39;myteam&#39;,
-  contact = &#39;myteam-team@foocorp.com&#39;,
-  instances = 20,
-  service = True,
-  task = production_task
-)
-
-# -- profile instantiations (if any) ---
-PRODUCTION = Profile()
-STAGING = Profile(
-  extra_jvm_options = &#39;-Xloggc:gc.log&#39;,
-  parent_environment = &#39;staging&#39;
-)
-
-# -- job instantiations --
-jobs = [
-  job_template(cluster = &#39;cluster1&#39;, environment = &#39;prod&#39;)
-    .bind(profile = PRODUCTION),
-
-  job_template(cluster = &#39;cluster2&#39;, environment = &#39;prod&#39;)
-    .bind(profile = PRODUCTION),
-
-  job_template(cluster = &#39;cluster1&#39;,
-               environment = &#39;staging&#39;,
-               service = False,
-               task = staging_task,
-               instances = 2)
-    .bind(profile = STAGING),
-]
-</pre>
-<h2 id="defining-process-objects">Defining Process Objects</h2>
-
-<p>Processes are handled by the Thermos system. A process is a single
-executable step run as a part of an Aurora task, which consists of a
-bash-executable statement.</p>
-
-<p>The key (and required) <code>Process</code> attributes are:</p>
-
-<ul>
-<li>  <code>name</code>: Any string which is a valid Unix filename (no slashes,
-NULLs, or leading periods). The <code>name</code> value must be unique relative
-to other Processes in a <code>Task</code>.</li>
-<li>  <code>cmdline</code>: A command line run in a bash subshell, so you can use
-bash scripts. Nothing is supplied for command-line arguments,
-so <code>$*</code> is unspecified.</li>
-</ul>
-
-<p>Many tiny processes make managing configurations more difficult. For
-example, the following is a bad way to define processes.</p>
-<pre class="highlight text">copy = Process(
-  name = &#39;copy&#39;,
-  cmdline = &#39;curl -O https://packages.foocorp.com/app.zip&#39;
-)
-unpack = Process(
-  name = &#39;unpack&#39;,
-  cmdline = &#39;unzip app.zip&#39;
-)
-remove = Process(
-  name = &#39;remove&#39;,
-  cmdline = &#39;rm -f app.zip&#39;
-)
-run = Process(
-  name = &#39;app&#39;,
-  cmdline = &#39;java -jar app.jar&#39;
-)
-run_task = Task(
-  processes = [copy, unpack, remove, run],
-  constraints = order(copy, unpack, remove, run)
-)
-</pre>
-<p>Since <code>cmdline</code> runs in a bash subshell, you can chain commands
-with <code>&amp;&amp;</code> or <code>||</code>.</p>
-
-<p>When defining a <code>Task</code> that is just a list of Processes run in a
-particular order, use <code>SequentialTask</code>, as described in the <a href="#Task"><em>Defining</em>
-<code>Task</code> <em>Objects</em></a> section. The following simplifies and combines the
-above multiple <code>Process</code> definitions into just two.</p>
-<pre class="highlight text">stage = Process(
-  name = &#39;stage&#39;,
-  cmdline = &#39;curl -O https://packages.foocorp.com/app.zip &amp;&amp; &#39;
-            &#39;unzip app.zip &amp;&amp; rm -f app.zip&#39;)
-
-run = Process(name = &#39;app&#39;, cmdline = &#39;java -jar app.jar&#39;)
-
-run_task = SequentialTask(processes = [stage, run])
-</pre>
-<p><code>Process</code> also has five optional attributes, each with a default value
-if one isn&rsquo;t specified in the configuration:</p>
-
-<ul>
-<li><p><code>max_failures</code>: Defaulting to <code>1</code>, the maximum number of failures
-(non-zero exit statuses) before this <code>Process</code> is marked permanently
-failed and not retried. If a <code>Process</code> permanently fails, Thermos
-checks the <code>Process</code> object&rsquo;s containing <code>Task</code> for the task&rsquo;s
-failure limit (usually 1) to determine whether or not the <code>Task</code>
-should be failed. Setting <code>max_failures</code> to <code>0</code> means that this
-process will keep retrying until a successful (zero) exit status is
-achieved. Retries happen at most once every <code>min_duration</code> seconds
-to prevent effectively mounting a denial of service attack against
-the coordinating scheduler.</p></li>
-<li><p><code>daemon</code>: Defaulting to <code>False</code>, if <code>daemon</code> is set to <code>True</code>, a
-successful (zero) exit status does not prevent future process runs.
-Instead, the <code>Process</code> reinvokes after <code>min_duration</code> seconds.
-However, the maximum failure limit (<code>max_failures</code>) still
-applies. A combination of <code>daemon=True</code> and <code>max_failures=0</code> retries
-a <code>Process</code> indefinitely regardless of exit status. This should
-generally be avoided for very short-lived processes because of the
-accumulation of checkpointed state for each process run. When
-running in Aurora, <code>max_failures</code> is capped at
-100.</p></li>
-<li><p><code>ephemeral</code>: Defaulting to <code>False</code>, if <code>ephemeral</code> is <code>True</code>, the
-<code>Process</code>&rsquo;s status is not used to determine if its bound <code>Task</code> has
-completed. For example, consider a <code>Task</code> with a
-non-ephemeral webserver process and an ephemeral logsaver process
-that periodically checkpoints its log files to a centralized data
-store. The <code>Task</code> is considered finished once the webserver process
-finishes, regardless of the logsaver&rsquo;s current status.</p></li>
-<li><p><code>min_duration</code>: Defaults to <code>15</code>. Processes may succeed or fail
-multiple times during a single Task. Each result is called a
-<em>process run</em> and this value is the minimum number of seconds the
-scheduler waits before re-running the same process.</p></li>
-<li><p><code>final</code>: Defaulting to <code>False</code>, this marks a finalizing <code>Process</code>
-that should run last. Processes can be grouped into two classes:
-<em>ordinary</em> and <em>finalizing</em>. By default, Thermos Processes are
-ordinary. They run as long as the <code>Task</code> is considered
-healthy (i.e. hasn&rsquo;t reached a failure limit). But once all ordinary
-Thermos Processes have finished, or the <code>Task</code> has reached a
-certain failure threshold, Thermos moves into a <em>finalization</em> stage
-and runs all finalizing Processes. These are typically used for
-cleaning up after the <code>Task</code>, such as log checkpointers or
-e-mail notifications of a completed Task. Finalizing processes may
-not depend upon ordinary processes or vice versa; however, finalizing
-processes may depend upon other finalizing processes and otherwise
-run on a typical process schedule. A short sketch of these attributes
-follows this list.</p></li>
-</ul>
-
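-<p>As a rough sketch of how these attributes combine (the process names
-and commands here are illustrative, not part of any real configuration),
-consider a webserver Task with a daemonized, ephemeral log saver and a
-finalizing cleanup step:</p>
-<pre class="highlight text">webserver = Process(name = &#39;webserver&#39;, cmdline = &#39;./run_server.sh&#39;)
-
-# Reinvoked after every exit, at most once per min_duration seconds,
-# and its status is never used to determine Task completion.
-log_saver = Process(
-  name = &#39;log_saver&#39;,
-  cmdline = &#39;./save_logs.sh&#39;,
-  daemon = True,
-  ephemeral = True,
-  max_failures = 0)
-
-# Runs during the finalization stage, after ordinary processes finish.
-cleanup = Process(
-  name = &#39;cleanup&#39;,
-  cmdline = &#39;rm -rf scratch/&#39;,
-  final = True)
-
-task = Task(..., processes = [webserver, log_saver, cleanup])
-</pre>
-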
-<h2 id="getting-your-code-into-the-sandbox">Getting Your Code Into The Sandbox</h2>
-
-<p>When using Aurora, you need to get your executable code into its &ldquo;sandbox&rdquo;, specifically
-the Task sandbox where the code executes for the Processes that make up that Task.</p>
-
-<p>Each Task has a sandbox created when the Task starts and garbage
-collected when it finishes. All of a Task&rsquo;s processes run in its
-sandbox, so processes can share state by using a shared current
-working directory.</p>
-
-<p>Typically, you save this code somewhere. You then need to define a Process
-in your <code>.aurora</code> configuration file that fetches the code from that
-location to where the slave can see it. For a public cloud, that can be anywhere
-public on the Internet, such as S3. For a private cloud, you need to put it
-on an accessible HDFS cluster or similar internal storage.</p>
-
-<p>The template for this Process is:</p>
-<pre class="highlight text">&lt;name&gt; = Process(
-  name = &#39;&lt;name&gt;&#39;,
-  cmdline = &#39;&lt;command to copy and extract code archive into current working directory&gt;&#39;
-)
-</pre>
-<p>Note: Be sure the extracted code archive has an executable.</p>
-
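-<p>For example, a minimal fetch Process might look like the following
-(the package URL is hypothetical):</p>
-<pre class="highlight text">fetch = Process(
-  name = &#39;fetch&#39;,
-  cmdline = &#39;curl -O https://packages.example.com/myapp.tar.gz &amp;&amp; tar xzf myapp.tar.gz&#39;
-)
-</pre>
-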
-<h2 id="defining-task-objects">Defining Task Objects</h2>
-
-<p>Tasks are handled by Mesos. A task is a collection of processes that
-runs in a shared sandbox. It&rsquo;s the fundamental unit Aurora uses to
-schedule the datacenter; essentially what Aurora does is find places
-in the cluster to run tasks.</p>
-
-<p>The key (and required) parts of a Task are:</p>
-
-<ul>
-<li><p><code>name</code>: A string giving the Task&rsquo;s name. If a Task is
-not given a name, it inherits the name of the first Process in its
-Process list.</p></li>
-<li><p><code>processes</code>: An unordered list of Process objects bound to the Task.
-The value of the optional <code>constraints</code> attribute affects the
-contents as a whole. Currently, the only constraint, <code>order</code>, determines if
-the processes run in parallel or sequentially.</p></li>
-<li><p><code>resources</code>: A <code>Resource</code> object defining the Task&rsquo;s resource
-footprint. A <code>Resource</code> object has three attributes:</p>
-
-<ul>
-<li>  <code>cpu</code>: A Float, the fractional number of cores the Task requires.</li>
-<li>  <code>ram</code>: An Integer, RAM bytes the Task requires.</li>
-<li>  <code>disk</code>: An Integer, disk bytes the Task requires.</li>
-</ul></li>
-</ul>
-
-<p>A basic Task definition looks like:</p>
-<pre class="highlight text">Task(
-    name=&quot;hello_world&quot;,
-    processes=[Process(name = &quot;hello_world&quot;, cmdline = &quot;echo hello world&quot;)],
-    resources=Resources(cpu = 1.0,
-                        ram = 1*GB,
-                        disk = 1*GB))
-</pre>
-<p>There are four optional Task attributes:</p>
-
-<ul>
-<li><p><code>constraints</code>: A list of <code>Constraint</code> objects that constrain the
-Task&rsquo;s processes. Currently there is only one type, the <code>order</code>
-constraint. For example the following requires that the processes
-run in the order <code>foo</code>, then <code>bar</code>.</p>
-<pre class="highlight text">constraints = [Constraint(order=[&#39;foo&#39;, &#39;bar&#39;])]
-</pre>
-<p>There is an <code>order()</code> helper that converts <code>order(&#39;foo&#39;, &#39;bar&#39;, &#39;baz&#39;)</code>
-into <code>[Constraint(order=[&#39;foo&#39;, &#39;bar&#39;, &#39;baz&#39;])]</code>.
-<code>order()</code> accepts Process name strings (<code>&#39;foo&#39;</code>, <code>&#39;bar&#39;</code>) or the Processes
-themselves: given <code>foo=Process(name=&#39;foo&#39;, ...)</code> and <code>bar=Process(name=&#39;bar&#39;, ...)</code>,
-you can write <code>constraints=order(foo, bar)</code>.</p>
-
-<p>Note that Thermos rejects tasks with process cycles.</p></li>
-<li><p><code>max_failures</code>: Defaulting to <code>1</code>, the number of failed processes
-needed for the <code>Task</code> to be marked as failed. Note how this
-interacts with individual Processes&rsquo; <code>max_failures</code> values. Assume a
-Task has two Processes and a <code>max_failures</code> value of <code>2</code>. So both
-Processes must fail for the Task to fail. Now, assume each of the
-Task&rsquo;s Processes has its own <code>max_failures</code> value of <code>10</code>. If
-Process &ldquo;A&rdquo; fails 5 times before succeeding, and Process &ldquo;B&rdquo; fails
-10 times and is then marked as failing, their parent Task succeeds.
-Even though there were 15 individual failures by its Processes, only
-1 of its Processes was finally marked as failing. Since 1 is less
-than the 2 that is the Task&rsquo;s <code>max_failures</code> value, the Task does
-not fail.</p></li>
-<li><p><code>max_concurrency</code>: Defaulting to <code>0</code>, the maximum number of
-concurrent processes in the Task. <code>0</code> specifies unlimited
-concurrency. For Tasks with many expensive but otherwise independent
-processes, you can limit the amount of concurrency Thermos schedules
-instead of artificially constraining them through <code>order</code>
-constraints. For example, a test framework may generate a Task with
-100 test run processes but give it <code>resources.cpus=4</code>. Limit the
-amount of parallelism to 4 by setting <code>max_concurrency=4</code>, as in
-the sketch after this list.</p></li>
-<li><p><code>finalization_wait</code>: Defaulting to <code>30</code>, the number of seconds
-allocated for finalizing the Task&rsquo;s processes. A Task starts in
-<code>ACTIVE</code> state when Processes run and stays there as long as the Task
-is healthy and Processes run. When all Processes finish successfully
-or the Task reaches its maximum process failure limit, it goes into
-<code>CLEANING</code> state. In <code>CLEANING</code>, it sends <code>SIGTERM</code>s to any still-running
-Processes. When all Processes terminate, the Task goes into
-<code>FINALIZING</code> state and runs all Processes whose <code>final</code>
-attribute is <code>True</code>. Everything from the end of <code>ACTIVE</code>
-to the end of <code>FINALIZING</code> must happen within <code>finalization_wait</code>
-seconds. If not, any still-running Processes are sent
-<code>SIGKILL</code>s (or, if they depend on not-yet-completed Processes, are
-never invoked).</p></li>
-</ul>
-
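-<p>A minimal sketch of the test-framework example from the
-<code>max_concurrency</code> bullet above (the process list is assumed to be
-built elsewhere):</p>
-<pre class="highlight text">test_task = Task(
-  name = &#39;run_tests&#39;,
-  processes = test_run_processes,  # e.g. 100 independent test processes
-  resources = Resources(cpu = 4.0, ram = 4*GB, disk = 8*GB),
-  max_concurrency = 4)
-</pre>
-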
-<h3 id="sequentialtask:-running-processes-in-parallel-or-sequentially">SequentialTask: Running Processes in Parallel or Sequentially</h3>
-
-<p>By default, a Task with several Processes runs them in parallel. There
-are two ways to run Processes sequentially:</p>
-
-<ul>
-<li><p>Include an <code>order</code> constraint in the Task definition&rsquo;s <code>constraints</code>
-attribute whose arguments specify the processes&#39; run order:</p>
-<pre class="highlight text">Task( ... processes=[process1, process2, process3],
-      constraints = order(process1, process2, process3), ...)
-</pre></li>
-<li><p>Use <code>SequentialTask</code> instead of <code>Task</code>; it automatically runs
-processes in the order specified in the <code>processes</code> attribute. No
-<code>constraint</code> parameter is needed:</p>
-<pre class="highlight text">SequentialTask( ... processes=[process1, process2, process3] ...)
-</pre></li>
-</ul>
-
-<h3 id="simpletask">SimpleTask</h3>
-
-<p>For quickly creating simple tasks, use the <code>SimpleTask</code> helper. It
-creates a basic task from a provided name and command line using a
-default set of resources. For example, in a <code>.aurora</code> configuration
-file:</p>
-<pre class="highlight text">SimpleTask(name=&quot;hello_world&quot;, command=&quot;echo hello world&quot;)
-</pre>
-<p>is equivalent to</p>
-<pre class="highlight text">Task(name=&quot;hello_world&quot;,
-     processes=[Process(name = &quot;hello_world&quot;, cmdline = &quot;echo hello world&quot;)],
-     resources=Resources(cpu = 1.0,
-                         ram = 1*GB,
-                         disk = 1*GB))
-</pre>
-<p>The simplest idiomatic Job configuration thus becomes:</p>
-<pre class="highlight text">import os
-hello_world_job = Job(
-  task=SimpleTask(name=&quot;hello_world&quot;, command=&quot;echo hello world&quot;),
-  role=os.getenv(&#39;USER&#39;),
-  cluster=&quot;cluster1&quot;)
-</pre>
-<p>When written to <code>hello_world.aurora</code>, you invoke it with a simple
-<code>aurora create cluster1/$USER/test/hello_world hello_world.aurora</code>.</p>
-
-<h3 id="combining-tasks">Combining tasks</h3>
-
-<p><code>Tasks.concat</code> (synonym: <code>concat_tasks</code>) and
-<code>Tasks.combine</code> (synonym: <code>combine_tasks</code>) merge multiple Task definitions
-into a single Task. It may be easier to define complex Jobs
-as smaller constituent Tasks. But since a Job only includes a single
-Task, the subtasks must be combined before using them in a Job.
-Smaller Tasks can also be reused between Jobs, instead of having to
-repeat their definition for multiple Jobs.</p>
-
-<p>With both methods, the merged Task takes the first Task&rsquo;s name. The
-difference between the two is the result Task&rsquo;s process ordering.</p>
-
-<ul>
-<li><p><code>Tasks.combine</code> runs its subtasks&#39; processes in no particular order.
-The new Task&rsquo;s resource consumption is the sum of all its subtasks&#39;
-consumption.</p></li>
-<li><p><code>Tasks.concat</code> runs its subtasks in the order supplied, with each
-subtask&rsquo;s processes run serially between tasks. It is analogous to
-the <code>order</code> constraint helper, except at the Task level instead of
-the Process level. The new Task&rsquo;s resource consumption is the
-maximum value specified by any subtask for each Resource attribute
-(cpu, ram and disk).</p></li>
-</ul>
-
-<p>For example, given the following:</p>
-<pre class="highlight text">setup_task = Task(
-  ...
-  processes=[download_interpreter, update_zookeeper],
-  # It is important to note that Tasks.concat has
-  # no effect on the ordering of the processes within a task;
-  # hence the necessity of the order() statement below
-  # (otherwise, the order in which download_interpreter
-  # and update_zookeeper run will be non-deterministic)
-  constraints=order(download_interpreter, update_zookeeper),
-  ...
-)
-
-run_task = SequentialTask(
-  ...
-  processes=[download_application, start_application],
-  ...
-)
-
-combined_task = Tasks.concat(setup_task, run_task)
-</pre>
-<p>The <code>Tasks.concat</code> command merges the two Tasks into a single Task and
-ensures all processes in <code>setup_task</code> run before the processes
-in <code>run_task</code>. Conceptually, the task is reduced to:</p>
-<pre class="highlight text">task = Task(
-  ...
-  processes=[download_interpreter, update_zookeeper,
-             download_application, start_application],
-  constraints=order(download_interpreter, update_zookeeper,
-                    download_application, start_application),
-  ...
-)
-</pre>
-<p>In the case of <code>Tasks.combine</code>, the two schedules run in parallel:</p>
-<pre class="highlight text">task = Task(
-  ...
-  processes=[download_interpreter, update_zookeeper,
-             download_application, start_application],
-  constraints=order(download_interpreter, update_zookeeper) +
-                    order(download_application, start_application),
-  ...
-)
-</pre>
-<p>In the latter case, each of the two sequences may operate in parallel.
-Of course, this may not be the intended behavior (for example, if
-the <code>start_application</code> Process implicitly relies
-upon <code>download_interpreter</code>). Make sure you understand the difference
-between using one or the other.</p>
-
-<h2 id="defining-job-objects">Defining Job Objects</h2>
-
-<p>A job is a group of identical tasks that Aurora can run in a Mesos cluster.</p>
-
-<p>A <code>Job</code> object is defined by the values of several attributes, some
-required and some optional. The required attributes are:</p>
-
-<ul>
-<li><p><code>task</code>: Task object to bind to this job. Note that a Job can
-only take a single Task.</p></li>
-<li><p><code>role</code>: Job&rsquo;s role account; in other words, the user account to run
-the job as on a Mesos cluster machine. A common value is
-<code>os.getenv(&#39;USER&#39;)</code>; using a Python command to get the user who
-submits the job request. The other common value is the service
-account that runs the job, e.g. <code>www-data</code>.</p></li>
-<li><p><code>environment</code>: Job&rsquo;s environment, typical values
-are <code>devel</code>, <code>test</code>, or <code>prod</code>.</p></li>
-<li><p><code>cluster</code>: Aurora cluster to schedule the job in, defined in
-<code>/etc/aurora/clusters.json</code> or <code>~/.clusters.json</code>. You can specify
-jobs where the only difference is the <code>cluster</code>, then at run time
-only run the Job whose job key includes your desired cluster&rsquo;s name.</p></li>
-</ul>
-
-<p>You usually see a <code>name</code> parameter. By default, <code>name</code> inherits its
-value from the Job&rsquo;s associated Task object, but you can override this
-default. For these four parameters, a Job definition might look like:</p>
-<pre class="highlight text">foo_job = Job( name = &#39;foo&#39;, cluster = &#39;cluster1&#39;,
-          role = os.getenv(&#39;USER&#39;), environment = &#39;prod&#39;,
-          task = foo_task)
-</pre>
-<p>In addition to the required attributes, there are several optional
-attributes. The first (strongly recommended) optional attribute is:</p>
-
-<ul>
-<li>  <code>contact</code>: An email address for the Job&rsquo;s owner. For production
-jobs, it is usually a team mailing list.</li>
-</ul>
-
-<p>Two more attributes deal with how to handle failure of the Job&rsquo;s Task:</p>
-
-<ul>
-<li><p><code>max_task_failures</code>: An integer, defaulting to <code>1</code>, specifying the maximum
-number of Task failures after which the Job is considered failed.
-<code>-1</code> allows for infinite failures.</p></li>
-<li><p><code>service</code>: A boolean, defaulting to <code>False</code>, which if <code>True</code>
-restarts tasks regardless of whether they succeeded or failed. In
-other words, if <code>True</code>, after the Job&rsquo;s Task completes, it
-automatically starts again. This is for Jobs you want to run
-continuously, rather than doing a single run.</p></li>
-</ul>
-
-<p>Three attributes deal with configuring the Job&rsquo;s Task:</p>
-
-<ul>
-<li><p><code>instances</code>: Defaulting to <code>1</code>, the number of
-instances/replicas/shards of the Job&rsquo;s Task to create.</p></li>
-<li><p><code>priority</code>: Defaulting to <code>0</code>, the Job&rsquo;s Task&rsquo;s preemption priority,
-for which higher values may preempt Tasks from Jobs with lower
-values.</p></li>
-<li><p><code>production</code>: a Boolean, defaulting to <code>False</code>, specifying that this
-is a production job backed by quota. Tasks from production Jobs may
-preempt tasks from any non-production job, and may only be preempted
-by tasks from production jobs in the same role with higher
-priority. <strong>WARNING</strong>: To run Jobs at this level, the Job role must
-have the appropriate quota.</p></li>
-</ul>
-
-<p>The final three Job attributes each take an object as their value.</p>
-
-<ul>
-<li>  <code>update_config</code>: An <code>UpdateConfig</code>
-object provides parameters for controlling the rate and policy of
-rolling updates (a sketch follows this list). The <code>UpdateConfig</code> parameters are:
-
-<ul>
-<li>  <code>batch_size</code>: An integer, defaulting to <code>1</code>, specifying the
-maximum number of shards to update in one iteration.</li>
-<li>  <code>restart_threshold</code>: An integer, defaulting to <code>60</code>, specifying
-the maximum number of seconds a shard may take to move into the
-<code>RUNNING</code> state before it is considered a failure.</li>
-<li>  <code>watch_secs</code>: An integer, defaulting to <code>45</code>, specifying the
-minimum number of seconds a shard must remain in the <code>RUNNING</code>
-state before it is considered a success.</li>
-<li>  <code>max_per_shard_failures</code>: An integer, defaulting to <code>0</code>,
-specifying the maximum number of restarts per shard during an
-update. When the limit is exceeded, it increments the total
-failure count.</li>
-<li>  <code>max_total_failures</code>: An integer, defaulting to <code>0</code>, specifying
-the maximum number of shard failures tolerated during an update.
-Cannot be equal to or greater than the job&rsquo;s total number of
-tasks.</li>
-</ul></li>
-<li>  <code>health_check_config</code>: A <code>HealthCheckConfig</code> object that provides
-parameters for controlling a Task&rsquo;s health checks via HTTP. Only
-used if a health port was assigned with a command line wildcard. The
-<code>HealthCheckConfig</code> parameters are:
-
-<ul>
-<li>  <code>initial_interval_secs</code>: An integer, defaulting to <code>15</code>,
-specifying the initial delay for doing an HTTP health check.</li>
-<li>  <code>interval_secs</code>: An integer, defaulting to <code>10</code>, specifying the
-number of seconds in the interval between checking the Task&rsquo;s
-health.</li>
-<li>  <code>timeout_secs</code>: An integer, defaulting to <code>1</code>, specifying the
-number of seconds within which the application must respond to an HTTP
-health check with <code>OK</code>; exceeding this is considered a failure.</li>
-<li>  <code>max_consecutive_failures</code>: An integer, defaulting to <code>0</code>,
-specifying the maximum number of consecutive failures before a
-task is unhealthy.</li>
-</ul></li>
-<li>  <code>constraints</code>: A <code>dict</code> Python object, specifying Task scheduling
-constraints. Most users will not need to specify constraints, as the
-scheduler automatically inserts reasonable defaults. Please do not
-set this field unless you are sure of what you are doing. See the
-section in the Aurora + Thermos Reference manual on <a href="/documentation/latest/configuration-reference/">Specifying
-Scheduling Constraints</a> for more information.</li>
-</ul>
-
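-<p>A sketch combining <code>UpdateConfig</code> and <code>HealthCheckConfig</code>
-(the values shown are purely illustrative, not recommendations):</p>
-<pre class="highlight text">job = Job(
-  ...
-  update_config = UpdateConfig(batch_size = 3,
-                               restart_threshold = 60,
-                               watch_secs = 45,
-                               max_per_shard_failures = 2),
-  health_check_config = HealthCheckConfig(interval_secs = 10,
-                                          max_consecutive_failures = 3)
-)
-</pre>
-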
-<h2 id="the-jobs-list">The jobs List</h2>
-
-<p>At the end of your <code>.aurora</code> file, you need to specify a list of the
-file&rsquo;s defined Jobs to run in the order listed. For example, the
-following runs first <code>job1</code>, then <code>job2</code>, then <code>job3</code>.</p>
-
-<pre class="highlight text">jobs = [job1, job2, job3]
-</pre>
-
-<h2 id="templating">Templating</h2>
-
-<p>The <code>.aurora</code> file format is just Python. However, <code>Job</code>, <code>Task</code>,
-<code>Process</code>, and other classes are defined by a templating library called
-<em>Pystachio</em>, a powerful tool for configuration specification and reuse.</p>
-
-<p><a href="/documentation/latest/configuration-reference/">Aurora+Thermos Configuration Reference</a>
-has a full reference of all Aurora/Thermos defined Pystachio objects.</p>
-
-<p>When writing your <code>.aurora</code> file, you may use any Pystachio datatypes, as
-well as any objects shown in the <em>Aurora+Thermos Configuration
-Reference</em> without <code>import</code> statements - the Aurora config loader
-injects them automatically. Other than that, the <code>.aurora</code> format
-works like any other Python script.</p>
-
-<h3 id="templating-1:-binding-in-pystachio">Templating 1: Binding in Pystachio</h3>
-
-<p>Pystachio uses the visually distinctive {{}} to indicate template
-variables. These are often called &ldquo;mustache variables&rdquo; after the
-similarly appearing variables in the Mustache templating system and
-because the curly braces resemble mustaches.</p>
-
-<p>If you are familiar with the Mustache system, note that templates in
-Pystachio have significant differences. They have no nesting, joining, or
-inheritance semantics. On the other hand, templates are evaluated
-iteratively, so this affords some level of indirection.</p>
-
-<p>Let&rsquo;s start with the simplest template: text with one
-variable, in this case <code>name</code>:</p>
-<pre class="highlight text">Hello {{name}}
-</pre>
-<p>If we evaluate this as is, we&rsquo;d get back:</p>
-<pre class="highlight text">Hello
-</pre>
-<p>If a template variable doesn&rsquo;t have a value, when evaluated it&rsquo;s
-replaced with nothing. If we add a binding to give it a value:</p>
-<pre class="highlight json"><span class="p">{</span><span class="w"> </span><span class="s2">&quot;name&quot;</span><span class="w"> </span><span class="p">:</span><span class="w"> </span><span class="s2">&quot;Tom&quot;</span><span class="w"> </span><span class="p">}</span><span class="w">
-</span></pre>
-<p>We&rsquo;d get back:</p>
-<pre class="highlight text">Hello Tom
-</pre>
-<p>We can also use {{}} variables as sectional variables. Let&rsquo;s say we
-have:</p>
-<pre class="highlight text">{{#x}} Testing... {{/x}}
-</pre>
-<p>If <code>x</code> evaluates to <code>True</code>, the text between the sectional tags is
-shown. If there is no value for <code>x</code> or it evaluates to <code>False</code>, the
-between tags text is not shown. So, at a basic level, a sectional
-variable acts as a conditional.</p>
-
-<p>However, if the sectional variable evaluates to a list, array, etc. it
-acts as a <code>foreach</code>. For example,</p>
-<pre class="highlight text">{{#x}} {{name}} {{/x}}
-</pre>
-<p>with</p>
-<pre class="highlight json"><span class="p">{</span><span class="w"> </span><span class="s2">&quot;x&quot;</span><span class="p">:</span><span class="w"> </span><span class="p">[</span><span class="w"> </span><span class="p">{</span><span class="w"> </span><span class="s2">&quot;name&quot;</span><span class="w"> </span><span class="p">:</span><span class="w"> </span><span class="s2">&quot;tic&quot;</span><span class="w"> </span><span class="p">}</span><span class="w"> </span><span class="p">{</span><span class="w"> </span><span class="s2">&quot;name&quot;</span><span class="w"> </span><span class="p">:</span><span class="w"> </span><span class="s2">&quot;tac&quot;</span><span class="w"> </span><span class="p">}</span><span class="w"> </span><span class="p">{</span><span class="w"> </span><span class="s2">&quot;name&quot;</span><span class="w"> </span><span class="p">:</span><span class="w"> </span><span class="s2">&quot;toe&quot;</span><span class="w"> </span><span class="p">}</span><span class="w"> </span><span class="p">]</span><span class="w"> </span><span class="p">}</span><span class="w">
-</span></pre>
-<p>evaluates to</p>
-<pre class="highlight text">tic tac toe
-</pre>
-<p>Every Pystachio object has an associated <code>.bind</code> method that can bind
-values to {{}} variables. Bindings are not immediately evaluated.
-Instead, they are evaluated only when the interpolated value of the
-object is necessary, e.g. for performing equality or serializing a
-message over the wire.</p>
-
-<p>Objects with and without mustache templated variables behave
-differently:</p>
-<pre class="highlight text">&gt;&gt;&gt; Float(1.5)
-Float(1.5)
-
-&gt;&gt;&gt; Float(&#39;{{x}}.5&#39;)
-Float({{x}}.5)
-
-&gt;&gt;&gt; Float(&#39;{{x}}.5&#39;).bind(x = 1)
-Float(1.5)
-
-&gt;&gt;&gt; Float(&#39;{{x}}.5&#39;).bind(x = 1) == Float(1.5)
-True
-
-&gt;&gt;&gt; contextual_object = String(&#39;{{metavar{{number}}}}&#39;).bind(
-... metavar1 = &quot;first&quot;, metavar2 = &quot;second&quot;)
-
-&gt;&gt;&gt; contextual_object
-String({{metavar{{number}}}})
-
-&gt;&gt;&gt; contextual_object.bind(number = 1)
-String(first)
-
-&gt;&gt;&gt; contextual_object.bind(number = 2)
-String(second)
-</pre>
-<p>You usually bind simple key to value pairs, but you can also bind three
-other objects: lists, dictionaries, and structurals. These will be
-described in detail later.</p>
-
-<h3 id="structurals-in-pystachio-/-aurora">Structurals in Pystachio / Aurora</h3>
-
-<p>Most Aurora/Thermos users don&rsquo;t ever (knowingly) interact with <code>String</code>,
-<code>Float</code>, or <code>Integer</code> Pystachio objects directly. Instead they interact
-with derived structural (<code>Struct</code>) objects that are collections of
-fundamental and structural objects. The structural object components are
-called <em>attributes</em>. Aurora&rsquo;s most used structural objects are <code>Job</code>,
-<code>Task</code>, and <code>Process</code>:</p>
-<pre class="highlight text">class Process(Struct):
-  cmdline = Required(String)
-  name = Required(String)
-  max_failures = Default(Integer, 1)
-  daemon = Default(Boolean, False)
-  ephemeral = Default(Boolean, False)
-  min_duration = Default(Integer, 5)
-  final = Default(Boolean, False)
-</pre>
-<p>Construct default objects by following the object&rsquo;s type with (). If you
-want an attribute to have a value different from its default, include
-the attribute name and value inside the parentheses.</p>
-<pre class="highlight text">&gt;&gt;&gt; Process()
-Process(daemon=False, max_failures=1, ephemeral=False,
-  min_duration=5, final=False)
-</pre>
-<p>Attribute values can be template variables, which then receive specific
-values when creating the object.</p>
-<pre class="highlight text">&gt;&gt;&gt; Process(cmdline = &#39;echo {{message}}&#39;)
-Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
-        cmdline=echo {{message}}, final=False)
-
-&gt;&gt;&gt; Process(cmdline = &#39;echo {{message}}&#39;).bind(message = &#39;hello world&#39;)
-Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
-        cmdline=echo hello world, final=False)
-</pre>
-<p>A powerful binding property is that all of an object&rsquo;s children inherit its
-bindings:</p>
-<pre class="highlight text">&gt;&gt;&gt; List(Process)([
-... Process(name = &#39;{{prefix}}_one&#39;),
-... Process(name = &#39;{{prefix}}_two&#39;)
-... ]).bind(prefix = &#39;hello&#39;)
-ProcessList(
-  Process(daemon=False, name=hello_one, max_failures=1, ephemeral=False, min_duration=5, final=False),
-  Process(daemon=False, name=hello_two, max_failures=1, ephemeral=False, min_duration=5, final=False)
-  )
-</pre>
-<p>Remember that an Aurora Job contains Tasks which contain Processes. A
-Job level binding is inherited by its Tasks and all their Processes.
-Similarly a Task level binding is available to that Task and its
-Processes but is <em>not</em> visible at the Job level (inheritance is a
-one-way street.)</p>
-
-<h4 id="mustaches-within-structurals">Mustaches Within Structurals</h4>
-
-<p>When you define a <code>Struct</code> schema, one powerful, but confusing, feature
-is that all of that structure&rsquo;s attributes are Mustache variables within
-the enclosing scope <em>once they have been populated</em>.</p>
-
-<p>For example, when <code>Process</code> is defined above, all its attributes, such as
-{{<code>name</code>}}, {{<code>cmdline</code>}}, and {{<code>max_failures</code>}}, are immediately
-defined as Mustache variables, implicitly bound into the <code>Process</code>, and
-inherited by all child objects once they are defined.</p>
-
-<p>Thus, you can do the following:</p>
-<pre class="highlight text">&gt;&gt;&gt; Process(name = &quot;installer&quot;, cmdline = &quot;echo {{name}} is running&quot;)
-Process(daemon=False, name=installer, max_failures=1, ephemeral=False, min_duration=5,
-        cmdline=echo installer is running, final=False)
-</pre>
-<p>WARNING: This binding only takes place in one direction. For example,
-the following does NOT work and does not set the <code>Process</code> <code>name</code>
-attribute&rsquo;s value.</p>
-<pre class="highlight text">&gt;&gt;&gt; Process().bind(name = &quot;installer&quot;)
-Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5, final=False)
-</pre>
-<p>The following is also not possible and results in an infinite loop that
-attempts to resolve <code>Process.name</code>.</p>
-<pre class="highlight text">&gt;&gt;&gt; Process(name = &#39;{{name}}&#39;).bind(name = &#39;installer&#39;)
-</pre>
-<p>Do not confuse Structural attributes with bound Mustache variables.
-Attributes are implicitly converted to Mustache variables but not vice
-versa.</p>
-
-<h3 id="templating-2:-structurals-are-factories">Templating 2: Structurals Are Factories</h3>
-
-<h4 id="a-second-way-of-templating">A Second Way of Templating</h4>
-
-<p>A second templating method is both as powerful as the aforementioned and
-often confused with it. This method is due to automatic conversion of
-Struct attributes to Mustache variables as described above.</p>
-
-<p>Suppose you create a Process object:</p>
-<pre class="highlight text">&gt;&gt;&gt; p = Process(name = &quot;process_one&quot;, cmdline = &quot;echo hello world&quot;)
-
-&gt;&gt;&gt; p
-Process(daemon=False, name=process_one, max_failures=1, ephemeral=False, min_duration=5,
-        cmdline=echo hello world, final=False)
-</pre>
-<p>This <code>Process</code> object, &ldquo;<code>p</code>&rdquo;, can be used wherever a <code>Process</code> object is
-needed. It can also be reused by changing the value(s) of its
-attribute(s). Here we change its <code>name</code> attribute from <code>process_one</code> to
-<code>process_two</code>.</p>
-<pre class="highlight text">&gt;&gt;&gt; p(name = &quot;process_two&quot;)
-Process(daemon=False, name=process_two, max_failures=1, ephemeral=False, min_duration=5,
-        cmdline=echo hello world, final=False)
-</pre>
-<p>Template creation is a common use for this technique:</p>
-<pre class="highlight text">&gt;&gt;&gt; Daemon = Process(daemon = True)
-&gt;&gt;&gt; logrotate = Daemon(name = &#39;logrotate&#39;, cmdline = &#39;./logrotate conf/logrotate.conf&#39;)
-&gt;&gt;&gt; mysql = Daemon(name = &#39;mysql&#39;, cmdline = &#39;bin/mysqld --safe-mode&#39;)
-</pre>
-<h3 id="advanced-binding">Advanced Binding</h3>
-
-<p>As described above, <code>.bind()</code> binds simple strings or numbers to
-Mustache variables. In addition to Structural types formed by combining
-atomic types, Pystachio has two container types, <code>List</code> and <code>Map</code>, which
-can also be bound via <code>.bind()</code>.</p>
-
-<h4 id="bind-syntax">Bind Syntax</h4>
-
-<p>The <code>bind()</code> function can take Python dictionaries or <code>kwargs</code>
-interchangeably (when &ldquo;<code>kwargs</code>&rdquo; is in a function definition, <code>kwargs</code>
-receives a Python dictionary containing all keyword arguments after the
-formal parameter list).</p>
-<pre class="highlight text">&gt;&gt;&gt; String(&#39;{{foo}}&#39;).bind(foo = &#39;bar&#39;) == String(&#39;{{foo}}&#39;).bind({&#39;foo&#39;: &#39;bar&#39;})
-True
-</pre>
-<p>Bindings done &ldquo;closer&rdquo; to the object in question take precedence:</p>
-<pre class="highlight text">&gt;&gt;&gt; p = Process(name = &#39;{{context}}_process&#39;)
-&gt;&gt;&gt; t = Task().bind(context = &#39;global&#39;)
-&gt;&gt;&gt; t(processes = [p, p.bind(context = &#39;local&#39;)])
-Task(processes=ProcessList(
-  Process(daemon=False, name=global_process, max_failures=1, ephemeral=False, final=False,
-          min_duration=5),
-  Process(daemon=False, name=local_process, max_failures=1, ephemeral=False, final=False,
-          min_duration=5)
-))
-</pre>
-<h4 id="binding-complex-objects">Binding Complex Objects</h4>
-
-<h5 id="lists">Lists</h5>
-<pre class="highlight text">&gt;&gt;&gt; fibonacci = List(Integer)([1, 1, 2, 3, 5, 8, 13])
-&gt;&gt;&gt; String(&#39;{{fib[4]}}&#39;).bind(fib = fibonacci)
-String(5)
-</pre>
-<h5 id="maps">Maps</h5>
-<pre class="highlight text">&gt;&gt;&gt; first_names = Map(String, String)({&#39;Kent&#39;: &#39;Clark&#39;, &#39;Wayne&#39;: &#39;Bruce&#39;, &#39;Prince&#39;: &#39;Diana&#39;})
-&gt;&gt;&gt; String(&#39;{{first[Kent]}}&#39;).bind(first = first_names)
-String(Clark)
-</pre>
-<h5 id="structurals">Structurals</h5>
-<pre class="highlight text">&gt;&gt;&gt; String(&#39;{{p.cmdline}}&#39;).bind(p = Process(cmdline = &quot;echo hello world&quot;))
-String(echo hello world)
-</pre>
-<h3 id="structural-binding">Structural Binding</h3>
-
-<p>Use structural templates when binding more than two or three individual
-values at the Job or Task level. For fewer values, standard
-key-to-string binding is sufficient.</p>
-
-<p>Structural binding is a very powerful pattern and is most useful in
-Aurora/Thermos for doing Structural configuration. For example, you can
-define a job profile. The following profile uses <code>HDFS</code>, the Hadoop
-Distributed File System, to designate a file&rsquo;s location. <code>HDFS</code> does
-not come with Aurora, so you&rsquo;ll need to either install it separately
-or change the way the dataset is designated.</p>
-<pre class="highlight text">class Profile(Struct):
-  version = Required(String)
-  environment = Required(String)
-  dataset = Default(String, &#39;hdfs://home/aurora/data/{{environment}}&#39;)
-
-PRODUCTION = Profile(version = &#39;live&#39;, environment = &#39;prod&#39;)
-DEVEL = Profile(version = &#39;latest&#39;,
-                environment = &#39;devel&#39;,
-                dataset = &#39;hdfs://home/aurora/data/test&#39;)
-TEST = Profile(version = &#39;latest&#39;, environment = &#39;test&#39;)
-
-JOB_TEMPLATE = Job(
-  name = &#39;application&#39;,
-  role = &#39;myteam&#39;,
-  cluster = &#39;cluster1&#39;,
-  environment = &#39;{{profile.environment}}&#39;,
-  task = SequentialTask(
-    name = &#39;task&#39;,
-    resources = Resources(cpu = 2, ram = 4*GB, disk = 8*GB),
-    processes = [
-  Process(name = &#39;main&#39;, cmdline = &#39;java -jar application.jar -hdfsPath
-             {{profile.dataset}}&#39;)
-    ]
-   )
- )
-
-jobs = [
-  JOB_TEMPLATE(instances = 100).bind(profile = PRODUCTION),
-  JOB_TEMPLATE.bind(profile = DEVEL),
-  JOB_TEMPLATE.bind(profile = TEST),
- ]
-</pre>
-<p>In this case, a custom structural &ldquo;Profile&rdquo; is created to self-document
-the configuration to some degree. This also allows some schema
-&ldquo;type-checking&rdquo; and default self-substitution, e.g. in
-<code>Profile.dataset</code> above.</p>
-
-<p>So rather than a <code>.bind()</code> with a half-dozen substituted variables, you
-can bind a single object that has sensible defaults stored in a single
-place.</p>
-
-<h2 id="configuration-file-writing-tips-and-best-practices">Configuration File Writing Tips And Best Practices</h2>
-
-<h3 id="use-as-few-.aurora-files-as-possible">Use As Few .aurora Files As Possible</h3>
-
-<p>When creating your <code>.aurora</code> configuration, try to keep all versions of
-a particular job within the same <code>.aurora</code> file. For example, if you
-have separate jobs for <code>cluster1</code>, <code>cluster1</code> staging, <code>cluster1</code>
-testing, and <code>cluster2</code>, keep them as close together as possible.</p>
-
-<p>Constructs shared across multiple jobs owned by your team (e.g.
-team-level defaults or structural templates) can be split into separate
-<code>.aurora</code> files and included via the <code>include</code> directive, as sketched below.</p>
-
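-<p>For example (the file name here is hypothetical):</p>
-<pre class="highlight text">include(&#39;team_defaults.aurora&#39;)
-</pre>
-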
-<h3 id="avoid-boilerplate">Avoid Boilerplate</h3>
-
-<p>If you see repetition or find yourself copy and pasting any parts of
-your configuration, it&rsquo;s likely an opportunity for templating. Take the
-example below:</p>
-
-<p><code>redundant.aurora</code> contains:</p>
-<pre class="highlight text">download = Process(
-  name = &#39;download&#39;,
-  cmdline = &#39;wget http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2&#39;,
-  max_failures = 5,
-  min_duration = 1)
-
-unpack = Process(
-  name = &#39;unpack&#39;,
-  cmdline = &#39;rm -rf Python-2.7.3 &amp;&amp; tar xjf Python-2.7.3.tar.bz2&#39;,
-  max_failures = 5,
-  min_duration = 1)
-
-build = Process(
-  name = &#39;build&#39;,
-  cmdline = &#39;pushd Python-2.7.3 &amp;&amp; ./configure &amp;&amp; make &amp;&amp; popd&#39;,
-  max_failures = 1)
-
-email = Process(
-  name = &#39;email&#39;,
-  cmdline = &#39;echo Success | mail feynman@tmc.com&#39;,
-  max_failures = 5,
-  min_duration = 1)
-
-build_python = Task(
-  name = &#39;build_python&#39;,
-  processes = [download, unpack, build, email],
-  constraints = [Constraint(order = [&#39;download&#39;, &#39;unpack&#39;, &#39;build&#39;, &#39;email&#39;])])
-</pre>
-<p>As you&rsquo;ll notice, there&rsquo;s a lot of repetition in the <code>Process</code>
-definitions. For example, almost every process sets a <code>max_failures</code>
-limit to 5 and a <code>min_duration</code> to 1. This is an opportunity for factoring
-into a common process template.</p>
-
-<p>Furthermore, the Python version is repeated everywhere. This can be
-bound via structural templating as described in the <a href="#advanced-binding">Advanced Binding</a>
-section.</p>
-
-<p><code>less_redundant.aurora</code> contains:</p>
-<pre class="highlight text">class Python(Struct):
-  version = Required(String)
-  base = Default(String, &#39;Python-{{version}}&#39;)
-  package = Default(String, &#39;{{base}}.tar.bz2&#39;)
-
-ReliableProcess = Process(
-  max_failures = 5,
-  min_duration = 1)
-
-download = ReliableProcess(
-  name = &#39;download&#39;,
-  cmdline = &#39;wget http://www.python.org/ftp/python/{{python.version}}/{{python.package}}&#39;)
-
-unpack = ReliableProcess(
-  name = &#39;unpack&#39;,
-  cmdline = &#39;rm -rf {{python.base}} &amp;&amp; tar xjf {{python.package}}&#39;)
-
-build = ReliableProcess(
-  name = &#39;build&#39;,
-  cmdline = &#39;pushd {{python.base}} &amp;&amp; ./configure &amp;&amp; make &amp;&amp; popd&#39;,
-  max_failures = 1)
-
-email = ReliableProcess(
-  name = &#39;email&#39;,
-  cmdline = &#39;echo Success | mail {{role}}@foocorp.com&#39;)
-
-build_python = SequentialTask(
-  name = &#39;build_python&#39;,
-  processes = [download, unpack, build, email]).bind(python = Python(version = &quot;2.7.3&quot;))
-</pre>
-<h3 id="thermos-uses-bash,-but-thermos-is-not-bash">Thermos Uses bash, But Thermos Is Not bash</h3>
-
-<h4 id="bad">Bad</h4>
-
-<p>Many tiny Processes make for harder-to-manage configurations.</p>
-<pre class="highlight text">copy = Process(
-  name = &#39;copy&#39;,
-  cmdline = &#39;rcp user@my_machine:my_application .&#39;
- )
-
- unpack = Process(
-   name = &#39;unpack&#39;,
-   cmdline = &#39;unzip app.zip&#39;
- )
-
- remove = Process(
-   name = &#39;remove&#39;,
-   cmdline = &#39;rm -f app.zip&#39;
- )
-
- run = Process(
-   name = &#39;app&#39;,
-   cmdline = &#39;java -jar app.jar&#39;
- )
-
- run_task = Task(
-   processes = [copy, unpack, remove, run],
-   constraints = order(copy, unpack, remove, run)
- )
-</pre>
-<h4 id="good">Good</h4>
-
-<p>Each <code>cmdline</code> runs in a bash subshell, so you have the full power of
-bash. Chaining commands with <code>&amp;&amp;</code> or <code>||</code> is almost always the right
-thing to do.</p>
-
-<p>Also for Tasks that are simply a list of processes that run one after
-another, consider using the <code>SequentialTask</code> helper which applies a
-linear ordering constraint for you.</p>
-<pre class="highlight text">stage = Process(
-  name = &#39;stage&#39;,
-  cmdline = &#39;rcp user@my_machine:my_application . &amp;&amp; unzip app.zip &amp;&amp; rm -f app.zip&#39;)
-
-run = Process(name = &#39;app&#39;, cmdline = &#39;java -jar app.jar&#39;)
-
-run_task = SequentialTask(processes = [stage, run])
-</pre>
-<h3 id="rarely-use-functions-in-your-configurations">Rarely Use Functions In Your Configurations</h3>
-
-<p>90% of the time you define a function in a <code>.aurora</code> file, you&rsquo;re
-probably Doing It Wrong&trade;.</p>
-
-<h4 id="bad">Bad</h4>
-<pre class="highlight text">def get_my_task(name, user, cpu, ram, disk):
-  return Task(
-    name = name,
-    user = user,
-    processes = [STAGE_PROCESS, RUN_PROCESS],
-    constraints = order(STAGE_PROCESS, RUN_PROCESS),
-    resources = Resources(cpu = cpu, ram = ram, disk = disk)
-  )
-
-task_one = get_my_task(&#39;task_one&#39;, &#39;feynman&#39;, 1.0, 32*MB, 1*GB)
-task_two = get_my_task(&#39;task_two&#39;, &#39;feynman&#39;, 2.0, 64*MB, 1*GB)
-</pre>
-<h4 id="good">Good</h4>
-
-<p>This one is more idiomatic. Forced keyword arguments prevent accidents,
-e.g. constructing a task with <code>32*MB</code> when you mean 32 MB of RAM and not
-disk. Less proliferation of task-construction techniques means
-easier-to-read, quicker-to-understand, and more composable
-configuration.</p>
-<pre class="highlight text">TASK_TEMPLATE = SequentialTask(
-  user = &#39;wickman&#39;,
-  processes = [STAGE_PROCESS, RUN_PROCESS],
-)
-
-task_one = TASK_TEMPLATE(
-  name = &#39;task_one&#39;,
-  resources = Resources(cpu = 1.0, ram = 32*MB, disk = 1*GB) )
-
-task_two = TASK_TEMPLATE(
-  name = &#39;task_two&#39;,
-  resources = Resources(cpu = 2.0, ram = 64*MB, disk = 1*GB)
-)
-</pre>
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/documentation/latest/contributing/index.html b/publish/documentation/latest/contributing/index.html
deleted file mode 100644
index 4a461de..0000000
--- a/publish/documentation/latest/contributing/index.html
+++ /dev/null
@@ -1,133 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Transparent Apache Aurora logo with dark background"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h2 id="get-the-source-code">Get the Source Code</h2>
-
-<p>First things first, you&rsquo;ll need the source! The Aurora source is available from Apache git:</p>
-<pre class="highlight text">git clone https://git-wip-us.apache.org/repos/asf/incubator-aurora
-</pre>
-<h2 id="find-something-to-do">Find Something to Do</h2>
-
-<p>There are issues in <a href="https://issues.apache.org/jira/browse/AURORA">Jira</a> with the
-<a href="https://issues.apache.org/jira/browse/AURORA-189?jql=project%20%3D%20AURORA%20AND%20resolution%20%3D%20Unresolved%20AND%20labels%20%3D%20newbie%20ORDER%20BY%20priority%20DESC">&ldquo;newbie&rdquo; tag</a>
-that are good starting places for new Aurora contributors; pick one of these and dive in! Once
-you&rsquo;ve got a patch, the next step is to post a review.</p>
-
-<h2 id="getting-your-reviewboard-account">Getting your ReviewBoard Account</h2>
-
-<p>Go to <a href="https://reviews.apache.org">https://reviews.apache.org</a> and create an account.</p>
-
-<h2 id="setting-up-your-reviewboard-environment">Setting up your ReviewBoard Environment</h2>
-
-<p>Run <code>./rbt status</code>. The first time this runs, it will bootstrap and you will be asked to log in.
-Subsequent runs will cache your login credentials.</p>
-
-<h2 id="submitting-a-patch-for-review">Submitting a Patch for Review</h2>
-
-<p>Post a review with <code>rbt</code>, fill out the fields in your browser and hit Publish.</p>
-<pre class="highlight text">./rbt post -o
-</pre>
-<p>Once you&rsquo;ve done this, you probably want to mark the associated Jira issue as Reviewable.</p>
-
-<h2 id="updating-an-existing-review">Updating an Existing Review</h2>
-
-<p>Incorporate review feedback, make some more commits, update your existing review, fill out the
-fields in your browser and hit Publish.</p>
-<pre class="highlight text">./rbt post -o -r &lt;RB_ID&gt;
-</pre>
-<h2 id="merging-your-own-review-(committers)">Merging Your Own Review (Committers)</h2>
-
-<p>Once you have shipits from the right committers, merge your changes in a single commit and mark
-the review as submitted. The typical workflow is:</p>
-<pre class="highlight text">git checkout master
-git pull origin master
-./rbt patch -c &lt;RB_ID&gt;  # Verify the automatically-generated commit message looks sane,
-                        # editing if necessary.
-git show master         # Verify everything looks sane
-git push origin master
-./rbt close &lt;RB_ID&gt;
-</pre>
-<p>Note that even if you&rsquo;re developing using feature branches you will not use <code>git merge</code> - each
-commit will be an atomic change accompanied by a ReviewBoard entry.</p>
-
-<h2 id="merging-someone-else&#39;s-review">Merging Someone Else&rsquo;s Review</h2>
-
-<p>Sometimes you&rsquo;ll need to merge someone else&rsquo;s RB. The typical workflow for this is:</p>
-<pre class="highlight text">git checkout master
-git pull origin master
-./rbt patch -c &lt;RB_ID&gt;
-git show master  # Verify everything looks sane, author is correct
-git push origin master
-</pre>
-<h2 id="cleaning-up">Cleaning Up</h2>
-
-<p>Your patch has landed, congratulations! The last thing you&rsquo;ll want to do before moving on to your
-next fix is to clean up your Jira and ReviewBoard. The former should be marked as
-&ldquo;Resolved&rdquo; and the latter as &ldquo;Submitted&rdquo;.</p>
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/documentation/latest/cron-jobs/index.html b/publish/documentation/latest/cron-jobs/index.html
deleted file mode 100644
index e540831..0000000
--- a/publish/documentation/latest/cron-jobs/index.html
+++ /dev/null
@@ -1,215 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Transparent Apache Aurora logo with dark background"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h1 id="cron-jobs">Cron Jobs</h1>
-
-<p>Aurora supports execution of scheduled jobs on a Mesos cluster using cron-style syntax.</p>
-
-<ul>
-<li><a href="#overview">Overview</a></li>
-<li><a href="#collision-policies">Collision Policies</a>
-
-<ul>
-<li><a href="#kill_existing">KILL_EXISTING</a></li>
-<li><a href="#cancel_new">CANCEL_NEW</a></li>
-</ul></li>
-<li><a href="#failure-recovery">Failure recovery</a></li>
-<li><a href="#interacting-with-cron-jobs-via-the-aurora-cli">Interacting with cron jobs via the Aurora CLI</a>
-
-<ul>
-<li><a href="#cron-schedule">cron schedule</a></li>
-<li><a href="#cron-deschedule">cron deschedule</a></li>
-<li><a href="#cron-start">cron start</a></li>
-<li><a href="#job-killall-job-restart-job-kill">job killall, job restart, job kill</a></li>
-</ul></li>
-<li><a href="#technical-note-about-syntax">Technical Note About Syntax</a></li>
-<li><a href="#caveats">Caveats</a>
-
-<ul>
-<li><a href="#failovers">Failovers</a></li>
-<li><a href="#collision-policy-is-best-effort">Collision policy is best-effort</a></li>
-<li><a href="#timezone-configuration">Timezone Configuration</a></li>
-</ul></li>
-</ul>
-
-<h2 id="overview">Overview</h2>
-
-<p>A job is identified as a cron job by the presence of a
-<code>cron_schedule</code> attribute containing a cron-style schedule in the
-<a href="configuration-reference.md#job-objects"><code>Job</code></a> object. Examples of cron schedules
-include &ldquo;every 5 minutes&rdquo; (<code>*/5 * * * *</code>), &ldquo;Fridays at 17:00&rdquo; (<code>* 17 * * FRI</code>), and
-&ldquo;the 1st and 15th day of the month at 03:00&rdquo; (<code>0 3 1,15 *</code>).</p>
-
-<p>Example (available in the <a href="/documentation/latest/vagrant/">Vagrant environment</a>):</p>
-<pre class="highlight text">$ cat /vagrant/examples/job/cron_hello_world.aurora
-# cron_hello_world.aurora
-# A cron job that runs every 5 minutes.
-jobs = [
-  Job(
-    cluster = &#39;devcluster&#39;,
-    role = &#39;www-data&#39;,
-    environment = &#39;test&#39;,
-    name = &#39;cron_hello_world&#39;,
-    cron_schedule = &#39;*/5 * * * *&#39;,
-    task = SimpleTask(
-      &#39;cron_hello_world&#39;,
-      &#39;echo &quot;Hello world from cron, the time is now $(date --rfc-822)&quot;&#39;),
-  ),
-]
-</pre>
-<h2 id="collision-policies">Collision Policies</h2>
-
-<p>The <code>cron_collision_policy</code> field specifies the scheduler&rsquo;s behavior when a new cron job is
-triggered while an older run hasn&rsquo;t finished. The scheduler has two policies available,
-<a href="#kill_existing">KILL_EXISTING</a> and <a href="#cancel_new">CANCEL_NEW</a>.</p>
-
-<h3 id="kill_existing">KILL_EXISTING</h3>
-
-<p>The default policy - on a collision the old instances are killed and instances with the current
-configuration are started.</p>
-
-<h3 id="cancel_new">CANCEL_NEW</h3>
-
-<p>On a collision the new run is cancelled.</p>
-
-<p>Note that the use of this policy is likely a code smell - interrupted cron jobs should be able
-to recover their progress on a subsequent invocation, otherwise they risk having their work queue
-grow faster than they can process it.</p>
-
-<h2 id="failure-recovery">Failure recovery</h2>
-
-<p>Unlike with services, which Aurora will always re-execute regardless of exit status, instances of
-cron jobs retry according to the <code>max_task_failures</code> attribute of the
-<a href="/documentation/latest/configuration-reference/#task-objects">Task</a> object. To get &ldquo;run-until-success&rdquo; semantics,
-set <code>max_task_failures</code> to <code>-1</code>.</p>
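-
-<p>For example, a sketch of a cron job whose task retries until it succeeds (names and values are illustrative; the <code>Task</code>, <code>Process</code>, and <code>Resources</code> objects are described in the configuration reference):</p>
-<pre class="highlight text">jobs = [
-  Job(
-    cluster = &#39;devcluster&#39;,
-    role = &#39;www-data&#39;,
-    environment = &#39;test&#39;,
-    name = &#39;cron_retry_until_success&#39;,
-    cron_schedule = &#39;*/5 * * * *&#39;,
-    task = Task(
-      name = &#39;retry_until_success&#39;,
-      processes = [Process(name = &#39;hello&#39;, cmdline = &#39;echo hello&#39;)],
-      resources = Resources(cpu = 0.1, ram = 16*MB, disk = 16*MB),
-      max_task_failures = -1,
-    ),
-  ),
-]
-</pre>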
-
-<h2 id="interacting-with-cron-jobs-via-the-aurora-cli">Interacting with cron jobs via the Aurora CLI</h2>
-
-<p>Most interaction with cron jobs takes place using the <code>cron</code> subcommand. See <code>aurora2 help cron</code>
-for up-to-date usage instructions.</p>
-
-<h3 id="cron-schedule">cron schedule</h3>
-
-<p>Schedules a new cron job on the Aurora cluster for later runs or replaces the existing cron template
-with a new one. Only future runs will be affected; any existing active tasks are left intact.</p>
-<pre class="highlight text">$ aurora2 cron schedule devcluster/www-data/test/cron_hello_world /vagrant/examples/jobs/cron_hello_world.aurora
-</pre>
-<h3 id="cron-deschedule">cron deschedule</h3>
-
-<p>Deschedules a cron job, preventing future runs but allowing current runs to complete.</p>
-<pre class="highlight text">$ aurora2 cron deschedule devcluster/www-data/test/cron_hello_world
-</pre>
-<h3 id="cron-start">cron start</h3>
-
-<p>Starts a cron job immediately, outside of its normal cron schedule.</p>
-<pre class="highlight text">$ aurora2 cron start devcluster/www-data/test/cron_hello_world
-</pre>
-<h3 id="job-killall,-job-restart,-job-kill">job killall, job restart, job kill</h3>
-
-<p>Cron jobs create instances running on the cluster that you can interact with like normal Aurora
-tasks with <code>job kill</code> and <code>job restart</code>.</p>
-
-<h2 id="technical-note-about-syntax">Technical Note About Syntax</h2>
-
-<p><code>cron_schedule</code> uses a restricted subset of BSD crontab syntax. While the
-execution engine currently uses Quartz, the schedule parsing is custom and supports a subset of FreeBSD
-<a href="http://www.freebsd.org/cgi/man.cgi?crontab(5)">crontab(5)</a> syntax. See
-<a href="https://github.com/apache/incubator-aurora/blob/master/src/main/java/org/apache/aurora/scheduler/cron/CrontabEntry.java#L106-L124">the source</a>
-for details.</p>
-
-<h2 id="caveats">Caveats</h2>
-
-<h3 id="failovers">Failovers</h3>
-
-<p>No failover recovery. Aurora does not record the latest minute it fired
-triggers for across failovers. Therefore it&rsquo;s possible to miss triggers
-on failover. Note that this behavior may change in the future.</p>
-
-<p>It&rsquo;s necessary to sync time between schedulers with something like <code>ntpd</code>.
-Clock skew could cause double or missed triggers in the case of a failover.</p>
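-
-<p>For example, on Ubuntu (illustrative; package and service names vary by distribution):</p>
-<pre class="highlight text">sudo apt-get install ntp
-ntpq -p    # verify the daemon is exchanging time with its configured peers
-</pre>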
-
-<h3 id="collision-policy-is-best-effort">Collision policy is best-effort</h3>
-
-<p>Aurora aims to always have <em>at least one copy</em> of a given instance running at a time - it&rsquo;s
-an AP system, meaning it chooses Availability and Partition Tolerance at the expense of
-Consistency.</p>
-
-<p>If your collision policy was <code>CANCEL_NEW</code> and a task has terminated but
-Aurora has not noticed this, Aurora will go ahead and create your new
-task.</p>
-
-<p>If your collision policy was <code>KILL_EXISTING</code> and a task was marked <code>LOST</code>
-but not yet GCed, Aurora will go ahead and create your new task without
-attempting to kill the old one (outside the GC interval).</p>
-
-<h3 id="timezone-configuration">Timezone Configuration</h3>
-
-<p>Cron timezone is configured independently of JVM timezone with the <code>-cron_timezone</code> flag and
-defaults to UTC.</p>
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/documentation/latest/deploying-aurora-scheduler/index.html b/publish/documentation/latest/deploying-aurora-scheduler/index.html
deleted file mode 100644
index abbf8ba..0000000
--- a/publish/documentation/latest/deploying-aurora-scheduler/index.html
+++ /dev/null
@@ -1,368 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Transparent Apache Aurora logo with dark background"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h1 id="deploying-the-aurora-scheduler">Deploying the Aurora Scheduler</h1>
-
-<p>When setting up your cluster, you will install the scheduler on a small number (usually 3 or 5) of
-machines.  This guide helps you get the scheduler set up and troubleshoot some common hurdles.</p>
-
-<ul>
-<li><a href="#installing-aurora">Installing Aurora</a>
-
-<ul>
-<li><a href="#creating-the-distribution-zip-file-optional">Creating the Distribution .zip File (Optional)</a></li>
-<li><a href="#installing-aurora-1">Installing Aurora</a></li>
-</ul></li>
-<li><a href="#configuring-aurora">Configuring Aurora</a>
-
-<ul>
-<li><a href="#a-note-on-configuration">A Note on Configuration</a></li>
-<li><a href="#replicated-log-configuration">Replicated Log Configuration</a></li>
-<li><a href="#initializing-the-replicated-log">Initializing the Replicated Log</a></li>
-<li><a href="#storage-performance-considerations">Storage Performance Considerations</a></li>
-<li><a href="#network-considerations">Network considerations</a></li>
-</ul></li>
-<li><a href="#running-aurora">Running Aurora</a>
-
-<ul>
-<li><a href="#maintaining-an-aurora-installation">Maintaining an Aurora Installation</a></li>
-<li><a href="#monitoring">Monitoring</a></li>
-<li><a href="#running-stateful-services">Running stateful services</a></li>
-<li><a href="#dedicated-attribute">Dedicated attribute</a>
-
-<ul>
-<li><a href="#syntax">Syntax</a></li>
-<li><a href="#example">Example</a></li>
-</ul></li>
-</ul></li>
-<li><a href="#common-problems">Common problems</a>
-
-<ul>
-<li><a href="#replicated-log-not-initialized">Replicated log not initialized</a></li>
-<li><a href="#symptoms">Symptoms</a></li>
-<li><a href="#solution">Solution</a></li>
-<li><a href="#scheduler-not-registered">Scheduler not registered</a></li>
-<li><a href="#symptoms-1">Symptoms</a></li>
-<li><a href="#solution-1">Solution</a></li>
-<li><a href="#tasks-are-stuck-in-pending-forever">Tasks are stuck in PENDING forever</a></li>
-<li><a href="#symptoms-2">Symptoms</a></li>
-<li><a href="#solution-2">Solution</a></li>
-</ul></li>
-</ul>
-
-<h2 id="installing-aurora">Installing Aurora</h2>
-
-<p>The Aurora scheduler is a standalone Java server. As part of the build process it creates a bundle
-of all its dependencies, with the notable exceptions of the JVM and libmesos. Each target server
-should have a JVM (Java 7 or higher) and libmesos (0.20.1) installed.</p>
-
-<h3 id="creating-the-distribution-.zip-file-(optional)">Creating the Distribution .zip File (Optional)</h3>
-
-<p>To create a distribution for installation you will need build tools installed. On Ubuntu this can be
-done with <code>sudo apt-get install build-essential default-jdk</code>.</p>
-<pre class="highlight text">git clone http://git-wip-us.apache.org/repos/asf/incubator-aurora.git
-cd incubator-aurora
-./gradlew distZip
-</pre>
-<p>Copy the generated <code>dist/distributions/aurora-scheduler-*.zip</code> to each node that will run a scheduler.</p>
-
-<h3 id="installing-aurora">Installing Aurora</h3>
-
-<p>Extract the aurora-scheduler zip file. The example configurations assume it is extracted to
-<code>/usr/local/aurora-scheduler</code>.</p>
-<pre class="highlight text">sudo unzip dist/distributions/aurora-scheduler-*.zip -d /usr/local
-sudo ln -nfs &quot;$(ls -dt /usr/local/aurora-scheduler-* | head -1)&quot; /usr/local/aurora-scheduler
-</pre>
-<h2 id="configuring-aurora">Configuring Aurora</h2>
-
-<h3 id="a-note-on-configuration">A Note on Configuration</h3>
-
-<p>Like Mesos, Aurora uses command-line flags for runtime configuration. As such the Aurora
-&ldquo;configuration file&rdquo; is typically a <code>scheduler.sh</code> shell script of the following form:</p>
-<pre class="highlight shell"><span class="c">#!/bin/bash</span>
-<span class="nv">AURORA_HOME</span><span class="o">=</span>/usr/local/aurora-scheduler
-
-<span class="c"># Flags controlling the JVM.</span>
-<span class="nv">JAVA_OPTS</span><span class="o">=(</span>
-  -Xmx2g
-  -Xms2g
-  <span class="c"># GC tuning, etc.</span>
-<span class="o">)</span>
-
-<span class="c"># Flags controlling the scheduler.</span>
-<span class="nv">AURORA_FLAGS</span><span class="o">=(</span>
-  -http_port<span class="o">=</span>8081
-  <span class="c"># Log configuration, etc.</span>
-<span class="o">)</span>
-
-<span class="c"># Environment variables controlling libmesos</span>
-<span class="nb">export </span><span class="nv">JAVA_HOME</span><span class="o">=</span>...
-<span class="nb">export </span><span class="nv">GLOG_v</span><span class="o">=</span>1
-<span class="nb">export </span><span class="nv">LIBPROCESS_PORT</span><span class="o">=</span>8083
-
-<span class="nv">JAVA_OPTS</span><span class="o">=</span><span class="s2">&quot;</span><span class="k">${</span><span class="nv">JAVA_OPTS</span><span class="p">[*]</span><span class="k">}</span><span class="s2">&quot;</span> <span class="nb">exec</span> <span class="s2">&quot;</span><span class="nv">$AURORA_HOME</span><span class="s2">/bin/aurora-scheduler&quot;</span> <span class="s2">&quot;</span><span class="k">${</span><span class="nv">AURORA_FLAGS</span><span class="p">[@]</span><span class="k">}</span><span class="s2">&quot;</span>
-</pre>
-<p>That way Aurora&rsquo;s current flags are visible in <code>ps</code> and in the <code>/vars</code> admin endpoint.</p>
-
-<p>Examples are available under <code>examples/scheduler/</code>. For a list of available Aurora flags and their
-documentation run</p>
-<pre class="highlight text">/usr/local/aurora-scheduler/bin/aurora-scheduler -help
-</pre>
-<h3 id="replicated-log-configuration">Replicated Log Configuration</h3>
-
-<p>All Aurora state is persisted to a replicated log. This includes all jobs Aurora is running
-including where in the cluster they are being run and the configuration for running them, as
-well as other information such as metadata needed to reconnect to the Mesos master, resource
-quotas, and any other locks in place.</p>
-
-<p>Aurora schedulers use ZooKeeper to discover log replicas and elect a leader. Only one scheduler is
-leader at a given time - the other schedulers follow log writes and prepare to take over as leader
-but do not communicate with the Mesos master. Either 3 or 5 schedulers are recommended in a
-production deployment, depending on failure tolerance, and they must have persistent storage.</p>
-
-<p>In a cluster with <code>N</code> schedulers, the flag <code>-native_log_quorum_size</code> should be set to
-<code>floor(N/2) + 1</code>. So in a cluster with 1 scheduler it should be set to <code>1</code>, in a cluster with 3 it
-should be set to <code>2</code>, and in a cluster of 5 it should be set to <code>3</code>.</p>
-
-<table><thead>
-<tr>
-<th>Number of schedulers (N)</th>
-<th><code>-native_log_quorum_size</code> setting (<code>floor(N/2) + 1</code>)</th>
-</tr>
-</thead><tbody>
-<tr>
-<td>1</td>
-<td>1</td>
-</tr>
-<tr>
-<td>3</td>
-<td>2</td>
-</tr>
-<tr>
-<td>5</td>
-<td>3</td>
-</tr>
-<tr>
-<td>7</td>
-<td>4</td>
-</tr>
-</tbody></table>
-
-<p><em>Incorrectly setting this flag will cause data corruption to occur!</em></p>
-
-<p>See <a href="storage-config.md#scheduler-storage-configuration-flags">this document</a> for more replicated
-log and storage configuration options.</p>
-
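-<p>For example, in a 3-scheduler cluster, the <code>scheduler.sh</code> script shown earlier would include (a sketch):</p>
-<pre class="highlight text">AURORA_FLAGS=(
-  # ... other flags ...
-  # 3 schedulers: floor(3/2) + 1 = 2
-  -native_log_quorum_size=2
-)
-</pre>
-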
-<h2 id="initializing-the-replicated-log">Initializing the Replicated Log</h2>
-
-<p>Before you start Aurora you will also need to initialize the log on the first master.</p>
-<pre class="highlight text">mesos-log initialize --path=&quot;$AURORA_HOME/scheduler/db&quot;
-</pre>
-<p>Failing to do this will result in the following message when you try to start the scheduler.</p>
-<pre class="highlight text">Replica in EMPTY status received a broadcasted recover request
-</pre>
-<h3 id="storage-performance-considerations">Storage Performance Considerations</h3>
-
-<p>See <a href="/documentation/latest/scheduler-storage/">this document</a> for scheduler storage performance considerations.</p>
-
-<h3 id="network-considerations">Network considerations</h3>
-
-<p>The Aurora scheduler listens on 2 ports - an HTTP port used for client RPCs and a web UI,
-and a libprocess (HTTP+Protobuf) port used to communicate with the Mesos master and for the log
-replication protocol. These can be left unconfigured (the scheduler publishes all selected ports
-to ZooKeeper) or explicitly set in the startup script as follows:</p>
-<pre class="highlight text"># ...
-AURORA_FLAGS=(
-  # ...
-  -http_port=8081
-  # ...
-)
-# ...
-export LIBPROCESS_PORT=8083
-# ...
-</pre>
-<h2 id="running-aurora">Running Aurora</h2>
-
-<p>Configure a supervisor like <a href="http://mmonit.com/monit/">Monit</a> or
-<a href="http://supervisord.org/">supervisord</a> to run the created <code>scheduler.sh</code> file and restart it
-whenever it fails. Aurora expects to be restarted by an external process when it fails. Aurora
-supports an active health checking protocol on its admin HTTP interface - if a <code>GET /health</code> times
-out or returns anything other than <code>200 OK</code> the scheduler process is unhealthy and should be
-restarted.</p>
-
-<p>For example, monit can be configured with</p>
-<pre class="highlight text">if failed port 8081 send &quot;GET /health HTTP/1.0\r\n&quot; expect &quot;OK\n&quot; with timeout 2 seconds for 10 cycles then restart
-</pre>
-<p>assuming you set <code>-http_port=8081</code>.</p>
-
-<h3 id="maintaining-an-aurora-installation">Maintaining an Aurora Installation</h3>
-
-<h3 id="monitoring">Monitoring</h3>
-
-<p>Please see our dedicated <a href="/documentation/latest/monitoring/">monitoring guide</a> for in-depth discussion on monitoring.</p>
-
-<h3 id="running-stateful-services">Running stateful services</h3>
-
-<p>Aurora is best suited to run stateless applications, but it also accommodates stateful services
-like databases, or services that otherwise need to always run on the same machines.</p>
-
-<h4 id="dedicated-attribute">Dedicated attribute</h4>
-
-<p>The Mesos slave has the <code>--attributes</code> command line argument which can be used to mark a slave with
-static attributes (not to be confused with <code>--resources</code>, which are dynamic and accounted).</p>
-
-<p>Aurora makes these attributes available for matching with scheduling
-<a href="configuration-reference.md#specifying-scheduling-constraints">constraints</a>.  Most of these
-constraints are arbitrary and available for custom use.  There is one exception, though: the
-<code>dedicated</code> attribute.  Aurora treats this specially, and only allows matching jobs to run on these
-machines, and will only schedule matching jobs on these machines.</p>
-
-<h5 id="syntax">Syntax</h5>
-
-<p>The dedicated attribute has semantic meaning. The format is <code>$role(/.*)?</code>. When a job is created,
-the scheduler requires that the <code>$role</code> component matches the <code>role</code> field in the job
-configuration, and will reject the job creation otherwise.  The remainder of the attribute is
-free-form. We&rsquo;ve developed the idiom of formatting this attribute as <code>$role/$job</code>, but do not
-enforce this.</p>
-
-<h5 id="example">Example</h5>
-
-<p>Consider the following slave command line:</p>
-<pre class="highlight text">mesos-slave --attributes=&quot;host:$HOST;rack:$RACK;dedicated:db_team/redis&quot; ...
-</pre>
-<p>And this job configuration:</p>
-<pre class="highlight text">Service(
-  name = &#39;redis&#39;,
-  role = &#39;db_team&#39;,
-  constraints = {
-    &#39;dedicated&#39;: &#39;db_team/redis&#39;
-  },
-  ...
-)
-</pre>
-<p>The job configuration indicates that it should only be scheduled on slaves with the attribute
-<code>dedicated:db_team/redis</code>.  Additionally, Aurora will prevent any tasks that do <em>not</em> have that
-constraint from running on those slaves.</p>
-
-<h2 id="common-problems">Common problems</h2>
-
-<p>So you&rsquo;ve started your first cluster and are running into some issues? We&rsquo;ve collected some common
-stumbling blocks and solutions here to help get you moving.</p>
-
-<h3 id="replicated-log-not-initialized">Replicated log not initialized</h3>
-
-<h4 id="symptoms">Symptoms</h4>
-
-<ul>
-<li>Scheduler RPCs and web interface claim <code>Storage is not READY</code></li>
-<li>Scheduler log repeatedly prints messages like</li>
-</ul>
-<pre class="highlight text">  I1016 16:12:27.234133 26081 replica.cpp:638] Replica in EMPTY status
-  received a broadcasted recover request
-  I1016 16:12:27.234256 26084 recover.cpp:188] Received a recover response
-  from a replica in EMPTY status
-</pre>
-<h4 id="solution">Solution</h4>
-
-<p>When you create a new cluster, you need to inform a quorum of schedulers that they are safe to
-consider their database to be empty by <a href="#initializing-the-replicated-log">initializing</a> the
-replicated log. This is done to prevent the scheduler from modifying the cluster state in the event
-of multiple simultaneous disk failures or, more likely, misconfiguration of the replicated log path.</p>
-
-<h3 id="scheduler-not-registered">Scheduler not registered</h3>
-
-<h4 id="symptoms">Symptoms</h4>
-
-<p>Scheduler log contains</p>
-<pre class="highlight text">Framework has not been registered within the tolerated delay.
-</pre>
-<h4 id="solution">Solution</h4>
-
-<p>Double-check that the scheduler is configured correctly to reach the master. If you are registering
-the master in ZooKeeper, make sure the command line argument given to the master:</p>
-<pre class="highlight text">--zk=zk://$ZK_HOST:2181/mesos/master
-</pre>
-<p>is the same as the one on the scheduler:</p>
-<pre class="highlight text">-mesos_master_address=zk://$ZK_HOST:2181/mesos/master
-</pre>
-<h3 id="tasks-are-stuck-in-pending-forever">Tasks are stuck in <code>PENDING</code> forever</h3>
-
-<h4 id="symptoms">Symptoms</h4>
-
-<p>The scheduler is registered, and <a href="/documentation/latest/monitoring/#scheduler_resource_offers">receiving offers</a>,
-but tasks are perpetually shown as <code>PENDING - Constraint not satisfied: host</code>.</p>
-
-<h4 id="solution">Solution</h4>
-
-<p>Check that your slaves are configured with <code>host</code> and <code>rack</code> attributes.  Aurora requires that
-slaves are tagged with these two common failure domains to ensure that it can safely place tasks
-such that jobs are resilient to failure.</p>
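-
-<p>For example (attribute values are illustrative and site-specific):</p>
-<pre class="highlight text">mesos-slave --attributes=&quot;host:slave1.example.com;rack:rack1&quot; ...
-</pre>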
-
-<p>See our <a href="examples/vagrant/upstart/mesos-slave.conf">vagrant example</a> for details.</p>
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/documentation/latest/developing-aurora-client/index.html b/publish/documentation/latest/developing-aurora-client/index.html
deleted file mode 100644
index bd5a053..0000000
--- a/publish/documentation/latest/developing-aurora-client/index.html
+++ /dev/null
@@ -1,201 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Transparent Apache Aurora logo with dark background"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h1 id="getting-started">Getting Started</h1>
-
-<p>Aurora consists of four main pieces: the scheduler (which finds resources in the cluster that can be used to run a job), the executor (which uses the resources assigned by the scheduler to run a job), the command-line client, and the web UI. For information about working on the scheduler or the web UI, see the file &ldquo;developing-aurora-scheduler.md&rdquo; in this directory.</p>
-
-<p>If you want to work on the command-line client, this is the place for you!</p>
-
-<p>The client is written in Python, and unlike the server side of things, we build the client using the Pants build tool, instead of Gradle. Pants is a tool that was built by Twitter for handling builds of large collaborative systems. You can see a detailed explanation of
-Pants <a href="http://pantsbuild.github.io/python-readme.html">here</a>.</p>
-
-<p>To build the client executable, run the following in a command-shell:</p>
-<pre class="highlight text">$ ./pants src/main/python/apache/aurora/client/cli:aurora2
-</pre>
-<p>This will produce a python executable <em>pex</em> file in <code>dist/aurora2.pex</code>. Pex files
-are fully self-contained executables: just copy the pex file into your path, and you&rsquo;ll be able to run it. For example, for a typical installation:</p>
-<pre class="highlight text">$ cp dist/aurora2.pex /usr/local/bin/aurora
-</pre>
-<p>To run all of the client tests:</p>
-<pre class="highlight text">$ ./pasts src/test/python/apache/aurora/client/:all
-</pre>
-<h1 id="client-configuration">Client Configuration</h1>
-
-<p>The client uses a configuration file that specifies available clusters. More information about the
-contents of this file can be found in the
-<a href="/documentation/latest/client-cluster-configuration/">Client Cluster Configuration</a> documentation. Information about
-how the client locates this file can be found in the
-<a href="client-commands.md#cluster-configuration">Client Commands</a> documentation.</p>
-
-<h1 id="client-versions">Client Versions</h1>
-
-<p>There are currently two versions of the aurora client, imaginatively known as v1 and v2. All new development is done entirely in v2, but we continue to support and fix bugs in v1 until v2 is feature-complete and tested, and Aurora users have had some time to adapt and switch their processes to use v2.</p>
-
-<p>Both versions are built on the same underlying API code.</p>
-
-<p>Client v1 was implemented using twitter.common.app. The command-line processing code for v1 can be found in <code>src/main/python/apache/aurora/client/commands</code> and
-<code>src/main/python/apache/aurora/client/bin</code>.</p>
-
-<p>Client v2 was implemented using its own noun/verb framework. The client v2 code can be found in <code>src/main/python/apache/aurora/client/cli</code>, and the noun/verb framework can be
-found in the <code>__init__.py</code> file in that directory.</p>
-
-<h1 id="building-and-testing-the-client">Building and Testing the Client</h1>
-
-<p>Building and testing the client code are both done using Pants. The relevant targets to know about are:</p>
-
-<ul>
-<li>Build a client v2 executable: <code>./pants src/main/python/apache/aurora/client/cli:aurora2</code></li>
-<li>Test client v2 code: <code>./pants src/test/python/apache/aurora/client/cli:all</code></li>
-<li>Build a client v1 executable: <code>./pants src/main/python/apache/aurora/client/bin:aurora_client</code></li>
-<li>Test client v1 code: <code>./pants src/test/python/apache/aurora/client/commands:all</code></li>
-<li>Test all client code: <code>./pants src/test/python/apache/aurora/client:all</code></li>
-</ul>
-
-<h1 id="overview-of-the-client-architecture">Overview of the Client Architecture</h1>
-
-<p>The client is built on a stacked architecture:</p>
-
-<ol>
-<li><p>At the lowest level, we have a thrift RPC API interface
-to the aurora scheduler. The interface is declared in thrift, in the file
-<code>src/main/thrift/org/apache/aurora/gen/api.thrift</code>.</p></li>
-<li><p>On top of the primitive API, we have a client API. The client API
-takes the primitive operations provided by the scheduler, and uses them
-to implement client-side behaviors. For example, when you update a job
-on the scheduler, that&rsquo;s done by a sequence of operations.  The sequence is implemented
-by the client API <code>update</code> method, which does the following using the thrift API:</p>
-
-<ul>
-<li>fetching the state of task instances in the mesos cluster, and figuring out which need
-to be updated;</li>
-<li>For each task to be updated:
-
-<ul>
-<li>killing the old version;</li>
-<li>starting the new version;</li>
-<li>monitoring the new version to ensure that the update succeeded.</li>
-</ul></li>
-</ul></li>
-<li><p>On top of the API, we have the command-line client itself. The core client, at this level,
-consists of the interface to the command-line which the user will use to interact with aurora.
-The client v2 code is found in <code>src/main/python/apache/aurora/client/cli</code>. In the <code>cli</code> directory,
-the rough structure is as follows:</p>
-
-<ul>
-<li><code>__init__.py</code> contains the noun/verb command-line processing framework used by client v2.</li>
-<li><code>jobs.py</code> contains the implementation of the core <code>job</code> noun, and all of its operations.</li>
-<li><code>bridge.py</code> contains the implementation of a component that allows us to ship a
-combined client that runs both v1 and v2 client commands during the transition period.</li>
-<li><code>client.py</code> contains the code that binds the client v2 nouns and verbs into an executable.</li>
-</ul></li>
-</ol>
-
-<h1 id="running/debugging-the-client">Running/Debugging the Client</h1>
-
-<p>For manually testing client changes against a cluster, we use vagrant. To start a virtual cluster,
-you need to install a working vagrant environment, and then run &ldquo;vagrant up&rdquo; for the root of
-the aurora workspace. This will create a vagrant host named &ldquo;devcluster&rdquo;, with a mesos master,
-a set of mesos slaves, and an aurora scheduler.</p>
-
-<p>To use the devcluster, you need to bring it up by running <code>vagrant up</code>, and then connect to the vagrant host using <code>vagrant ssh</code>. This will open a bash session on the virtual machine hosting the devcluster. In the home directory, there are two key paths to know about:</p>
-
-<ul>
-<li><code>~/aurora</code>: this is a copy of the git workspace in which you launched the vagrant cluster.
- To test client changes, you&rsquo;ll use this copy.</li>
-<li><code>/vagrant</code>: this is a mounted filesystem that&rsquo;s a direct image of your git workspace.
- This isn&rsquo;t a copy - it is your git workspace. Editing files on your host machine will
- be immediately visible here, because they are the same files.</li>
-</ul>
-
-<p>Whenever the scheduler is modified, to update your vagrant environment to use the new scheduler,
-you&rsquo;ll need to re-initialize your vagrant images. To do this, you need to run two commands:</p>
-
-<ul>
-<li><code>vagrant destroy</code>: this will delete the old devcluster image.</li>
-<li><code>vagrant up</code>: this creates a fresh devcluster image based on the current state of your workspace.</li>
-</ul>
-
-<p>You should try to minimize rebuilding vagrant images; it&rsquo;s not horribly slow, but it does take a while.</p>
-
-<p>To test client changes (a sample session is shown after the list):</p>
-
-<ul>
-<li>Make a change in your local workspace, and commit it.</li>
-<li><code>vagrant ssh</code> into the devcluster.</li>
-<li><code>cd aurora</code></li>
-<li>Pull your changes into the vagrant copy: <code>git pull /vagrant *branchname*</code>.</li>
-<li>Build the modified client using pants.</li>
-<li>Run your command using <code>aurora2</code>. (You don&rsquo;t need to do any install; the aurora2 command
- is a symbolic link to the executable generated by pants.)</li>
-</ul>
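-
-<p>Putting the steps together, a sample session might look like this (branch name illustrative):</p>
-<pre class="highlight text">$ vagrant ssh
-vagrant@devcluster:~$ cd aurora
-vagrant@devcluster:~/aurora$ git pull /vagrant my-feature-branch
-vagrant@devcluster:~/aurora$ ./pants src/main/python/apache/aurora/client/cli:aurora2
-vagrant@devcluster:~/aurora$ aurora2 help
-</pre>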
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/documentation/latest/developing-aurora-scheduler/index.html b/publish/documentation/latest/developing-aurora-scheduler/index.html
deleted file mode 100644
index 96b553d..0000000
--- a/publish/documentation/latest/developing-aurora-scheduler/index.html
+++ /dev/null
@@ -1,163 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Transparent Apache Aurora logo with dark background"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <p>Java code in the aurora repo is built with <a href="http://gradle.org">Gradle</a>.</p>
-
-<h1 id="getting-started">Getting Started</h1>
-
-<p>You will need Java 7 installed and on your <code>PATH</code> or unzipped somewhere with <code>JAVA_HOME</code> set. Then</p>
-<pre class="highlight text">./gradlew tasks
-</pre>
-<p>will bootstrap the build system and show available tasks. This can take a while the first time you
-run it but subsequent runs will be much faster due to cached artifacts.</p>
-
-<h2 id="running-the-tests">Running the Tests</h2>
-
-<p>Aurora has a comprehensive unit test suite. To run the tests use</p>
-<pre class="highlight text">./gradlew build
-</pre>
-<p>Gradle will only re-run tests when dependencies of them have changed. To force a re-run of all
-tests use</p>
-<pre class="highlight text">./gradlew clean build
-</pre>
-<h2 id="running-the-build-with-code-quality-checks">Running the build with code quality checks</h2>
-
-<p>To speed up development iteration, the plain gradle commands will not run static analysis tools.
-However, you should run them before posting a review diff, and <strong>always</strong> run them before pushing a
-commit to origin/master.</p>
-<pre class="highlight text">./gradlew build -Pq
-</pre>
-<h2 id="creating-a-bundle-for-deployment">Creating a bundle for deployment</h2>
-
-<p>Gradle can create a zip file containing Aurora, all of its dependencies, and a launch script with</p>
-<pre class="highlight text">./gradlew distZip
-</pre>
-<p>or a tar file containing the same files with</p>
-<pre class="highlight text">./gradlew distTar
-</pre>
-<p>The output file will be written to <code>dist/distributions/aurora-scheduler.zip</code> or
-<code>dist/distributions/aurora-scheduler.tar</code>.</p>
-
-<h1 id="developing-aurora-java-code">Developing Aurora Java code</h1>
-
-<h2 id="setting-up-an-ide">Setting up an IDE</h2>
-
-<p>Gradle can generate project files for your IDE. To generate an IntelliJ IDEA project run</p>
-<pre class="highlight text">./gradlew idea
-</pre>
-<p>and import the generated <code>aurora.ipr</code> file.</p>
-
-<h2 id="adding-or-upgrading-a-dependency">Adding or Upgrading a Dependency</h2>
-
-<p>New dependencies can be added from Maven central by adding a <code>compile</code> dependency to <code>build.gradle</code>.
-For example, to add a dependency on <code>com.example</code>&rsquo;s <code>example-lib</code> 1.0 add this block:</p>
-<pre class="highlight text">compile &#39;com.example:example-lib:1.0&#39;
-</pre>
-<p>NOTE: Anyone thinking about adding a new dependency should first familiarize themselves with the
-Apache Foundation&rsquo;s third-party licensing
-<a href="http://www.apache.org/legal/resolved.html#category-x">policy</a>.</p>
-
-<h1 id="developing-aurora-ui">Developing Aurora UI</h1>
-
-<h2 id="installing-bower-(optional)">Installing bower (optional)</h2>
-
-<p>Third party JS libraries used in Aurora (located at 3rdparty/javascript/bower_components) are
-managed by bower, a JS dependency manager. Bower is only required if you plan to add, remove or
-update JS libraries. Bower can be installed using the following command:</p>
-<pre class="highlight text">npm install -g bower
-</pre>
-<p>Bower depends on node.js and npm. The easiest way to install node on a Mac is via brew:</p>
-<pre class="highlight text">brew install node
-</pre>
-<p>For more node.js installation options refer to <a href="https://github.com/joyent/node/wiki/Installation">https://github.com/joyent/node/wiki/Installation</a>.</p>
-
-<p>More info on installing and using bower can be found at: <a href="http://bower.io/">http://bower.io/</a>. Once installed, you can
-use the following commands to view and modify the bower repo at
-3rdparty/javascript/bower_components:
-<pre class="highlight text">bower list
-bower install &lt;library name&gt;
-bower remove &lt;library name&gt;
-bower update &lt;library name&gt;
-bower help
-</pre>
-<h1 id="developing-the-aurora-build-system">Developing the Aurora Build System</h1>
-
-<h2 id="bootstrapping-gradle">Bootstrapping Gradle</h2>
-
-<p>The following files were autogenerated by <code>gradle wrapper</code> using gradle 1.8&rsquo;s
-<a href="http://www.gradle.org/docs/1.8/dsl/org.gradle.api.tasks.wrapper.Wrapper.html">Wrapper</a> plugin and
-should not be modified directly:</p>
-<pre class="highlight text">./gradlew
-./gradlew.bat
-./gradle/wrapper/gradle-wrapper.jar
-./gradle/wrapper/gradle-wrapper.properties
-</pre>
-<p>To upgrade Gradle unpack the new version somewhere, run <code>/path/to/new/gradle wrapper</code> in the
-repository root and commit the changed files.</p>
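-
-<p>For example (paths and version are illustrative):</p>
-<pre class="highlight text">cd ~/incubator-aurora
-/opt/gradle-1.10/bin/gradle wrapper
-git add gradlew gradlew.bat gradle/wrapper/
-git commit -m &#39;Upgrade the Gradle wrapper&#39;
-</pre>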
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/documentation/latest/hooks/index.html b/publish/documentation/latest/hooks/index.html
deleted file mode 100644
index c7f0a61..0000000
--- a/publish/documentation/latest/hooks/index.html
+++ /dev/null
@@ -1,327 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Transparent Apache Aurora logo with dark background"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h1 id="hooks-for-aurora-client-api">Hooks for Aurora Client API</h1>
-
-<ul>
-<li><a href="#introduction">Introduction</a></li>
-<li><a href="#hook-types">Hook Types</a></li>
-<li><a href="#execution-order">Execution Order</a></li>
-<li><a href="#hookable-methods">Hookable Methods</a></li>
-<li><a href="#activating-and-using-hooks">Activating and Using Hooks</a></li>
-<li><a href="#aurora-config-file-settings">.aurora Config File Settings</a></li>
-<li><a href="#command-line">Command Line</a></li>
-<li><a href="#hooks-protocol">Hooks Protocol</a>
-
-<ul>
-<li><a href="#pre_-methods">pre_ Methods</a></li>
-<li><a href="#err_-methods">err_ Methods</a></li>
-<li><a href="#post_-methods">post_ Methods</a></li>
-</ul></li>
-<li><a href="#generic-hooks">Generic Hooks</a></li>
-<li><a href="#hooks-process-checklist">Hooks Process Checklist</a></li>
-</ul>
-
-<h2 id="introduction">Introduction</h2>
-
-<p>You can execute hook methods around Aurora Client API methods when they are called by the Aurora Command Line commands.</p>
-
-<p>Explaining how hooks work is a bit tricky because of some indirection about what they apply to. Basically, a hook is code that executes when a particular Aurora Client API method runs, letting you extend the method&rsquo;s actions. The hook executes on the client side, specifically on the machine executing Aurora commands.</p>
-
-<p>The catch is that hooks are associated with Aurora Client API methods, which users don&rsquo;t directly call. Instead, users call Aurora Command Line commands, which call Client API methods during their execution. Since which hooks run depends on which Client API methods get called, you will need to know which Command Line commands call which API methods. Later on, there is a table showing the various associations.</p>
-
-<p><strong>Terminology Note</strong>: From now on, &ldquo;method(s)&rdquo; refer to Client API methods, and &ldquo;command(s)&rdquo; refer to Command Line commands.</p>
-
-<h2 id="hook-types">Hook Types</h2>
-
-<p>Hooks have three basic types, differing by when they run with respect to their associated method.</p>
-
-<p><code>pre_&lt;method_name&gt;</code>: When its associated method is called, the <code>pre_</code> hook executes first, then the called method. If the <code>pre_</code> hook fails, the method never runs. Later code that expected the method to succeed may be affected by this, which can result in termination of the Aurora client.</p>
-
-<p>Note that a <code>pre_</code> hook can error-trap internally so it does not
-return <code>False</code>. Designers/contributors of new <code>pre_</code> hooks should
-consider whether or not to error-trap them. You can error trap at the
-highest level very generally and always pass the <code>pre_</code> hook by
-returning <code>True</code>. For example:</p>
-<pre class="highlight python"><span class="k">def</span> <span class="nf">pre_create</span><span class="p">(</span><span class="o">...</span><span class="p">):</span>
-  <span class="n">do_something</span><span class="p">()</span>  <span class="c"># if do_something fails with an exception, the create_job is not attempted!</span>
-  <span class="k">return</span> <span class="bp">True</span>
-
-<span class="c"># However...</span>
-<span class="k">def</span> <span class="nf">pre_create</span><span class="p">(</span><span class="o">...</span><span class="p">):</span>
-  <span class="k">try</span><span class="p">:</span>
-    <span class="n">do_something</span><span class="p">()</span>  <span class="c"># may cause exception</span>
-  <span class="k">except</span> <span class="n">Exception</span><span class="p">:</span>  <span class="c"># generic error trap will catch it</span>
-    <span class="k">pass</span>  <span class="c"># and ignore the exception</span>
-  <span class="k">return</span> <span class="bp">True</span>  <span class="c"># create_job will run in any case!</span>
-</pre>
-<p><code>post_&lt;method_name&gt;</code>: A <code>post_</code> hook executes after its associated method successfully finishes running. If it fails, the already executed method is unaffected. A <code>post_</code> hook&rsquo;s error is trapped, and any later operations are unaffected.</p>
-
-<p><code>err_&lt;method_name&gt;</code>: Executes only when its associated method returns a status other than OK or throws an exception. If an <code>err_</code> hook fails, the already executed method is unaffected. An <code>err_</code> hook&rsquo;s error is trapped, and any later operations are unaffected.</p>
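-
-<p>For illustration, an <code>err_</code> hook in the same elided style as the <code>pre_</code> example above (<code>record_failure_somewhere</code> is a hypothetical helper):</p>
-<pre class="highlight python">def err_create_job(...):
-  record_failure_somewhere()  # e.g. log or alert; the failed create_job itself is unaffected
-  return True  # an err_ hook&#39;s own errors are trapped in any case
-</pre>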
-
-<h2 id="execution-order">Execution Order</h2>
-
-<p>A command with <code>pre_</code>, <code>post_</code>, and <code>err_</code> hooks defined and activated for its called method executes in the following order when the method successfully executes:</p>
-
-<ol>
-<li>Command called</li>
-<li>Command code executes</li>
-<li>Method Called</li>
-<li><code>pre_</code> method hook runs</li>
-<li>Method runs and successfully finishes</li>
-<li><code>post_</code> method hook runs</li>
-<li>Command code executes</li>
-<li>Command execution ends</li>
-</ol>
-
-<p>The following is what happens when, for the same command and hooks, the method associated with the command suffers an error and does not successfully finish executing:</p>
-
-<ol>
-<li>Command called</li>
-<li>Command code executes</li>
-<li>Method Called</li>
-<li><code>pre_</code> method hook runs</li>
-<li>Method runs and fails</li>
-<li><code>err_</code> method hook runs</li>
-<li>Command Code executes (if <code>err_</code> method does not end the command execution)</li>
-<li>Command execution ends</li>
-</ol>
-
-<p>Note that the <code>post_</code> and <code>err_</code> hooks for the same method can never both run for a single execution of that method.</p>
-
-<h2 id="hookable-methods">Hookable Methods</h2>
-
-<p>You can associate <code>pre_</code>, <code>post_</code>, and <code>err_</code> hooks with the following methods. Since you do not directly interact with the methods, but rather the Aurora Command Line commands that call them, for each method we also list the command(s) that can call the method. Note that a different method or methods may be called by a command depending on how the command&rsquo;s other code executes. Similarly, multiple commands can call the same method. We also list the methods&#39; argument signatures, which are used by their associated hooks. <a name="Chart"></a></p>
-
-<table><thead>
-<tr>
-<th>Aurora Client API Method</th>
-<th>Client API Method Argument Signature</th>
-<th>Aurora Command Line Command</th>
-</tr>
-</thead><tbody>
-<tr>
-<td><code>cancel_update</code></td>
-<td><code>self</code>, <code>job_key</code></td>
-<td><code>cancel_update</code></td>
-</tr>
-<tr>
-<td><code>create_job</code></td>
-<td><code>self</code>, <code>config</code></td>
-<td><code>create</code>, <code>runtask</code></td>
-</tr>
-<tr>
-<td><code>restart</code></td>
-<td><code>self</code>, <code>job_key</code>, <code>shards</code>, <code>update_config</code>, <code>health_check_interval_seconds</code></td>
-<td><code>restart</code></td>
-</tr>
-<tr>
-<td><code>update_job</code></td>
-<td><code>self</code>, <code>config</code>, <code>health_check_interval_seconds=3</code>, <code>shards=None</code></td>
-<td><code>update</code></td>
-</tr>
-<tr>
-<td><code>kill_job</code></td>
-<td><code>self</code>, <code>job_key</code>, <code>shards=None</code></td>
-<td><code>kill</code></td>
-</tr>
-</tbody></table>
-
-<p>Some specific examples:</p>
-
-<ul>
-<li><p><code>pre_create_job</code> executes when a <code>create_job</code> method is called, and before the <code>create_job</code> method itself executes.</p></li>
-<li><p><code>post_cancel_update</code> executes after a <code>cancel_update</code> method has successfully finished running.</p></li>
-<li><p><code>err_kill_job</code> executes when the <code>kill_job</code> method is called, but doesn&rsquo;t successfully finish running.</p></li>
-</ul>
-
-<h2 id="activating-and-using-hooks">Activating and Using Hooks</h2>
-
-<p>By default, hooks are inactive. If you do not want to use hooks, you do not need to make any changes to your code. If you do want to use hooks, you will need to alter your <code>.aurora</code> config file to activate them, both for the configuration as a whole and for individual <code>Job</code>s. And, of course, you will need to define in your config file what happens when a particular hook executes.</p>
-
-<h2 id=".aurora-config-file-settings">.aurora Config File Settings</h2>
-
-<p>You can define a top-level <code>hooks</code> variable in any <code>.aurora</code> config file. <code>hooks</code> is a list of all objects that define hooks used by <code>Job</code>s defined in that config file. If you do not want to define any hooks for a configuration, <code>hooks</code> is optional.</p>
-<pre class="highlight text">hooks = [Object_with_defined_hooks1, Object_with_defined_hooks2]
-</pre>
-<p>Be careful when assembling a config file using <code>include</code> on multiple smaller config files. If there are multiple files that assign a value to <code>hooks</code>, only the last assignment made will stick. For example, if <code>x.aurora</code> has <code>hooks = [a, b, c]</code> and <code>y.aurora</code> has <code>hooks = [d, e, f]</code> and <code>z.aurora</code> has, in this order, <code>include x.aurora</code> and <code>include y.aurora</code>, the <code>hooks</code> value will be <code>[d, e, f]</code>.</p>
-
-<p>Also, for any <code>Job</code> that you want to use hooks with, its <code>Job</code> definition in the <code>.aurora</code> config file must set an <code>enable_hooks</code> flag to <code>True</code> (it defaults to <code>False</code>).</p>
-
-<p>To summarize, to use hooks for a particular job, you must both activate hooks for your config file as a whole, and for that job. Activating hooks only for individual jobs won&rsquo;t work, nor will only activating hooks for your config file as a whole. You must also specify the hooks&#39; defining object in the <code>hooks</code> variable.</p>
-
-<p>Recall that <code>.aurora</code> config files are written in Pystachio. So the following turns on hooks for production jobs at cluster1 and cluster2, but leaves them off for similar jobs with a defined user role. Of course, you also need to list the objects that define the hooks in your config file&rsquo;s <code>hooks</code> variable.</p>
-<pre class="highlight python"><span class="n">jobs</span> <span class="o">=</span> <span class="p">[</span>
-        <span class="n">Job</span><span class="p">(</span><span class="n">enable_hooks</span> <span class="o">=</span> <span class="bp">True</span><span class="p">,</span> <span class="n">cluster</span> <span class="o">=</span> <span class="n">c</span><span class="p">,</span> <span class="n">env</span> <span class="o">=</span> <span class="s">&#39;prod&#39;</span><span class="p">)</span> <span class="k">for</span> <span class="n">c</span> <span class="ow">in</span> <span class="p">(</span><span class="s">&#39;cluster1&#39;</span><span class="p">,</span> <span class="s">&#39;cluster2&#39;</span><span class="p">)</span>
-       <span class="p">]</span>
-<span class="n">jobs</span><span class="o">.</span><span class="n">extend</span><span class="p">(</span>
-   <span class="n">Job</span><span class="p">(</span><span class="n">cluster</span> <span class="o">=</span> <span class="n">c</span><span class="p">,</span> <span class="n">env</span> <span class="o">=</span> <span class="s">&#39;prod&#39;</span><span class="p">,</span> <span class="n">role</span> <span class="o">=</span> <span class="n">getpass</span><span class="o">.</span><span class="n">getuser</span><span class="p">())</span> <span class="k">for</span> <span class="n">c</span> <span class="ow">in</span> <span class="p">(</span><span class="s">&#39;cluster1&#39;</span><span class="p">,</span> <span class="s">&#39;cluster2&#39;</span><span class="p">))</span>
-   <span class="c"># Hooks disabled for these jobs</span>
-</pre>
-<h2 id="command-line">Command Line</h2>
-
-<p>All Aurora Command Line commands now accept an <code>.aurora</code> config file as an optional parameter (some, of course, accept it as a required parameter). Whenever a command has a <code>.aurora</code> file parameter, any hooks specified and activated in the <code>.aurora</code> file can be used. For example:</p>
-<pre class="highlight text">aurora restart cluster1/role/env/app myapp.aurora
-</pre>
-<p>The command activates any hooks specified and activated in <code>myapp.aurora</code>. For the <code>restart</code> command, that is the only thing the <code>myapp.aurora</code> parameter does. So if the command were instead the following, no hooks could run on the <code>restart</code> command, since no <code>.aurora</code> config file is given to specify any.</p>
-<pre class="highlight text">aurora restart cluster1/role/env/app
-</pre>
-<h2 id="hooks-protocol">Hooks Protocol</h2>
-
-<p>Any object defined in the <code>.aurora</code> config file can define hook methods. You should define your hook methods within a class, and then use the class name as a value in the <code>hooks</code> list in your config file.</p>
-
-<p>Note that you can define other methods in the class that its hook methods can call; not all of a hook&rsquo;s logic has to live in the hook method itself.</p>
-
-<p>The following example defines a class containing a <code>pre_kill_job</code> hook definition that calls another method defined in the class.</p>
-<pre class="highlight python"><span class="c"># Defines a method pre_kill_job</span>
-<span class="k">class</span> <span class="nc">KillConfirmer</span><span class="p">(</span><span class="nb">object</span><span class="p">):</span>
-  <span class="k">def</span> <span class="nf">confirm</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">msg</span><span class="p">):</span>
-    <span class="k">return</span> <span class="nb">raw_input</span><span class="p">(</span><span class="n">msg</span><span class="p">)</span><span class="o">.</span><span class="n">lower</span><span class="p">()</span> <span class="o">==</span> <span class="s">&#39;yes&#39;</span>
-
-  <span class="k">def</span> <span class="nf">pre_kill_job</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">job_key</span><span class="p">,</span> <span class="n">shards</span><span class="o">=</span><span class="bp">None</span><span class="p">):</span>
-    <span class="n">shards</span> <span class="o">=</span> <span class="p">(</span><span class="s">&#39;shards </span><span class="si">%</span><span class="s">s&#39;</span> <span class="o">%</span> <span class="n">shards</span><span class="p">)</span> <span class="k">if</span> <span class="n">shards</span> <span class="ow">is</span> <span class="ow">not</span> <span class="bp">None</span> <span class="k">else</span> <span class="s">&#39;all shards&#39;</span>
-    <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">confirm</span><span class="p">(</span><span class="s">&#39;Are you sure you want to kill </span><span class="si">%</span><span class="s">s (</span><span class="si">%</span><span class="s">s)? (yes/no): &#39;</span>
-                        <span class="o">%</span> <span class="p">(</span><span class="n">job_key</span><span class="p">,</span> <span class="n">shards</span><span class="p">))</span>
-</pre>
-<h3 id="pre_-methods">pre_ Methods</h3>
-
-<p><code>pre_</code> methods have the signature:</p>
-<pre class="highlight text">pre_&lt;API method name&gt;(self, &lt;associated method&#39;s signature&gt;)
-</pre>
-<p><code>pre_</code> methods have the same signature as their associated method, with the addition of <code>self</code> as the first parameter. See the <a href="#Chart">chart</a> above for the mapping of parameters to methods. When writing <code>pre_</code> methods, you can use the <code>*</code> and <code>**</code> syntax to designate that all unspecified parameters are passed in a list to the <code>*</code>ed variable and all named parameters with values are passed as name/value pairs to the <code>**</code>ed variable.</p>
-
-<p>If this method returns <code>False</code>, the API command call aborts.</p>
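-
-<p>As an illustration only (not Aurora source code), a sketch of a <code>pre_</code> hook that uses the catch-all syntax and can veto the command; the class name and message are hypothetical:</p>
-<pre class="highlight python"># Hypothetical sketch: runs before the restart method executes.
-class Auditor(object):
-  def pre_restart(self, *args, **kw):
-    print &#39;restart requested with args=%r kw=%r&#39; % (args, kw)
-    return True  # returning False here would abort the restart
-</pre>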
-
-<h3 id="err_-methods">err_ Methods</h3>
-
-<p><code>err_</code> methods have the signature:</p>
-<pre class="highlight text">err_&lt;API method name&gt;(self, exc, &lt;associated method&#39;s signature&gt;)
-</pre>
-<p><code>err_</code> methods have the same signature as their associated method, with the addition of a first parameter <code>self</code> and a second parameter <code>exc</code>. <code>exc</code> is either a result with a responseCode other than <code>ResponseCode.OK</code> or an <code>Exception</code>. See the <a href="#Chart">chart</a> above for the mapping of parameters to methods. When writing <code>err_</code> methods, you can use the <code>*</code> and <code>**</code> syntax to designate that all unspecified parameters are passed in a list to the <code>*</code>ed variable and all named parameters with values are passed as name/value pairs to the <code>**</code>ed variable.</p>
-
-<p><code>err_</code> method return codes are ignored.</p>
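-
-<p>A sketch (hypothetical class and message; signature taken from the chart above) of an <code>err_</code> hook that records a failed <code>kill_job</code>:</p>
-<pre class="highlight python"># Hypothetical sketch: exc is the non-OK result or the raised exception.
-class FailureLogger(object):
-  def err_kill_job(self, exc, job_key, shards=None):
-    print &#39;kill of %s (shards: %s) failed: %s&#39; % (job_key, shards, exc)
-</pre>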
-
-<h3 id="post_-methods">post_ Methods</h3>
-
-<p><code>post_</code> methods have the signature:</p>
-<pre class="highlight text">post_&lt;API method name&gt;(self, result, &lt;associated method signature&gt;)
-</pre>
-<p><code>post_</code> method parameters are <code>self</code>, then <code>result</code>, followed by the same parameter signature as their associated method. <code>result</code> is the result of the associated method call. See the <a href="#Chart">chart</a> above for the mapping of parameters to methods. When writing <code>post_</code> methods, you can use the <code>*</code> and <code>**</code> syntax to designate that all unspecified parameters are passed in a list to the <code>*</code>ed variable and all named parameters with values are passed as name/value pairs to the <code>**</code>ed variable.</p>
-
-<p><code>post_</code> method return codes are ignored.</p>
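-
-<p>An illustrative sketch (hypothetical class; signature per the chart above) of a <code>post_</code> hook that logs the result of a successful <code>create_job</code>:</p>
-<pre class="highlight python"># Hypothetical sketch: result is whatever the create_job call returned.
-class ResultLogger(object):
-  def post_create_job(self, result, config):
-    print &#39;create_job finished with result: %r&#39; % (result,)
-</pre>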
-
-<h2 id="generic-hooks">Generic Hooks</h2>
-
-<p>There are five Aurora API methods to which any of the three hook types can attach, giving 15 possible hook/method combinations for a single <code>.aurora</code> config file. Say that you define <code>pre_</code> and <code>post_</code> hooks for the <code>restart</code> method. That leaves 13 undefined hook/method combinations: <code>err_restart</code>, plus the 3 <code>pre_</code>, <code>post_</code>, and <code>err_</code> hooks for each of the other 4 hookable methods. You can define what happens when any of these otherwise undefined 13 hooks execute via a generic hook, whose signature is:</p>
-<pre class="highlight python"><span class="n">generic_hook</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">hook_config</span><span class="p">,</span> <span class="n">event</span><span class="p">,</span> <span class="n">method_name</span><span class="p">,</span> <span class="n">result_or_err</span><span class="p">,</span> <span class="n">args</span><span class="o">*</span><span class="p">,</span> <span class="n">kw</span><span class="o">**</span><span class="p">)</span>
-</pre>
-<p>where:</p>
-
-<ul>
-<li><p><code>hook_config</code> is a named tuple of <code>config</code> (the Pystachio <code>config</code> object) and <code>job_key</code>.</p></li>
-<li><p><code>event</code> is one of <code>pre</code>, <code>err</code>, or <code>post</code>, indicating which type of hook the generic hook is standing in for. For example, assume no specific hooks were defined for the <code>restart</code> API command. If <code>generic_hook</code> is defined and activated, and <code>restart</code> is called, <code>generic_hook</code> will effectively run as <code>pre_restart</code>, <code>post_restart</code>, and <code>err_restart</code>. You can use a selection statement on this value so that <code>generic_hook</code> will act differently based on whether it is standing in for a <code>pre_</code>, <code>post_</code>, or <code>err_</code> hook.</p></li>
-<li><p><code>method_name</code> is the Client API method name whose execution is causing this execution of the <code>generic_hook</code>.</p></li>
-<li><p><code>*args</code>, <code>**kw</code> are the API method arguments and keyword arguments respectively.</p></li>
-<li><p><code>result_or_err</code> is a tri-state parameter taking one of these three values:</p>
-
-<ol>
-<li><code>None</code> for <code>pre_</code> hooks</li>
-<li><code>result</code> for <code>post_</code> hooks</li>
-<li><code>exc</code> for <code>err_</code> hooks</li>
-</ol></li>
-</ul>
-
-<p>Example:</p>
-<pre class="highlight python"><span class="c"># Overrides the standard do-nothing generic_hook by adding a log writing operation.</span>
-<span class="kn">from</span> <span class="nn">twitter.common</span> <span class="kn">import</span> <span class="n">log</span>
-  <span class="k">class</span> <span class="nc">Logger</span><span class="p">(</span><span class="nb">object</span><span class="p">):</span>
-    <span class="s">&#39;&#39;&#39;Adds to the log every time a hookable API method is called&#39;&#39;&#39;</span>
-    <span class="k">def</span> <span class="nf">generic_hook</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">hook_config</span><span class="p">,</span> <span class="n">event</span><span class="p">,</span> <span class="n">method_name</span><span class="p">,</span> <span class="n">result_or_err</span><span class="p">,</span> <span class="o">*</span><span class="n">args</span><span class="p">,</span> <span class="o">**</span><span class="n">kw</span><span class="p">)</span>
-      <span class="n">log</span><span class="o">.</span><span class="n">info</span><span class="p">(</span><span class="s">&#39;</span><span class="si">%</span><span class="s">s: </span><span class="si">%</span><span class="s">s_</span><span class="si">%</span><span class="s">s of </span><span class="si">%</span><span class="s">s&#39;</span>
-               <span class="o">%</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">__class__</span><span class="o">.</span><span class="n">__name__</span><span class="p">,</span> <span class="n">event</span><span class="p">,</span> <span class="n">method_name</span><span class="p">,</span> <span class="n">hook_config</span><span class="o">.</span><span class="n">job_key</span><span class="p">))</span>
-</pre>
-<h2 id="hooks-process-checklist">Hooks Process Checklist</h2>
-
-<ol>
-<li>In your <code>.aurora</code> config file, add a <code>hooks</code> variable. Note that you may want to define a <code>.aurora</code> file only for hook definitions and then include this file in multiple other config files that you want to use the same hooks.</li>
-</ol>
-<pre class="highlight python"><span class="n">hooks</span> <span class="o">=</span> <span class="p">[]</span>
-</pre>
-<ol start="2">
-<li>In the <code>hooks</code> variable, list all objects that define hooks used by <code>Job</code>s defined in this config:</li>
-</ol>
-<pre class="highlight python"><span class="n">hooks</span> <span class="o">=</span> <span class="p">[</span><span class="n">Object_hook_definer1</span><span class="p">,</span> <span class="n">Object_hook_definer2</span><span class="p">]</span>
-</pre>
-<ol start="3">
-<li><p>For each job that uses hooks in this config file, add <code>enable_hooks = True</code> to the <code>Job</code> definition. Note that this is necessary even if you only want to use the generic hook.</p></li>
-<li><p>Write your <code>pre_</code>, <code>post_</code>, and <code>err_</code> hook definitions as part of an object definition in your <code>.aurora</code> config file.</p></li>
-<li><p>If desired, write your <code>generic_hook</code> definition as part of an object definition in your <code>.aurora</code> config file. Remember, the object must be listed as a member of <code>hooks</code>.</p></li>
-<li><p>If your Aurora command line command does not otherwise take an <code>.aurora</code> config file argument, add the appropriate <code>.aurora</code> file as an argument in order to define and activate the configuration&rsquo;s hooks.</p></li>
-</ol>
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/documentation/latest/index.html b/publish/documentation/latest/index.html
deleted file mode 100644
index 74ea210..0000000
--- a/publish/documentation/latest/index.html
+++ /dev/null
@@ -1,118 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Transparent Apache Aurora logo with dark background"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h1 id="documentation">Documentation</h1>
-
-<h2 id="introduction">Introduction</h2>
-
-<p>Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run long-running services that take advantage of Apache Mesos&#39; scalability, fault-tolerance, and resource isolation. This documentation has been organized into sections with three audiences in mind:</p>
-
-<ul>
-<li>Users: General information about the project and how to run an Aurora job.</li>
-<li>Operators: For those that wish to manage and fine-tune an Aurora cluster.</li>
-<li>Developers: All the information you need to start modifying Aurora and contributing back to the project.</li>
-</ul>
-
-<p>This documentation is a work in progress, and we encourage you to ask questions on the <a href="http://aurora.incubator.apache.org/community/">Aurora developer list</a> or the <code>#aurora</code> IRC channel on <code>irc.freenode.net</code>.</p>
-
-<h2 id="users">Users</h2>
-
-<ul>
-<li><a href="/documentation/latest/vagrant/">Install Aurora on virtual machines on your private machine</a></li>
-<li><a href="/documentation/latest/tutorial/">Hello World Tutorial</a></li>
-<li><a href="/documentation/latest/user-guide/">User Guide</a></li>
-<li><a href="/documentation/latest/configuration-tutorial/">Configuration Tutorial</a></li>
-<li><a href="/documentation/latest/configuration-reference/">Aurora + Thermos Reference</a></li>
-<li><a href="/documentation/latest/client-commands/">Command Line Client</a></li>
-<li><a href="/documentation/latest/clientv2/">Aurora Client v2</a></li>
-<li><a href="/documentation/latest/cron-jobs/">Cron Jobs</a></li>
-</ul>
-
-<h2 id="operators">Operators</h2>
-
-<ul>
-<li><a href="/documentation/latest/deploying-aurora-scheduler/">Deploy Aurora</a></li>
-<li><a href="/documentation/latest/monitoring/">Monitoring</a></li>
-<li><a href="/documentation/latest/hooks/">Hooks for Aurora Client API</a></li>
-<li><a href="/documentation/latest/storage/">Scheduler Storage</a></li>
-<li><a href="/documentation/latest/storage-config/">Scheduler Storage and Maintenance</a></li>
-<li><a href="/documentation/latest/sla/">SLA Measurement</a></li>
-<li><a href="/documentation/latest/resource-isolation/">Resource Isolation and Sizing</a></li>
-<li><a href="/documentation/latest/test-resource-generation/">Generating test resources</a></li>
-</ul>
-
-<h2 id="developers">Developers</h2>
-
-<ul>
-<li><a href="/documentation/latest/contributing/">Contributing to the project</a></li>
-<li><a href="/documentation/latest/developing-aurora-scheduler/">Developing the Aurora Scheduler</a></li>
-<li><a href="/documentation/latest/developing-aurora-client/">Developing the Aurora Client</a></li>
-<li><a href="/documentation/latest/committers/">Committers Guide</a></li>
-</ul>
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/documentation/latest/monitoring/index.html b/publish/documentation/latest/monitoring/index.html
deleted file mode 100644
index a38ae69..0000000
--- a/publish/documentation/latest/monitoring/index.html
+++ /dev/null
@@ -1,315 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Transparent Apache Aurora logo with dark background"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h1 id="monitoring-your-aurora-cluster">Monitoring your Aurora cluster</h1>
-
-<p>Before you start running important services in your Aurora cluster, it&rsquo;s important to set up
-monitoring and alerting of Aurora itself.  Most of your monitoring can be against the scheduler,
-since it will give you a global view of what&rsquo;s going on.</p>
-
-<h2 id="reading-stats">Reading stats</h2>
-
-<p>The scheduler exposes a <em>lot</em> of instrumentation data via its HTTP interface. You can get a quick
-peek at the first few of these in our vagrant image:</p>
-<pre class="highlight text">$ vagrant ssh -c &#39;curl -s localhost:8081/vars | head&#39;
-async_tasks_completed 1004
-attribute_store_fetch_all_events 15
-attribute_store_fetch_all_events_per_sec 0.0
-attribute_store_fetch_all_nanos_per_event 0.0
-attribute_store_fetch_all_nanos_total 3048285
-attribute_store_fetch_all_nanos_total_per_sec 0.0
-attribute_store_fetch_one_events 3391
-attribute_store_fetch_one_events_per_sec 0.0
-attribute_store_fetch_one_nanos_per_event 0.0
-attribute_store_fetch_one_nanos_total 454690753
-</pre>
-<p>These values are served as <code>Content-Type: text/plain</code>, with each line containing a space-separated metric
-name and value. Values may be integers, doubles, or strings (note: strings are static, others
-may be dynamic).</p>
-
-<p>If your monitoring infrastructure prefers JSON, the scheduler exports that as well:</p>
-<pre class="highlight text">$ vagrant ssh -c &#39;curl -s localhost:8081/vars.json | python -mjson.tool | head&#39;
-{
-    &quot;async_tasks_completed&quot;: 1009,
-    &quot;attribute_store_fetch_all_events&quot;: 15,
-    &quot;attribute_store_fetch_all_events_per_sec&quot;: 0.0,
-    &quot;attribute_store_fetch_all_nanos_per_event&quot;: 0.0,
-    &quot;attribute_store_fetch_all_nanos_total&quot;: 3048285,
-    &quot;attribute_store_fetch_all_nanos_total_per_sec&quot;: 0.0,
-    &quot;attribute_store_fetch_one_events&quot;: 3409,
-    &quot;attribute_store_fetch_one_events_per_sec&quot;: 0.0,
-    &quot;attribute_store_fetch_one_nanos_per_event&quot;: 0.0,
-</pre>
-<p>This will be the same data as above, served with <code>Content-Type: application/json</code>.</p>
-
-<h2 id="viewing-live-stat-samples-on-the-scheduler">Viewing live stat samples on the scheduler</h2>
-
-<p>The scheduler uses the Twitter commons stats library, which keeps an internal time-series database
-of exported variables - nearly everything in <code>/vars</code> is available for instant graphing.  This is
-useful for debugging, but is not a replacement for an external monitoring system.</p>
-
-<p>You can view these graphs on a scheduler at <code>/graphview</code>.  It supports some composition and
-aggregation of values, which can be invaluable when triaging a problem.  For example, if you have
-the scheduler running in vagrant, check out these links:
-<a href="http://192.168.33.7:8081/graphview?query=jvm_uptime_secs">simple graph</a>
-<a href="http://192.168.33.7:8081/graphview?query=rate(scheduler_log_native_append_nanos_total)%2Frate(scheduler_log_native_append_events)%2F1e6">complex composition</a></p>
-
-<h3 id="counters-and-gauges">Counters and gauges</h3>
-
-<p>Among numeric stats, there are two fundamental types of stats exported: <em>counters</em> and <em>gauges</em>.
-Counters are guaranteed to be monotonically-increasing for the lifetime of a process, while gauges
-may decrease in value.  Aurora uses counters to represent things like the number of times an event
-has occurred, and gauges to capture things like the current length of a queue.  Counters are a
-natural fit for accurate composition into <a href="http://en.wikipedia.org/wiki/Rate_ratio">rate ratios</a>
-(useful for sample-resistant latency calculation), while gauges are not.</p>
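-
-<p>To make the rate-ratio idea concrete, here is a hypothetical sketch (the function name and sampling interval are assumptions, not Aurora code) that turns two samples of a nanos/events counter pair into a windowed latency figure, mirroring the complex-composition graph query above:</p>
-<pre class="highlight python"># Hypothetical sketch: both inputs are monotonically increasing counters,
-# sampled at the start and end of a window; result is avg ms per event.
-def windowed_latency_ms(nanos_1, events_1, nanos_2, events_2):
-  delta_events = events_2 - events_1
-  if delta_events == 0:
-    return 0.0
-  return (nanos_2 - nanos_1) / float(delta_events) / 1e6
-</pre>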
-
-<h1 id="alerting">Alerting</h1>
-
-<h2 id="quickstart">Quickstart</h2>
-
-<p>If you are looking for just bare-minimum alerting to get something in place quickly, set up alerting
-on <code>framework_registered</code> and <code>task_store_LOST</code>. These will give you a decent picture of overall
-health.</p>
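-
-<p>As an illustration only (not an Aurora tool), a minimal Python 2 polling sketch against the <code>/vars.json</code> endpoint shown above; the host, port, and alert action are assumptions to adapt to your monitoring system:</p>
-<pre class="highlight python">import json
-import urllib2
-
-# Hypothetical sketch: poll the scheduler and check the two quickstart stats.
-stats = json.load(urllib2.urlopen(&#39;http://localhost:8081/vars.json&#39;))
-if stats.get(&#39;framework_registered&#39;) != 1:
-  print &#39;ALERT: this scheduler is not registered with the Mesos master&#39;
-print &#39;task_store_LOST is currently %s&#39; % stats.get(&#39;task_store_LOST&#39;)
-</pre>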
-
-<h2 id="a-note-on-thresholds">A note on thresholds</h2>
-
-<p>One of the most difficult things in monitoring is choosing alert thresholds. With many of these
-stats, there is no value we can offer as a threshold that will be guaranteed to work for you. It
-will depend on the size of your cluster, number of jobs, churn of tasks in the cluster, etc. We
-recommend you start with a strict value after viewing a small amount of collected data, and then
-adjust thresholds as you see fit. Feel free to ask us if you would like to validate that your alerts
-and thresholds make sense.</p>
-
-<h4 id="jvm_uptime_secs"><code>jvm_uptime_secs</code></h4>
-
-<p>Type: integer counter</p>
-
-<h4 id="description">Description</h4>
-
-<p>The number of seconds the JVM process has been running. Comes from
-<a href="http://docs.oracle.com/javase/7/docs/api/java/lang/management/RuntimeMXBean.html#getUptime()">RuntimeMXBean#getUptime()</a></p>
-
-<h4 id="alerting">Alerting</h4>
-
-<p>Detecting resets (decreasing values) on this stat will tell you that the scheduler is failing to
-stay alive.</p>
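-
-<p>A hypothetical reset check, comparing two samples of the stat taken some interval apart:</p>
-<pre class="highlight python"># Hypothetical sketch: a decrease between samples means the JVM restarted.
-def jvm_restarted(uptime_before, uptime_after):
-  return uptime_after &lt; uptime_before
-</pre>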
-
-<h4 id="triage">Triage</h4>
-
-<p>Look at the scheduler logs to identify the reason the scheduler is exiting.</p>
-
-<h4 id="system_load_avg"><code>system_load_avg</code></h4>
-
-<p>Type: double gauge</p>
-
-<h4 id="description">Description</h4>
-
-<p>The current load average of the system for the last minute. Comes from
-<a href="http://docs.oracle.com/javase/7/docs/api/java/lang/management/OperatingSystemMXBean.html?is-external=true#getSystemLoadAverage()">OperatingSystemMXBean#getSystemLoadAverage()</a>.</p>
-
-<h4 id="alerting">Alerting</h4>
-
-<p>A high sustained value suggests that the scheduler machine may be over-utilized.</p>
-
-<h4 id="triage">Triage</h4>
-
-<p>Use standard unix tools like <code>top</code> and <code>ps</code> to track down the offending process(es).</p>
-
-<h4 id="process_cpu_cores_utilized"><code>process_cpu_cores_utilized</code></h4>
-
-<p>Type: double gauge</p>
-
-<h4 id="description">Description</h4>
-
-<p>The current number of CPU cores in use by the JVM process. This should not exceed the number of
-logical CPU cores on the machine. Derived from
-<a href="http://docs.oracle.com/javase/7/docs/jre/api/management/extension/com/sun/management/OperatingSystemMXBean.html">OperatingSystemMXBean#getProcessCpuTime()</a></p>
-
-<h4 id="alerting">Alerting</h4>
-
-<p>A high sustained value indicates that the scheduler is overworked. Due to current internal design
-limitations, if this value is sustained at <code>1</code>, there is a good chance the scheduler is under water.</p>
-
-<h4 id="triage">Triage</h4>
-
-<p>There are two main inputs that tend to drive this figure: task scheduling attempts and status
-updates from Mesos.  You may see activity in the scheduler logs to give an indication of where
-time is being spent.  Beyond that, it really takes good familiarity with the code to effectively
-triage this.  We suggest engaging with an Aurora developer.</p>
-
-<h4 id="task_store_lost"><code>task_store_LOST</code></h4>
-
-<p>Type: integer gauge</p>
-
-<h4 id="description">Description</h4>
-
-<p>The number of tasks stored in the scheduler that are in the <code>LOST</code> state, and have been rescheduled.</p>
-
-<h4 id="alerting">Alerting</h4>
-
-<p>If this value is increasing at a high rate, it is a sign of trouble.</p>
-
-<h4 id="triage">Triage</h4>
-
-<p>There are many sources of <code>LOST</code> tasks in Mesos: the scheduler, master, slave, and executor can all
-trigger this.  The first step is to look in the scheduler logs for <code>LOST</code> to identify where the
-state changes are originating.</p>
-
-<h4 id="scheduler_resource_offers"><code>scheduler_resource_offers</code></h4>
-
-<p>Type: integer counter</p>
-
-<h4 id="description">Description</h4>
-
-<p>The number of resource offers that the scheduler has received.</p>
-
-<h4 id="alerting">Alerting</h4>
-
-<p>For a healthy scheduler, this value must be increasing over time.</p>
-
-<h5 id="triage">Triage</h5>
-
-<p>Assuming the scheduler is up and otherwise healthy, you will want to check if the master thinks it
-is sending offers. You should also look at the master&rsquo;s web interface to see if it has a large
-number of outstanding offers that it is waiting to have returned.</p>
-
-<h4 id="framework_registered"><code>framework_registered</code></h4>
-
-<p>Type: binary integer counter</p>
-
-<h4 id="description">Description</h4>
-
-<p>Will be <code>1</code> for the leading scheduler that is registered with the Mesos master, <code>0</code> for passive
-schedulers.</p>
-
-<h4 id="alerting">Alerting</h4>
-
-<p>A sustained period without a <code>1</code> (or where <code>sum() != 1</code>) warrants investigation.</p>
-
-<h4 id="triage">Triage</h4>
-
-<p>If there is no leading scheduler, look in the scheduler and master logs for why.  If there are
-multiple schedulers claiming leadership, this suggests a split brain and warrants filing a critical
-bug.</p>
-
-<h4 id="rate(scheduler_log_native_append_nanos_total)/rate(scheduler_log_native_append_events)"><code>rate(scheduler_log_native_append_nanos_total)/rate(scheduler_log_native_append_events)</code></h4>
-
-<p>Type: rate ratio of integer counters</p>
-
-<h4 id="description">Description</h4>
-
-<p>This composes two counters to compute a windowed figure for the latency of replicated log writes.</p>
-
-<h4 id="alerting">Alerting</h4>
-
-<p>A hike in this value suggests disk bandwidth contention.</p>
-
-<h4 id="triage">Triage</h4>
-
-<p>Look in scheduler logs for any reported oddness with saving to the replicated log. Also use
-standard tools like <code>vmstat</code> and <code>iotop</code> to identify whether the disk has become slow or
-over-utilized. We suggest using a dedicated disk for the replicated log to mitigate this.</p>
-
-<h4 id="timed_out_tasks"><code>timed_out_tasks</code></h4>
-
-<p>Type: integer counter</p>
-
-<h4 id="description">Description</h4>
-
-<p>Tracks the number of times the scheduler has given up while waiting
-(for <code>-transient_task_state_timeout</code>) to hear back about a task that is in a transient state
-(e.g. <code>ASSIGNED</code>, <code>KILLING</code>), and has moved to <code>LOST</code> before rescheduling.</p>
-
-<h4 id="alerting">Alerting</h4>
-
-<p>This value is currently known to increase occasionally when the scheduler fails over
-(<a href="https://issues.apache.org/jira/browse/AURORA-740">AURORA-740</a>). However, any large spike in this
-value warrants investigation.</p>
-
-<h4 id="triage">Triage</h4>
-
-<p>The scheduler will log when it times out a task. You should trace the task ID of the timed out
-task into the master, slave, and/or executors to determine where the message was dropped.</p>
-
-<h4 id="http_500_responses_events"><code>http_500_responses_events</code></h4>
-
-<p>Type: integer counter</p>
-
-<h4 id="description">Description</h4>
-
-<p>The total number of HTTP 500 status responses sent by the scheduler. Includes API and asset serving.</p>
-
-<h4 id="alerting">Alerting</h4>
-
-<p>An increase warrants investigation.</p>
-
-<h4 id="triage">Triage</h4>
-
-<p>Look in scheduler logs to identify why the scheduler returned a 500; there should be a stack trace.</p>
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/documentation/latest/resource-isolation/index.html b/publish/documentation/latest/resource-isolation/index.html
deleted file mode 100644
index 8768a79..0000000
--- a/publish/documentation/latest/resource-isolation/index.html
+++ /dev/null
@@ -1,220 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Transparent Apache Aurora logo with dark background"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h1 id="resource-isolation-and-sizing">Resource Isolation and Sizing</h1>
-
-<p><strong>NOTE</strong>: Resource Isolation and Sizing is very much a work in progress.
-Both user-facing aspects and how it works under the hood are subject to
-change.</p>
-
-<ul>
-<li><a href="#introduction">Introduction</a></li>
-<li><a href="#cpu-isolation">CPU Isolation</a></li>
-<li><a href="#cpu-sizing">CPU Sizing</a></li>
-<li><a href="#memory-isolation">Memory Isolation</a></li>
-<li><a href="#memory-sizing">Memory Sizing</a></li>
-<li><a href="#disk-space">Disk Space</a></li>
-<li><a href="#disk-space-sizing">Disk Space Sizing</a></li>
-<li><a href="#other-resources">Other Resources</a></li>
-</ul>
-
-<h2 id="introduction">Introduction</h2>
-
-<p>Aurora is a multi-tenant system; a single software instance runs on a
-server, serving multiple clients/tenants. To share resources among
-tenants, it implements isolation of:</p>
-
-<ul>
-<li>CPU</li>
-<li>memory</li>
-<li>disk space</li>
-</ul>
-
-<p>CPU is a soft limit, and handled differently from memory and disk space.
-Too low a CPU value results in throttling your application and
-slowing it down. Memory and disk space are both hard limits; when your
-application goes over these values, it&rsquo;s killed.</p>
-
-<p>Let&rsquo;s look at each resource type in more detail:</p>
-
-<h2 id="cpu-isolation">CPU Isolation</h2>
-
-<p>Mesos uses a quota-based CPU scheduler (the <em>Completely Fair Scheduler</em>)
-to provide consistent and predictable performance.  This is effectively
-a guarantee of resources &ndash; you receive at least what you requested, but
-also no more than you&rsquo;ve requested.</p>
-
-<p>The scheduler gives applications a CPU quota for every 100 ms interval.
-When an application uses its quota for an interval, it is throttled for
-the rest of the 100 ms. Usage resets for each interval and unused
-quota does not carry over.</p>
-
-<p>For example, an application specifying 4.0 CPU has access to 400 ms of
-CPU time every 100 ms. This CPU quota can be used in different ways,
-depending on the application and available resources. Consider the
-scenarios shown in this diagram.</p>
-
-<p><img alt="CPU Availability" src="../images/CPUavailability.png" /></p>
-
-<ul>
-<li><p><em>Scenario A</em>: the application can use up to 4 cores continuously for
-every 100 ms interval. It is never throttled and starts processing
-new requests immediately.</p></li>
-<li><p><em>Scenario B</em> : the application uses up to 8 cores (depending on
-availability) but is throttled after 50 ms. The CPU quota resets at the
-start of each new 100 ms interval.</p></li>
-<li><p><em>Scenario C</em> : is like Scenario A, but there is a garbage collection
-event in the second interval that consumes all CPU quota. The
-application throttles for the remaining 75 ms of that interval and
-cannot service requests until the next interval. In this example, the
-garbage collection finished in one interval but, depending on how much
-garbage needs collecting, it may take more than one interval and further
-delay service of requests.</p></li>
-</ul>
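-
-<p>As a worked sketch of the arithmetic behind <em>Scenario B</em> (numbers taken from the example above):</p>
-<pre class="highlight python"># 4.0 CPUs => 400 ms of CPU time per 100 ms wall-clock interval.
-cpus = 4.0
-period_ms = 100
-quota_ms = cpus * period_ms             # 400 ms of CPU time per interval
-cores_in_use = 8
-runs_for_ms = quota_ms / cores_in_use   # quota exhausted after 50 ms
-throttled_ms = period_ms - runs_for_ms  # throttled for the remaining 50 ms
-</pre>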
-
-<p><em>Technical Note</em>: Mesos considers logical cores, also known as
-hyperthreading or SMT cores, as the unit of CPU.</p>
-
-<h2 id="cpu-sizing">CPU Sizing</h2>
-
-<p>To correctly size Aurora-run Mesos tasks, specify a per-shard CPU value
-that lets the task run at its desired performance when at peak load
-distributed across all shards. Include reserve capacity of at least 50%,
-possibly more, depending on how critical your service is (or how
-confident you are about your original estimate :-)), ideally by
-increasing the number of shards to also improve resiliency. When running
-your application, observe its CPU stats over time. If consistently at or
-near your quota during peak load, you should consider increasing either
-per-shard CPU or the number of shards.</p>
-
-<h2 id="memory-isolation">Memory Isolation</h2>
-
-<p>Mesos uses dedicated memory allocation. Your application always has
-access to the amount of memory specified in your configuration. The
-application&rsquo;s memory use is defined as the sum of the resident set size
-(RSS) of all processes in a shard. Each shard is considered
-independently.</p>
-
-<p>In other words, say you specified a memory size of 10GB. Each shard
-would receive 10GB of memory. If an individual shard&rsquo;s memory demands
-exceed 10GB, that shard is killed, but the other shards continue
-working.</p>
-
-<p><em>Technical note</em>: Total memory size is not enforced at allocation time,
-so your application can request more than its allocation without getting
-an ENOMEM. However, it will be killed shortly after.</p>
-
-<h2 id="memory-sizing">Memory Sizing</h2>
-
-<p>Size for your application&rsquo;s peak requirement. Observe the per-instance
-memory statistics over time, as memory requirements can vary over
-different periods. Remember that if your application exceeds its memory
-value, it will be killed, so you should also add a safety margin of
-around 10-20%. If you have the ability to do so, you may also want to
-put alerts on the per-instance memory.</p>
-
-<h2 id="disk-space">Disk Space</h2>
-
-<p>Disk space used by your application is defined as the sum of the files&#39;
-disk space in your application&rsquo;s directory, including the <code>stdout</code> and
-<code>stderr</code> logged from your application. Each shard is considered
-independently. You should use off-node storage for your application&rsquo;s
-data whenever possible.</p>
-
-<p>In other words, say you specified disk space size of 100MB. Each shard
-would receive 100MB of disk space. If an individual shard&rsquo;s disk space
-demands exceed 100MB, that shard is killed, but the other shards
-continue working.</p>
-
-<p>After your application finishes running, its allocated disk space is
-reclaimed. Thus, your job&rsquo;s final action should move any disk content
-that you want to keep, such as logs, to your home file system or other
-less transitory storage. Disk reclamation takes place an undefined
-period after the application finish time; until then, the disk contents
-are still available but you shouldn&rsquo;t count on them being so.</p>
-
-<p><em>Technical note</em> : Disk space is not enforced at write so your
-application can write above its quota without getting an ENOSPC, but it
-will be killed shortly after. This is subject to change.</p>
-
-<h2 id="disk-space-sizing">Disk Space Sizing</h2>
-
-<p>Size for your application&rsquo;s peak requirement. Rotate and discard log
-files as needed to stay within your quota. When running a Java process,
-add the maximum size of the Java heap to your disk space requirement, in
-order to account for an out of memory error dumping the heap
-into the application&rsquo;s sandbox space.</p>
-
-<h2 id="other-resources">Other Resources</h2>
-
-<p>Other resources, such as network bandwidth, do not have any performance
-guarantees. For some resources, such as memory bandwidth, there are no
-practical sharing methods so some application combinations collocated on
-the same host may cause contention.</p>
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/documentation/latest/scheduler-storage/index.html b/publish/documentation/latest/scheduler-storage/index.html
deleted file mode 100644
index cd2c25d..0000000
--- a/publish/documentation/latest/scheduler-storage/index.html
+++ /dev/null
@@ -1,117 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Transparent Apache Aurora logo with dark background"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h1 id="snapshot-performance">Snapshot Performance</h1>
-
-<p>Periodically the scheduler writes a full snapshot of its state to the replicated log. To do this
-it needs to hold a global storage write lock while it writes out this data. In large clusters
-this has been observed to take up to 40 seconds. Long pauses can cause issues in the system,
-including delays in scheduling new tasks.</p>
-
-<p>The scheduler has two optimizations to reduce the size of snapshots and thus improve snapshot
-performance: compression and deduplication. Most users will want to enable both compression
-and deduplication.</p>
-
-<h2 id="compression">Compression</h2>
-
-<p>To reduce the size of the snapshot the DEFLATE algorithm can be applied to the serialized bytes
-of the snapshot as they are written to the stream. This reduces the total number of bytes that
-need to be written to the replicated log at the cost of CPU and generally reduces the amount
-of time a snapshot takes.</p>
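-
-<p>The effect is analogous to the following sketch (illustration only, not the scheduler&rsquo;s actual code path):</p>
-<pre class="highlight python">import zlib
-
-# Illustration only: DEFLATE the serialized bytes so fewer bytes hit the log.
-snapshot_bytes = b&#39;...serialized snapshot bytes...&#39;  # placeholder payload
-compressed = zlib.compress(snapshot_bytes)
-</pre>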
-
-<h3 id="enabling-compression">Enabling Compression</h3>
-
-<p>Snapshot compression is enabled via the <code>-deflate_snapshots</code> flag. This is the default since
-Aurora 0.5.0. All released versions of Aurora can read both compressed and uncompressed snapshots,
-so there are no backwards compatibility concerns associated with changing this flag.</p>
-
-<h3 id="disabling-compression">Disabling compression</h3>
-
-<p>Disable compression by passing <code>-deflate_snapshots=false</code>.</p>
-
-<h2 id="deduplication">Deduplication</h2>
-
-<p>In Aurora 0.6.0 a new snapshot format was introduced. Rather than write one configuration blob
-per Mesos task this format stores each configuration blob once, and each Mesos task with a
-pointer to its blob. This format is not backwards compatible with earlier versions of Aurora.</p>
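-
-<p>Conceptually (a simplified illustration, not the actual snapshot schema), deduplication replaces per-task copies of a blob with one shared entry:</p>
-<pre class="highlight python"># Simplified illustration: tasks reference a blob key; each distinct
-# configuration blob is written to the snapshot exactly once.
-configs = {&#39;blob-A&#39;: &#39;&lt;serialized config&gt;&#39;, &#39;blob-B&#39;: &#39;&lt;serialized config&gt;&#39;}
-tasks = [(&#39;task-1&#39;, &#39;blob-A&#39;), (&#39;task-2&#39;, &#39;blob-A&#39;), (&#39;task-3&#39;, &#39;blob-B&#39;)]
-</pre>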
-
-<h3 id="enabling-deduplication">Enabling Deduplication</h3>
-
-<p>After upgrading Aurora to 0.6.0, enable deduplication with the <code>-deduplicate_snapshots</code> flag.
-After the first snapshot the cluster will be using the deduplicated format to write to the
-replicated log. Snapshots are created periodically by the scheduler (according to
-the <code>-dlog_snapshot_interval</code> flag). An administrator can also force a snapshot operation with
-<code>aurora_admin snapshot</code>.</p>
-
-<h3 id="disabling-deduplication">Disabling Deduplication</h3>
-
-<p>To disable deduplication, for example to roll back to an earlier version of Aurora, restart all of the cluster&rsquo;s
-schedulers with <code>-deduplicate_snapshots=false</code> and either wait for a snapshot or force one
-using <code>aurora_admin snapshot</code>.</p>
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/documentation/latest/sla/index.html b/publish/documentation/latest/sla/index.html
deleted file mode 100644
index 4dc7d6b..0000000
--- a/publish/documentation/latest/sla/index.html
+++ /dev/null
@@ -1,265 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Apache Aurora logo"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h2 id="aurora-sla-measurement">Aurora SLA Measurement</h2>
-
-<ul>
-<li><a href="#overview">Overview</a></li>
-<li><a href="#metric-details">Metric Details</a>
-
-<ul>
-<li><a href="#platform-uptime">Platform Uptime</a></li>
-<li><a href="#job-uptime">Job Uptime</a></li>
-<li><a href="#median-time-to-assigned-(mtta)">Median Time To Assigned (MTTA)</a></li>
-<li><a href="#median-time-to-running-(mttr)">Median Time To Running (MTTR)</a></li>
-</ul></li>
-<li><a href="#limitations">Limitations</a></li>
-</ul>
-
-<h2 id="overview">Overview</h2>
-
-<p>The primary goal of this feature is the collection and monitoring of Aurora job SLA (Service Level
-Agreement) metrics that define a contractual relationship between the Aurora/Mesos platform
-and hosted services.</p>
-
-<p>The Aurora SLA feature currently supports stat collection only for service (non-cron)
-production jobs (<code>&quot;production = True&quot;</code> in your <code>.aurora</code> config).</p>
-
-<p>Counters that track SLA measurements are computed periodically within the scheduler.
-The individual instance metrics are refreshed every minute (configurable via
-<code>sla_stat_refresh_interval</code>). The instance counters are subsequently aggregated by
-relevant grouping types before being exported to the scheduler <code>/vars</code> endpoint (when using
-<code>vagrant</code> that would be <code>http://192.168.33.7:8081/vars</code>).</p>
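-
-<p>For example, to eyeball the exported SLA counters in the vagrant environment (a sketch using
-standard command-line tools):</p>
-<pre class="highlight text">curl -s http://192.168.33.7:8081/vars | grep sla_
-</pre>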
-
-<h2 id="metric-details">Metric Details</h2>
-
-<h3 id="platform-uptime">Platform Uptime</h3>
-
-<p><em>Aggregate amount of time a job spends in a non-runnable state due to platform unavailability
-or scheduling delays. This metric tracks Aurora/Mesos uptime performance and reflects any
-system-caused downtime events (tasks LOST or DRAINED). Any user-initiated task kills/restarts
-will not degrade this metric.</em></p>
-
-<p><strong>Collection scope:</strong></p>
-
-<ul>
-<li>Per job - <code>sla_&lt;job_key&gt;_platform_uptime_percent</code></li>
-<li>Per cluster - <code>sla_cluster_platform_uptime_percent</code></li>
-</ul>
-
-<p><strong>Units:</strong> percent</p>
-
-<p>A fault in the task environment may cause Aurora and Mesos to have different views of the task state
-or to lose track of the task&rsquo;s existence. In such cases, the service task is marked as LOST and
-rescheduled by Aurora. For example, this may happen when the task stays in ASSIGNED or STARTING
-for too long or the Mesos slave becomes unhealthy (or disappears completely). The time between the
-task entering LOST and its replacement reaching the RUNNING state is counted towards platform downtime.</p>
-
-<p>Another example of a platform downtime event is the administrator-requested task rescheduling. This
-happens during planned Mesos slave maintenance when all slave tasks are marked as DRAINED and
-rescheduled elsewhere.</p>
-
-<p>To accurately calculate platform uptime, we must separate platform-incurred downtime from user
-actions that put a service instance in a non-operational state. It is simpler to isolate
-user-incurred downtime and treat all other downtime as platform-incurred.</p>
-
-<p>Currently, a user can cause downtime of a healthy service (task) in only two ways: via the
-<code>killTasks</code> or <code>restartShards</code> RPCs. In both cases, the affected tasks leave an audit trail of
-state transition records relevant to uptime calculations. By applying a special &ldquo;SLA meaning&rdquo; to exposed task state
-transition records, we can build a deterministic downtime trace for every given service instance.</p>
-
-<p>A task going through a state transition carries one of three possible SLA meanings
-(see <a href="../src/main/java/org/apache/aurora/scheduler/sla/SlaAlgorithm.java">SlaAlgorithm.java</a> for
-sla-to-task-state mapping):</p>
-
-<ul>
-<li><p>Task is UP: starts a period where the task is considered to be up and running from the Aurora
-platform standpoint.</p></li>
-<li><p>Task is DOWN: starts a period where the task cannot reach the UP state for some
-non-user-related reason. Counts towards instance downtime.</p></li>
-<li><p>Task is REMOVED from SLA: starts a period where the task is not expected to be UP due to
-a user-initiated action or failure. We ignore this period for uptime calculation purposes.</p></li>
-</ul>
-
-<p>This metric is recalculated over the last sampling period (last minute) to account for
-any UP/DOWN/REMOVED events. It ignores any UP/DOWN events not immediately adjacent to the
-sampling interval as well as adjacent REMOVED events.</p>
-
-<h3 id="job-uptime">Job Uptime</h3>
-
-<p><em>Percentage of the job instances considered to be in RUNNING state for the specified duration
-relative to request time. This is a purely application-side metric that considers the aggregate
-uptime of all RUNNING instances. Any user- or platform-initiated restarts directly affect
-this metric.</em></p>
-
-<p><strong>Collection scope:</strong> We currently expose job uptime values at 5 pre-defined
-percentiles (50th, 75th, 90th, 95th, and 99th):</p>
-
-<ul>
-<li><code>sla_&lt;job_key&gt;_job_uptime_50_00_sec</code></li>
-<li><code>sla_&lt;job_key&gt;_job_uptime_75_00_sec</code></li>
-<li><code>sla_&lt;job_key&gt;_job_uptime_90_00_sec</code></li>
-<li><code>sla_&lt;job_key&gt;_job_uptime_95_00_sec</code></li>
-<li><code>sla_&lt;job_key&gt;_job_uptime_99_00_sec</code></li>
-</ul>
-
-<p><strong>Units:</strong> seconds</p>
-
-<p>You can also get customized real-time stats from the Aurora client. See <code>aurora sla -h</code> for
-more details.</p>
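-
-<p>The per-job uptime percentile counters above can likewise be pulled straight from <code>/vars</code>
-(a sketch using standard command-line tools):</p>
-<pre class="highlight text">curl -s http://192.168.33.7:8081/vars | grep _job_uptime_
-</pre>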
-
-<h3 id="median-time-to-assigned-(mtta)">Median Time To Assigned (MTTA)</h3>
-
-<p><em>Median time a job spends waiting for its tasks to be assigned to a host. This is a combined
-metric that helps track the dependency of scheduling performance on the requested resources
-(user scope) as well as the internal scheduler bin-packing algorithm efficiency (platform scope).</em></p>
-
-<p><strong>Collection scope:</strong></p>
-
-<ul>
-<li>Per job - <code>sla_&lt;job_key&gt;_mtta_ms</code></li>
-<li>Per cluster - <code>sla_cluster_mtta_ms</code></li>
-<li>Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
-<a href="../src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java">ResourceAggregates.java</a>
-
-<ul>
-<li>By CPU:
-
-<ul>
-<li><code>sla_cpu_small_mtta_ms</code></li>
-<li><code>sla_cpu_medium_mtta_ms</code></li>
-<li><code>sla_cpu_large_mtta_ms</code></li>
-<li><code>sla_cpu_xlarge_mtta_ms</code></li>
-<li><code>sla_cpu_xxlarge_mtta_ms</code></li>
-</ul></li>
-<li>By RAM:
-
-<ul>
-<li><code>sla_ram_small_mtta_ms</code></li>
-<li><code>sla_ram_medium_mtta_ms</code></li>
-<li><code>sla_ram_large_mtta_ms</code></li>
-<li><code>sla_ram_xlarge_mtta_ms</code></li>
-<li><code>sla_ram_xxlarge_mtta_ms</code></li>
-</ul></li>
-<li>By DISK:
-
-<ul>
-<li><code>sla_disk_small_mtta_ms</code></li>
-<li><code>sla_disk_medium_mtta_ms</code></li>
-<li><code>sla_disk_large_mtta_ms</code></li>
-<li><code>sla_disk_xlarge_mtta_ms</code></li>
-<li><code>sla_disk_xxlarge_mtta_ms</code></li>
-</ul></li>
-</ul></li>
-</ul>
-
-<p><strong>Units:</strong> milliseconds</p>
-
-<p>MTTA only considers instances that have already reached ASSIGNED state and ignores those
-that are still PENDING. This ensures straggler instances (e.g. with unreasonable resource
-constraints) do not affect metric curves.</p>
-
-<h3 id="median-time-to-running-(mttr)">Median Time To Running (MTTR)</h3>
-
-<p><em>Median time a job waits for its tasks to reach RUNNING state. This is a comprehensive metric
-reflecting the overall time it takes for Aurora/Mesos to start executing user content.</em></p>
-
-<p><strong>Collection scope:</strong></p>
-
-<ul>
-<li>Per job - <code>sla_&lt;job_key&gt;_mttr_ms</code></li>
-<li>Per cluster - <code>sla_cluster_mttr_ms</code></li>
-<li>Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
-<a href="../src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java">ResourceAggregates.java</a>
-
-<ul>
-<li>By CPU:
-
-<ul>
-<li><code>sla_cpu_small_mttr_ms</code></li>
-<li><code>sla_cpu_medium_mttr_ms</code></li>
-<li><code>sla_cpu_large_mttr_ms</code></li>
-<li><code>sla_cpu_xlarge_mttr_ms</code></li>
-<li><code>sla_cpu_xxlarge_mttr_ms</code></li>
-</ul></li>
-<li>By RAM:
-
-<ul>
-<li><code>sla_ram_small_mttr_ms</code></li>
-<li><code>sla_ram_medium_mttr_ms</code></li>
-<li><code>sla_ram_large_mttr_ms</code></li>
-<li><code>sla_ram_xlarge_mttr_ms</code></li>
-<li><code>sla_ram_xxlarge_mttr_ms</code></li>
-</ul></li>
-<li>By DISK:
-
-<ul>
-<li><code>sla_disk_small_mttr_ms</code></li>
-<li><code>sla_disk_medium_mttr_ms</code></li>
-<li><code>sla_disk_large_mttr_ms</code></li>
-<li><code>sla_disk_xlarge_mttr_ms</code></li>
-<li><code>sla_disk_xxlarge_mttr_ms</code></li>
-</ul></li>
-</ul></li>
-</ul>
-
-<p><strong>Units:</strong> milliseconds</p>
-
-<p>MTTR only considers instances in RUNNING state. This ensures straggler instances (e.g. with
-unreasonable resource constraints) do not affect metric curves.</p>
-
-<h2 id="limitations">Limitations</h2>
-
-<ul>
-<li><p>The availability of Aurora SLA metrics is bounded by the availability of the scheduler.</p></li>
-<li><p>All metrics are calculated at a pre-defined interval (currently set at 1 minute).
-Scheduler restarts may result in missed collections.</p></li>
-</ul>
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/documentation/latest/storage-config/index.html b/publish/documentation/latest/storage-config/index.html
deleted file mode 100644
index 0fcf513..0000000
--- a/publish/documentation/latest/storage-config/index.html
+++ /dev/null
@@ -1,234 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Apache Aurora logo"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h1 id="storage-configuration-and-maintenance">Storage Configuration And Maintenance</h1>
-
-<ul>
-<li><a href="#overview">Overview</a></li>
-<li><a href="#scheduler-storage-configuration-flags">Scheduler storage configuration flags</a>
-
-<ul>
-<li><a href="#mesos-replicated-log-configuration-flags">Mesos replicated log configuration flags</a></li>
-<li><a href="#-native_log_quorum_size">-native_log_quorum_size</a></li>
-<li><a href="#-native_log_file_path">-native_log_file_path</a></li>
-<li><a href="#-native_log_zk_group_path">-native_log_zk_group_path</a></li>
-<li><a href="#backup-configuration-flags">Backup configuration flags</a></li>
-<li><a href="#-backup_interval">-backup_interval</a></li>
-<li><a href="#-backup_dir">-backup_dir</a></li>
-<li><a href="#-max_saved_backups">-max_saved_backups</a></li>
-</ul></li>
-<li><a href="#recovering-from-a-scheduler-backup">Recovering from a scheduler backup</a>
-
-<ul>
-<li><a href="#summary">Summary</a></li>
-<li><a href="#preparation">Preparation</a></li>
-<li><a href="#cleanup-and-re-initialize-mesos-replicated-log">Cleanup and re-initialize Mesos replicated log</a></li>
-<li><a href="#restore-from-backup">Restore from backup</a></li>
-<li><a href="#cleanup">Cleanup</a></li>
-</ul></li>
-</ul>
-
-<h2 id="overview">Overview</h2>
-
-<p>This document summarizes Aurora storage configuration and maintenance details and is
-intended for use by anyone deploying and/or maintaining Aurora.</p>
-
-<p>For a high level overview of the Aurora storage architecture refer to <a href="/documentation/latest/storage/">this document</a>.</p>
-
-<h2 id="scheduler-storage-configuration-flags">Scheduler storage configuration flags</h2>
-
-<p>Below is a summary of scheduler storage configuration flags that either don&rsquo;t have default values
-or require attention before deploying in a production environment.</p>
-
-<h3 id="mesos-replicated-log-configuration-flags">Mesos replicated log configuration flags</h3>
-
-<h4 id="-native_log_quorum_size">-native_log_quorum_size</h4>
-
-<p>Defines the Mesos replicated log quorum size. See
-<a href="/documentation/latest/deploying-aurora-scheduler/#replicated-log-configuration">the replicated log configuration document</a>
-on how to choose the right value.</p>
-
-<h4 id="-native_log_file_path">-native_log_file_path</h4>
-
-<p>Location of the Mesos replicated log files. Consider allocating a dedicated disk (preferably SSD)
-for Mesos replicated log files to ensure optimal storage performance.</p>
-
-<h4 id="-native_log_zk_group_path">-native_log_zk_group_path</h4>
-
-<p>ZooKeeper path used for Mesos replicated log quorum discovery.</p>
-
-<p>See <a href="../src/main/java/org/apache/aurora/scheduler/log/mesos/MesosLogStreamModule.java">code</a> for
-other available Mesos replicated log configuration options and default values.</p>
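-
-<p>As an illustrative sketch, a 3-scheduler cluster might set these flags as follows (the log
-directory and ZooKeeper path are hypothetical; a quorum of 2 is a majority of 3 replicas):</p>
-<pre class="highlight text">-native_log_quorum_size=2
--native_log_file_path=/var/lib/aurora/scheduler/db
--native_log_zk_group_path=/aurora/replicated-log
-</pre>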
-
-<h3 id="backup-configuration-flags">Backup configuration flags</h3>
-
-<p>Configuration options for the Aurora scheduler backup manager.</p>
-
-<h4 id="-backup_interval">-backup_interval</h4>
-
-<p>The interval at which the scheduler writes local storage backups. The default is every hour.</p>
-
-<h4 id="-backup_dir">-backup_dir</h4>
-
-<p>Directory to write backups to.</p>
-
-<h4 id="-max_saved_backups">-max_saved_backups</h4>
-
-<p>Maximum number of backups to retain before deleting the oldest backup(s).</p>
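-
-<p>A sketch of the three backup flags together (the interval format and the backup directory are
-illustrative):</p>
-<pre class="highlight text">-backup_interval=1hrs
--backup_dir=/var/lib/aurora/backups
--max_saved_backups=48
-</pre>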
-
-<h2 id="recovering-from-a-scheduler-backup">Recovering from a scheduler backup</h2>
-
-<ul>
-<li><a href="#summary">Summary</a></li>
-<li><a href="#preparation">Preparation</a></li>
-<li><a href="#cleanup-and-re-initialize-mesos-replicated-log">Cleanup and re-initialize Mesos replicated log</a></li>
-<li><a href="#restore-from-backup">Restore from backup</a></li>
-<li><a href="#cleanup">Cleanup</a></li>
-</ul>
-
-<p><strong>Be sure to read the entire page before attempting to restore from a backup, as it may have
-unintended consequences.</strong></p>
-
-<h3 id="summary">Summary</h3>
-
-<p>The restoration procedure replaces the existing (possibly corrupted) Mesos replicated log with an
-earlier, backed-up version and requires all schedulers to be taken down temporarily while
-restoring. Once completed, the scheduler state resets to what it was when the backup was created.
-This means any jobs/tasks created or updated after the backup are unknown to the scheduler and will
-be killed shortly after the cluster restarts. All other tasks continue operating as normal.</p>
-
-<p>Usually, it is a bad idea to restore a backup that is not extremely recent (i.e. older than a few
-hours). This is because the scheduler will expect the cluster to look exactly as the backup does,
-so any tasks that have been rescheduled since the backup was taken will be killed.</p>
-
-<h3 id="preparation">Preparation</h3>
-
-<p>Follow these steps to prepare the cluster for restoring from a backup:</p>
-
-<ul>
-<li><p>Stop all scheduler instances</p></li>
-<li><p>Consider blocking external traffic on a port defined in <code>-http_port</code> for all schedulers to
-prevent users from interacting with the scheduler during the restoration process. This will help
-troubleshooting by reducing the scheduler log noise and prevent users from making changes that will
-be erased after the backup snapshot is restored</p></li>
-<li><p>The next steps put the scheduler into a partially disabled state in which it can still
-accept storage recovery requests but cannot schedule tasks or change task states (see the
-illustrative flag set after this list). This may be accomplished by updating the following
-scheduler configuration options:</p>
-
-<ul>
-<li>Set <code>-mesos_master_address</code> to a non-existent zk address. This will prevent the scheduler from
-registering with Mesos. E.g.: <code>-mesos_master_address=zk://localhost:2181</code></li>
-<li>Set <code>-max_registration_delay</code> to a sufficiently long interval to prevent a registration timeout
-and, as a result, scheduler suicide. E.g.: <code>-max_registration_delay=360min</code></li>
-<li>Make sure the <code>-gc_executor_path</code> option is not set, to prevent accidental task GC. This is
-important as the scheduler will attempt to reconcile the cluster state and will kill all tasks when
-restarted with an empty Mesos replicated log.</li>
-</ul></li>
-<li><p>Restart all schedulers</p></li>
-</ul>
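-
-<p>A minimal sketch of the flag overrides described above (both values come from the examples in
-this list):</p>
-<pre class="highlight text">-mesos_master_address=zk://localhost:2181
--max_registration_delay=360min
-</pre>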
-
-<h3 id="cleanup-and-re-initialize-mesos-replicated-log">Cleanup and re-initialize Mesos replicated log</h3>
-
-<p>Get rid of the corrupted files and re-initialize the Mesos replicated log:</p>
-
-<ul>
-<li>Stop schedulers</li>
-<li>Delete all files under <code>-native_log_file_path</code> on all schedulers</li>
-<li>Initialize Mesos replica&rsquo;s log file: <code>mesos-log initialize &lt;-native_log_file_path&gt;</code></li>
-<li>Restart schedulers</li>
-</ul>
-
-<h3 id="restore-from-backup">Restore from backup</h3>
-
-<p>At this point the scheduler is ready to rehydrate from the backup:</p>
-
-<ul>
-<li><p>Identify the leading scheduler by:</p>
-
-<ul>
-<li>running <code>aurora_admin get_scheduler &lt;cluster&gt;</code> - if the scheduler is responsive</li>
-<li>examining scheduler logs</li>
-<li>or examining Zookeeper registration under the path defined by <code>-zk_endpoints</code>
-and <code>-serverset_path</code></li>
-</ul></li>
-<li><p>Locate the desired backup file, copy it to the leading scheduler and stage recovery by running
-the following command on the leader:
-<code>aurora_admin scheduler_stage_recovery &lt;cluster&gt; scheduler-backup-&lt;yyyy-MM-dd-HH-mm&gt;</code></p></li>
-<li><p>At this point, the recovery snapshot is staged and available for manual verification/modification
-via <code>aurora_admin scheduler_print_recovery_tasks</code> and <code>scheduler_delete_recovery_tasks</code> commands.
-See <code>aurora_admin help &lt;command&gt;</code> for usage details.</p></li>
-<li><p>Commit the recovery. This instructs the scheduler to overwrite the existing Mesos replicated log with
-the provided backup snapshot and initiate a mandatory failover:
-<code>aurora_admin scheduler_commit_recovery &lt;cluster&gt;</code>. A combined example follows this list.</p></li>
-</ul>
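-
-<p>Putting these steps together, a recovery session on the leading scheduler might look like this
-(the cluster name and backup timestamp are illustrative):</p>
-<pre class="highlight text">aurora_admin scheduler_stage_recovery devcluster scheduler-backup-2014-08-01-00-00
-aurora_admin scheduler_print_recovery_tasks devcluster
-aurora_admin scheduler_commit_recovery devcluster
-</pre>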
-
-<h3 id="cleanup">Cleanup</h3>
-
-<p>Undo any modifications made during the <a href="#preparation">Preparation</a> sequence.</p>
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/documentation/latest/storage/index.html b/publish/documentation/latest/storage/index.html
deleted file mode 100644
index 8994195..0000000
--- a/publish/documentation/latest/storage/index.html
+++ /dev/null
@@ -1,166 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Apache Aurora logo"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h1 id="aurora-scheduler-storage">Aurora Scheduler Storage</h1>
-
-<ul>
-<li><a href="#overview">Overview</a></li>
-<li><a href="#reads-writes-modifications">Reads, writes, modifications</a>
-
-<ul>
-<li><a href="#read-lifecycle">Read lifecycle</a></li>
-<li><a href="#write-lifecycle">Write lifecycle</a></li>
-</ul></li>
-<li><a href="#atomicity-consistency-and-isolation">Atomicity, consistency and isolation</a></li>
-<li><a href="#population-on-restart">Population on restart</a></li>
-</ul>
-
-<h2 id="overview">Overview</h2>
-
-<p>The Aurora scheduler maintains data that needs to be persisted to survive failovers and restarts.
-For example:</p>
-
-<ul>
-<li>Task configurations and scheduled task instances</li>
-<li>Job update configurations and update progress</li>
-<li>Production resource quotas</li>
-<li>Mesos resource offer host attributes</li>
-</ul>
-
-<p>Aurora solves its persistence needs by leveraging the Mesos implementation of a Paxos replicated
-log <a href="https://ramcloud.stanford.edu/~ongaro/userstudy/paxos.pdf">[1]</a>
-<a href="http://en.wikipedia.org/wiki/State_machine_replication">[2]</a> with key-value
-<a href="https://github.com/google/leveldb">LevelDB</a> storage as the persistence medium.</p>
-
-<p>Conceptually, it can be represented by the following major components:</p>
-
-<ul>
-<li>Volatile storage: in-memory cache of all available data. Implemented via in-memory
-<a href="http://www.h2database.com/html/main.html">H2 Database</a> and accessed via
-<a href="http://mybatis.github.io/mybatis-3/">MyBatis</a>.</li>
-<li>Log manager: interface between Aurora storage and Mesos replicated log. The default schema format
-is <a href="https://github.com/apache/thrift">thrift</a>. Data is stored in serialized binary form.</li>
-<li>Snapshot manager: all data is periodically persisted to the Mesos replicated log in a single snapshot.
-This helps establish periodic recovery checkpoints and speeds up volatile storage recovery on
-restart.</li>
-<li>Backup manager: as a precaution, snapshots are periodically written out into backup files.
-This solves the <a href="/documentation/latest/storage-config/#recovering-from-a-scheduler-backup">disaster recovery problem</a>
-in case of a complete loss or corruption of the Mesos log files.</li>
-</ul>
-
-<p><img alt="Storage hierarchy" src="../images/storage_hierarchy.png" /></p>
-
-<h2 id="reads-writes-modifications">Reads, writes, modifications</h2>
-
-<p>All services in Aurora access data via a set of predefined store interfaces (aka stores) logically
-grouped by the type of data they serve. Every interface defines a specific set of operations allowed
-on the data, thus abstracting out the storage access and the actual persistence implementation. The
-latter is especially important in view of the general immutability of persisted data. With the Mesos
-replicated log as the underlying persistence solution, data can be read and written easily but not
-modified. All modifications are simulated by saving new versions of modified objects. This feature
-and general performance considerations justify the existence of the volatile in-memory store.</p>
-
-<h3 id="read-lifecycle">Read lifecycle</h3>
-
-<p>There are two types of reads available in Aurora: consistent and weakly-consistent. The difference
-is explained <a href="#atomicity-consistency-and-isolation">below</a>.</p>
-
-<p>All reads are served from the volatile storage, making reads generally cheap operations
-from a performance standpoint. The majority of the volatile stores are represented by the
-in-memory H2 database. This allows for rich schema definitions, queries and relationships that
-key-value storage is unable to match.</p>
-
-<h3 id="write-lifecycle">Write lifecycle</h3>
-
-<p>Writes are more involved operations since, in addition to updating the volatile store, data has to be
-appended to the replicated log. Data is not available for reads until fully acknowledged by both the
-replicated log and volatile storage.</p>
-
-<h2 id="atomicity-consistency-and-isolation">Atomicity, consistency and isolation</h2>
-
-<p>Aurora uses <a href="http://en.wikipedia.org/wiki/Write-ahead_logging">write-ahead logging</a> to ensure
-consistency between replicated and volatile storage. In Aurora, data is first written into the
-replicated log and only then updated in the volatile store.</p>
-
-<p>Aurora storage uses read-write locks to serialize data mutations and provide a consistent view of the
-available data. The <code>Storage</code> interface exposes 3 major types of operations:</p>
-
-<ul>
-<li><code>consistentRead</code> - access is locked using the reader&rsquo;s lock and provides a consistent view on read</li>
-<li><code>weaklyConsistentRead</code> - access is lock-less. Delivers the best contention performance but may result
-in stale reads</li>
-<li><code>write</code> - access is fully serialized using the writer&rsquo;s lock. Operation success requires both the
-volatile and replicated writes to succeed.</li>
-</ul>
-
-<p>The consistency of the volatile store is enforced via H2 transactional isolation.</p>
-
-<h2 id="population-on-restart">Population on restart</h2>
-
-<p>Any time a scheduler restarts, it restores its volatile state from the most recent position recorded
-in the replicated log by restoring the snapshot and replaying individual log entries on top to fully
-recover the state up to the last write.</p>
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/documentation/latest/test-resource-generation/index.html b/publish/documentation/latest/test-resource-generation/index.html
deleted file mode 100644
index 846a2f1..0000000
--- a/publish/documentation/latest/test-resource-generation/index.html
+++ /dev/null
@@ -1,97 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Apache Aurora logo"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h1 id="generating-test-resources">Generating test resources</h1>
-
-<h2 id="background">Background</h2>
-
-<p>The Aurora source repository and distributions contain several
-<a href="../src/test/resources/org/apache/thermos/root/checkpoints">binary files</a> to
-qualify the backwards-compatibility of thermos with checkpoint data. Since
-thermos persists state to disk, to be read by other components (the GC executor
-and the thermos observer), it is important that we have tests that prevent
-regressions affecting the ability to parse previously-written data.</p>
-
-<h2 id="generating-test-files">Generating test files</h2>
-
-<p>The files included represent persisted checkpoints that exercise different
-features of thermos. The existing files should not be modified unless
-we are accepting backwards incompatibility, such as with a major release.</p>
-
-<p>It is not practical to write source code to generate these files on the fly,
-as source would be vulnerable to drift (e.g. due to refactoring) in ways
-that would undermine the goal of ensuring backwards compatibility.</p>
-
-<p>The most common reason to add a new checkpoint file would be to provide
-coverage for new thermos features that alter the data format. This is
-accomplished by writing and running a
-<a href="/documentation/latest/configuration-reference/">job configuration</a> that exercises the feature, and
-copying the checkpoint file from the sandbox directory; by default this is
-<code>/var/run/thermos/checkpoints/&lt;aurora task id&gt;</code>.</p>
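-
-<p>A sketch of that workflow, assuming the Vagrant environment from the
-<a href="/documentation/latest/tutorial/">tutorial</a> (the job key is the tutorial&rsquo;s example;
-fill in the task id placeholder yourself):</p>
-<pre class="highlight text">vagrant ssh
-aurora create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
-cp /var/run/thermos/checkpoints/&lt;aurora task id&gt; \
-    src/test/resources/org/apache/thermos/root/checkpoints/
-</pre>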
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/documentation/latest/tutorial/index.html b/publish/documentation/latest/tutorial/index.html
deleted file mode 100644
index 7464a75..0000000
--- a/publish/documentation/latest/tutorial/index.html
+++ /dev/null
@@ -1,327 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Apache Aurora logo"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h2 id="aurora-tutorial">Aurora Tutorial</h2>
-
-<ul>
-<li><a href="#introduction">Introduction</a></li>
-<li><a href="#setup-install-aurora">Setup: Install Aurora</a></li>
-<li><a href="#the-script">The Script</a></li>
-<li><a href="#aurora-configuration">Aurora Configuration</a></li>
-<li><a href="#whats-going-on-in-that-configuration-file">What&rsquo;s Going On In That Configuration File?</a></li>
-<li><a href="#creating-the-job">Creating the Job</a></li>
-<li><a href="#watching-the-job-run">Watching the Job Run</a></li>
-<li><a href="#cleanup">Cleanup</a></li>
-<li><a href="#next-steps">Next Steps</a></li>
-</ul>
-
-<h2 id="introduction">Introduction</h2>
-
-<p>This tutorial shows how to use the Aurora scheduler to run (and
-&ldquo;<code>printf-debug</code>&rdquo;) a hello world program on Mesos. The operational
-hierarchy is:</p>
-
-<ul>
-<li>Aurora manages and schedules jobs for Mesos to run.</li>
-<li>Mesos manages the individual tasks that make up a job.</li>
-<li>Thermos manages the individual processes that make up a task.</li>
-</ul>
-
-<p>This is the recommended first document for new Aurora users to read to start
-getting up to speed on the system.</p>
-
-<p>To get help, email questions to the Aurora Developer List,
-<a href="mailto:dev@aurora.incubator.apache.org">dev@aurora.incubator.apache.org</a></p>
-
-<h2 id="setup-install-aurora">Setup: Install Aurora</h2>
-
-<p>You use the Aurora client and web UI to interact with Aurora jobs. To
-install them locally, see <a href="/documentation/latest/vagrant/">the Vagrant guide</a>. The remainder of this
-tutorial assumes you are running Aurora using Vagrant. Unless otherwise stated,
-all commands are to be run from the root of the aurora repository clone.</p>
-
-<h2 id="the-script">The Script</h2>
-
-<p>Our &ldquo;hello world&rdquo; application is a simple Python script that loops
-forever, displaying the time every few seconds. Copy the code below and
-put it in a file named <code>hello_world.py</code> in the root of your Aurora repository clone (Note:
-this directory is the same as <code>/vagrant</code> inside the Vagrant VMs).</p>
-
-<p>The script has an intentional bug, which we will explain later on.</p>
-
-<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
--->
-<pre class="highlight python"><span class="kn">import</span> <span class="nn">sys</span>
-<span class="kn">import</span> <span class="nn">time</span>
-
-<span class="k">def</span> <span class="nf">main</span><span class="p">(</span><span class="n">argv</span><span class="p">):</span>
-  <span class="n">SLEEP_DELAY</span> <span class="o">=</span> <span class="mi">10</span>
-  <span class="c"># Python ninjas - ignore this blatant bug.</span>
-  <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="n">xrang</span><span class="p">(</span><span class="mi">100</span><span class="p">):</span>
-    <span class="k">print</span><span class="p">(</span><span class="s">&quot;Hello world! The time is now: </span><span class="si">%</span><span class="s">s. Sleeping for </span><span class="si">%</span><span class="s">d secs&quot;</span> <span class="o">%</span> <span class="p">(</span>
-      <span class="n">time</span><span class="o">.</span><span class="n">asctime</span><span class="p">(),</span> <span class="n">SLEEP_DELAY</span><span class="p">))</span>
-    <span class="n">sys</span><span class="o">.</span><span class="n">stdout</span><span class="o">.</span><span class="n">flush</span><span class="p">()</span>
-    <span class="n">time</span><span class="o">.</span><span class="n">sleep</span><span class="p">(</span><span class="n">SLEEP_DELAY</span><span class="p">)</span>
-
-<span class="k">if</span> <span class="n">__name__</span> <span class="o">==</span> <span class="s">&quot;__main__&quot;</span><span class="p">:</span>
-  <span class="n">main</span><span class="p">(</span><span class="n">sys</span><span class="o">.</span><span class="n">argv</span><span class="p">)</span>
-</pre>
-<h2 id="aurora-configuration">Aurora Configuration</h2>
-
-<p>Once we have our script/program, we need to create a <em>configuration
-file</em> that tells Aurora how to manage and launch our Job. Save the below
-code in the file <code>hello_world.aurora</code>.</p>
-
-<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
--->
-<pre class="highlight python"><span class="n">pkg_path</span> <span class="o">=</span> <span class="s">&#39;/vagrant/hello_world.py&#39;</span>
-
-<span class="c"># we use a trick here to make the configuration change with</span>
-<span class="c"># the contents of the file, for simplicity.  in a normal setting, packages would be</span>
-<span class="c"># versioned, and the version number would be changed in the configuration.</span>
-<span class="kn">import</span> <span class="nn">hashlib</span>
-<span class="k">with</span> <span class="nb">open</span><span class="p">(</span><span class="n">pkg_path</span><span class="p">,</span> <span class="s">&#39;rb&#39;</span><span class="p">)</span> <span class="k">as</span> <span class="n">f</span><span class="p">:</span>
-  <span class="n">pkg_checksum</span> <span class="o">=</span> <span class="nb">hash</span><span class="n">lib</span><span class="o">.</span><span class="n">md5</span><span class="p">(</span><span class="n">f</span><span class="o">.</span><span class="n">read</span><span class="p">())</span><span class="o">.</span><span class="nb">hex</span><span class="n">digest</span><span class="p">()</span>
-
-<span class="c"># copy hello_world.py into the local sandbox</span>
-<span class="n">install</span> <span class="o">=</span> <span class="n">Process</span><span class="p">(</span>
-  <span class="n">name</span> <span class="o">=</span> <span class="s">&#39;fetch_package&#39;</span><span class="p">,</span>
-  <span class="n">cmdline</span> <span class="o">=</span> <span class="s">&#39;cp </span><span class="si">%</span><span class="s">s . &amp;&amp; echo </span><span class="si">%</span><span class="s">s &amp;&amp; chmod +x hello_world.py&#39;</span> <span class="o">%</span> <span class="p">(</span><span class="n">pkg_path</span><span class="p">,</span> <span class="n">pkg_checksum</span><span class="p">))</span>
-
-<span class="c"># run the script</span>
-<span class="n">hello_world</span> <span class="o">=</span> <span class="n">Process</span><span class="p">(</span>
-  <span class="n">name</span> <span class="o">=</span> <span class="s">&#39;hello_world&#39;</span><span class="p">,</span>
-  <span class="n">cmdline</span> <span class="o">=</span> <span class="s">&#39;python hello_world.py&#39;</span><span class="p">)</span>
-
-<span class="c"># describe the task</span>
-<span class="n">hello_world_task</span> <span class="o">=</span> <span class="n">SequentialTask</span><span class="p">(</span>
-  <span class="n">processes</span> <span class="o">=</span> <span class="p">[</span><span class="n">install</span><span class="p">,</span> <span class="n">hello_world</span><span class="p">],</span>
-  <span class="n">resources</span> <span class="o">=</span> <span class="n">Resources</span><span class="p">(</span><span class="n">cpu</span> <span class="o">=</span> <span class="mi">1</span><span class="p">,</span> <span class="n">ram</span> <span class="o">=</span> <span class="mi">1</span><span class="o">*</span><span class="n">MB</span><span class="p">,</span> <span class="n">disk</span><span class="o">=</span><span class="mi">8</span><span class="o">*</span><span class="n">MB</span><span class="p">))</span>
-
-<span class="n">jobs</span> <span class="o">=</span> <span class="p">[</span>
-  <span class="n">Service</span><span class="p">(</span><span class="n">cluster</span> <span class="o">=</span> <span class="s">&#39;devcluster&#39;</span><span class="p">,</span>
-          <span class="n">environment</span> <span class="o">=</span> <span class="s">&#39;devel&#39;</span><span class="p">,</span>
-          <span class="n">role</span> <span class="o">=</span> <span class="s">&#39;www-data&#39;</span><span class="p">,</span>
-          <span class="n">name</span> <span class="o">=</span> <span class="s">&#39;hello_world&#39;</span><span class="p">,</span>
-          <span class="n">task</span> <span class="o">=</span> <span class="n">hello_world_task</span><span class="p">)</span>
-<span class="p">]</span>
-</pre>
-<p>For more about Aurora configuration files, see the <a href="/documentation/latest/configuration-tutorial/">Configuration
-Tutorial</a> and the <a href="/documentation/latest/configuration-reference/">Aurora + Thermos
-Reference</a> (preferably after finishing this
-tutorial).</p>
-
-<h2 id="whats-going-on-in-that-configuration-file">What&rsquo;s Going On In That Configuration File?</h2>
-
-<p>More than you might think.</p>
-
-<ol>
-<li><p>From a &ldquo;big picture&rdquo; viewpoint, it first defines two
-Processes. Then it defines a Task that runs the two Processes in the
-order specified in the Task definition, as well as specifying what
-computational and memory resources are available for them.  Finally,
-it defines a Job that will schedule the Task on available and suitable
-machines. This Job is the sole member of a list of Jobs; you can
-specify more than one Job in a config file.</p></li>
-<li><p>At the Process level, it specifies how to get your code into the
-local sandbox in which it will run. It then specifies how the code is
-actually run once the second Process starts.</p></li>
-</ol>
-
-<h2 id="creating-the-job">Creating the Job</h2>
-
-<p>We&rsquo;re ready to launch our job! To do so, we use the Aurora Client to
-issue a Job creation request to the Aurora scheduler.</p>
-
-<p>Many Aurora Client commands take a <em>job key</em> argument, which uniquely
-identifies a Job. A job key consists of four parts, each separated by a
-&ldquo;/&rdquo;. The four parts are <code>&lt;cluster&gt;/&lt;role&gt;/&lt;environment&gt;/&lt;jobname&gt;</code>,
-in that order. When comparing two job keys, if any of the
-four parts is different from its counterpart in the other key, then the
-two job keys identify two separate jobs. If all four values are
-identical, the job keys identify the same job.</p>
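-
-<p>For example, the job key used later in this tutorial breaks down as follows:</p>
-<pre class="highlight text">devcluster/www-data/devel/hello_world
-  cluster:     devcluster
-  role:        www-data
-  environment: devel
-  jobname:     hello_world
-</pre>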
-
-<p><code>/etc/aurora/clusters.json</code> on the Aurora scheduler machine lists the available
-cluster names. For Vagrant, from the top level of your Aurora repository clone,
-do:</p>
-<pre class="highlight text">$ vagrant ssh
-</pre>
-<p>Followed by:</p>
-<pre class="highlight text">vagrant@precise64:~$ cat /etc/aurora/clusters.json
-</pre>
-<p>You&rsquo;ll see something like:</p>
-<pre class="highlight javascript"><span class="p">[{</span>
-  <span class="s2">&quot;name&quot;</span><span class="err">:</span> <span class="s2">&quot;devcluster&quot;</span><span class="p">,</span>
-  <span class="s2">&quot;zk&quot;</span><span class="err">:</span> <span class="s2">&quot;192.168.33.7&quot;</span><span class="p">,</span>
-  <span class="s2">&quot;scheduler_zk_path&quot;</span><span class="err">:</span> <span class="s2">&quot;/aurora/scheduler&quot;</span><span class="p">,</span>
-  <span class="s2">&quot;auth_mechanism&quot;</span><span class="err">:</span> <span class="s2">&quot;UNAUTHENTICATED&quot;</span>
-<span class="p">}]</span>
-</pre>
-<p>Use a <code>name</code> value for your job key&rsquo;s cluster value.</p>
-
-<p>Role names are user accounts existing on the slave machines. If you don&rsquo;t know what accounts
-are available, contact your sysadmin.</p>
-
-<p>Environment names are namespaces; you can count on <code>prod</code>, <code>devel</code> and <code>test</code> existing.</p>
-
-<p>The Aurora Client command that actually runs our Job is <code>aurora create</code>. It creates a Job as
-specified by its job key and configuration file arguments and runs it.</p>
-<pre class="highlight text">aurora create &lt;cluster&gt;/&lt;role&gt;/&lt;environment&gt;/&lt;jobname&gt; &lt;config_file&gt;
-</pre>
-<p>Or for our example:</p>
-<pre class="highlight text">aurora create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
-</pre>
-<p>This returns:</p>
-<pre class="highlight text">$ vagrant ssh
-Welcome to Ubuntu 12.04 LTS (GNU/Linux 3.2.0-23-generic x86_64)
-
- * Documentation:  https://help.ubuntu.com/
-Welcome to your Vagrant-built virtual machine.
-Last login: Fri Jan  3 02:18:55 2014 from 10.0.2.2
-vagrant@precise64:~$ aurora create devcluster/www-data/devel/hello_world \
-    /vagrant/hello_world.aurora
- INFO] Creating job hello_world
- INFO] Response from scheduler: OK (message: 1 new tasks pending for job
-  www-data/devel/hello_world)
- INFO] Job url: http://precise64:8081/scheduler/www-data/devel/hello_world
-</pre>
-<h2 id="watching-the-job-run">Watching the Job Run</h2>
-
-<p>Now that our job is running, let&rsquo;s see what it&rsquo;s doing. Access the
-scheduler web interface at <code>http://$scheduler_hostname:$scheduler_port/scheduler</code>,
-or, when using <code>vagrant</code>, at <code>http://192.168.33.7:8081/scheduler</code>.
-First we see what Jobs are scheduled:</p>
-
-<p><img alt="Scheduled Jobs" src="../images/ScheduledJobs.png" /></p>
-
-<p>Click on the role name, in this case <code>www-data</code>, to see the Jobs associated
-with that role:</p>
-
-<p><img alt="Role Jobs" src="../images/RoleJobs.png" /></p>
-
-<p>If you click on your <code>hello_world</code> Job, you&rsquo;ll see:</p>
-
-<p><img alt="hello_world Job" src="../images/HelloWorldJob.png" /></p>
-
-<p>Oops, looks like our first job didn&rsquo;t quite work! The task failed, so we have
-to figure out what went wrong.</p>
-
-<p>Access the page for our Task by clicking on its host.</p>
-
-<p><img alt="Task page" src="../images/TaskBreakdown.png" /></p>
-
-<p>Once there, we see that the
-<code>hello_world</code> process failed. The Task page captures the standard error and
-standard output streams and makes them available. Clicking through
-to <code>stderr</code> on the failed <code>hello_world</code> process, we see what happened.</p>
-
-<p><img alt="stderr page" src="../images/stderr.png" /></p>
-
-<p>It looks like we made a typo in our Python script. We wanted <code>xrange</code>,
-not <code>xrang</code>. Edit the <code>hello_world.py</code> script to use the correct function.</p>
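-<p>The fix, sketched here since the full script appears earlier in this tutorial, is a one-character change to the loop:</p>
-<pre class="highlight text">for i in xrange(...):  # was: xrang(...)
-</pre>
-<p>Then push the corrected configuration and try again:</p>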
-<pre class="highlight text">aurora update devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
-</pre>
-<p>This time, the task comes up, we inspect the page, and see that the
-<code>hello_world</code> process is running.</p>
-
-<p><img alt="Running Task page" src="../images/runningtask.png" /></p>
-
-<p>We then inspect the output by clicking on <code>stdout</code> and see our process&rsquo;s
-output:</p>
-
-<p><img alt="stdout page" src="../images/stdout.png" /></p>
-
-<h2 id="cleanup">Cleanup</h2>
-
-<p>Now that we&rsquo;re done, we kill the job using the Aurora client:</p>
-<pre class="highlight text">vagrant@precise64:~$ aurora killall devcluster/www-data/devel/hello_world
- INFO] Killing tasks for job: devcluster/www-data/devel/hello_world
- INFO] Response from scheduler: OK (message: Tasks killed.)
- INFO] Job url: http://precise64:8081/scheduler/www-data/devel/hello_world
-vagrant@precise64:~$
-</pre>
-<p>The job page now shows the <code>hello_world</code> tasks as completed.</p>
-
-<p><img alt="Killed Task page" src="../images/killedtask.png" /></p>
-
-<h2 id="next-steps">Next Steps</h2>
-
-<p>Now that you&rsquo;ve finished this Tutorial, you should read or do the following:</p>
-
-<ul>
-<li><a href="/documentation/latest/configuration-tutorial/">The Aurora Configuration Tutorial</a>, which provides more examples
-and best practices for writing Aurora configurations. You should also look at
-the <a href="/documentation/latest/configuration-reference/">Aurora + Thermos Configuration Reference</a>.</li>
-<li>The <a href="/documentation/latest/user-guide/">Aurora User Guide</a> provides an overview of how Aurora, Mesos, and
-Thermos work &ldquo;under the hood&rdquo;.</li>
-<li>Explore the Aurora Client - use the <code>aurora help</code> subcommand, and read the
-<a href="/documentation/latest/client-commands/">Aurora Client Commands</a> document.</li>
-</ul>
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/documentation/latest/user-guide/index.html b/publish/documentation/latest/user-guide/index.html
deleted file mode 100644
index 2e1eabc..0000000
--- a/publish/documentation/latest/user-guide/index.html
+++ /dev/null
@@ -1,462 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Transparent Apache Aurora logo with dark background"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h2 id="aurora-user-guide">Aurora User Guide</h2>
-
-<ul>
-<li><a href="#user-content-overview">Overview</a></li>
-<li><a href="#user-content-job-lifecycle">Job Lifecycle</a>
-
-<ul>
-<li><a href="#user-content-life-of-a-task">Life Of A Task</a></li>
-<li><a href="#user-content-pending-to-running-states">PENDING to RUNNING states</a></li>
-<li><a href="#user-content-task-updates">Task Updates</a></li>
-<li><a href="#user-content-http-health-checking-and-graceful-shutdown">HTTP Health Checking and Graceful Shutdown</a>
-
-<ul>
-<li><a href="#user-content-tearing-a-task-down">Tearing a task down</a></li>
-</ul></li>
-<li><a href="#user-content-giving-priority-to-production-tasks-preempting">Giving Priority to Production Tasks: PREEMPTING</a></li>
-<li><a href="#user-content-natural-termination-finished-failed">Natural Termination: FINISHED, FAILED</a></li>
-<li><a href="#user-content-forceful-termination-killing-restarting">Forceful Termination: KILLING, RESTARTING</a></li>
-</ul></li>
-<li><a href="#user-content-service-discovery">Service Discovery</a></li>
-<li><a href="#user-content-configuration">Configuration</a></li>
-<li><a href="#user-content-creating-jobs">Creating Jobs</a></li>
-<li><a href="#user-content-interacting-with-jobs">Interacting With Jobs</a></li>
-</ul>
-
-<h2 id="overview">Overview</h2>
-
-<p>This document gives an overview of how Aurora works under the hood.
-It assumes you&rsquo;ve already worked through the &ldquo;hello world&rdquo; example
-job in the <a href="/documentation/latest/tutorial/">Aurora Tutorial</a>. Specifics of how to use Aurora are <strong>not</strong>
- given here, but pointers to documentation about how to use Aurora are
-provided.</p>
-
-<p>Aurora is a Mesos framework used to schedule <em>jobs</em> onto Mesos. Mesos
-cares about individual <em>tasks</em>, but typical jobs consist of dozens or
-hundreds of task replicas. Aurora provides a layer on top of Mesos with
-its <code>Job</code> abstraction. An Aurora <code>Job</code> consists of a task template and
-instructions for creating near-identical replicas of that task (modulo
-things like &ldquo;instance id&rdquo; or specific port numbers which may differ from
-machine to machine).</p>
-
-<p>The number of tasks that make up a Job is more subtle than it first appears. At a basic
-level, a Job consists of one task template and instructions for creating near-identical
-replicas of that task (otherwise referred to as &ldquo;instances&rdquo; or &ldquo;shards&rdquo;).</p>
-
-<p>However, since Jobs can be updated on the fly, a single Job identifier or <em>job key</em>
-can have multiple job configurations associated with it.</p>
-
-<p>For example, suppose you have a Job with 4 instances that each
-request 1 core of CPU, 1 GB of RAM, and 1 GB of disk space, as specified
-in the configuration file <code>hello_world.aurora</code>. You want to
-update it so it requests 2 GB of RAM instead of 1. You create a new
-configuration file to do that, called <code>new_hello_world.aurora</code>, and
-issue an <code>aurora update --shards=0-1 &lt;job_key_value&gt; new_hello_world.aurora</code>
-command.</p>
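-
-<p>A sketch of the one line that changes in the hypothetical <code>new_hello_world.aurora</code>, assuming the surrounding <code>Task</code> and <code>Job</code> definitions are carried over unchanged from <code>hello_world.aurora</code>:</p>
-<pre class="highlight text">resources = Resources(cpu = 1, ram = 2*GB, disk = 1*GB)  # was: ram = 1*GB
-</pre>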
-
-<p>This results in instances 0 and 1 having 1 cpu, 2 GB of RAM, and 1 GB of disk space,
-while instances 2 and 3 have 1 cpu, 1 GB of RAM, and 1 GB of disk space. If instance 3
-dies and restarts, it restarts with 1 cpu, 1 GB RAM, and 1 GB disk space.</p>
-
-<p>So there can be two task configurations in effect for the same Job
-at the same time, each valid for a different range of instances.</p>
-
-<p>This isn&rsquo;t a recommended pattern, but it is valid and supported by the
-Aurora scheduler. This most often manifests in the &ldquo;canary pattern&rdquo; where
-instance 0 runs with a different configuration than instances 1-N to test
-different code versions alongside the actual production job.</p>
-
-<p>A task can merely be a single <em>process</em> corresponding to a single
-command line, such as <code>python2.6 my_script.py</code>. However, a task can also
-consist of many separate processes, which all run within a single
-sandbox. For example, running multiple cooperating agents together,
-such as <code>logrotate</code>, <code>installer</code>, master, or slave processes. This is
-where Thermos  comes in. While Aurora provides a <code>Job</code> abstraction on
-top of Mesos <code>Tasks</code>, Thermos provides a <code>Process</code> abstraction
-underneath Mesos <code>Task</code>s and serves as part of the Aurora framework&rsquo;s
-executor.</p>
-
-<p>You define <code>Job</code>s, <code>Task</code>s, and <code>Process</code>es in a configuration file.
-Configuration files are written in Python, and make use of the Pystachio
-templating language. They end in a <code>.aurora</code> extension.</p>
-
-<p>Pystachio is a type-checked dictionary templating library.</p>
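-
-<p>As a minimal sketch of how the three levels nest in a <code>.aurora</code> file (the names and resource numbers here are illustrative, not taken from the tutorial):</p>
-<pre class="highlight text">hello = Process(name = &#39;hello&#39;, cmdline = &#39;echo hello world&#39;)
-
-hello_task = Task(
-  processes = [hello],
-  resources = Resources(cpu = 1, ram = 256*MB, disk = 64*MB))
-
-jobs = [Job(task = hello_task, cluster = &#39;devcluster&#39;,
-            role = &#39;www-data&#39;, environment = &#39;devel&#39;, name = &#39;hello&#39;)]
-</pre>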
-
-<blockquote>
-<p>TL;DR</p>
-
-<ul>
-<li>  Aurora manages jobs made of tasks.</li>
-<li>  Mesos manages tasks made of processes.</li>
-<li>  Thermos manages processes.</li>
-<li>  All are defined in a <code>.aurora</code> configuration file.</li>
-</ul>
-</blockquote>
-
-<p><img alt="Aurora hierarchy" src="../images/aurora_hierarchy.png" /></p>
-
-<p>Each <code>Task</code> has a <em>sandbox</em> created when the <code>Task</code> starts and garbage
-collected when it finishes. All of a <code>Task</code>&rsquo;s processes run in its
-sandbox, so processes can share state by using a shared current working
-directory.</p>
-
-<p>The sandbox garbage collection policy considers many factors, most
-importantly age and size. It makes a best-effort attempt to keep
-sandboxes around as long as possible post-task in order for service
-owners to inspect data and logs, should the <code>Task</code> have completed
-abnormally. But you can&rsquo;t design your applications assuming sandboxes
-will be around forever; plan for their removal, e.g. by building log
-saving or other checkpointing mechanisms directly into your application
-or into your <code>Job</code> description.</p>
-
-<h2 id="job-lifecycle">Job Lifecycle</h2>
-
-<p>When Aurora reads a configuration file and finds a <code>Job</code> definition, it:</p>
-
-<ol>
-<li> Evaluates the <code>Job</code> definition.</li>
-<li> Splits the <code>Job</code> into its constituent <code>Task</code>s.</li>
-<li> Sends those <code>Task</code>s to the scheduler.</li>
-<li> The scheduler puts the <code>Task</code>s into <code>PENDING</code> state, starting each
-<code>Task</code>&rsquo;s life cycle.</li>
-</ol>
-
-<h3 id="life-of-a-task">Life Of A Task</h3>
-
-<p><img alt="Life of a task" src="../images/lifeofatask.png" /></p>
-
-<h3 id="pending-to-running-states">PENDING to RUNNING states</h3>
-
-<p>When a <code>Task</code> is in the <code>PENDING</code> state, the scheduler constantly
-searches for machines satisfying that <code>Task</code>&rsquo;s resource request
-requirements (RAM, disk space, CPU time) while maintaining configuration
-constraints such as &ldquo;a <code>Task</code> must run on machines  dedicated  to a
-particular role&rdquo; or attribute limit constraints such as &ldquo;at most 2
-<code>Task</code>s from the same <code>Job</code> may run on each rack&rdquo;. When the scheduler
-finds a suitable match, it assigns the <code>Task</code> to a machine and puts the
-<code>Task</code> into the <code>ASSIGNED</code> state.</p>
-
-<p>From the <code>ASSIGNED</code> state, the scheduler sends an RPC to the slave
-machine containing <code>Task</code> configuration, which the slave uses to spawn
-an executor responsible for the <code>Task</code>&rsquo;s lifecycle. When the scheduler
-receives an acknowledgement that the machine has accepted the <code>Task</code>,
-the <code>Task</code> goes into <code>STARTING</code> state.</p>
-
-<p>The <code>STARTING</code> state initializes the <code>Task</code> sandbox. When the sandbox is fully
-initialized, Thermos begins to invoke <code>Process</code>es, and the slave
-machine sends an update to the scheduler that the <code>Task</code> is
-in the <code>RUNNING</code> state.</p>
-
-<p>If a <code>Task</code> stays in <code>ASSIGNED</code> or <code>STARTING</code> for too long, the
-scheduler forces it into <code>LOST</code> state, creating a new <code>Task</code> in its
-place that&rsquo;s sent into <code>PENDING</code> state. This is technically true of any
-active state: if the Mesos core tells the scheduler that a slave has
-become unhealthy (or outright disappeared), the <code>Task</code>s assigned to that
-slave go into <code>LOST</code> state and new <code>Task</code>s are created in their place.
-From <code>PENDING</code> state, there is no guarantee a <code>Task</code> will be reassigned
-to the same machine unless job constraints explicitly force it there.</p>
-
-<p>If there is a state mismatch (e.g. a machine returns from a network partition
-and the scheduler has marked all its <code>Task</code>s <code>LOST</code> and rescheduled
-them), a state reconciliation process kills the errant <code>RUNNING</code> tasks,
-which may take up to an hour. But to emphasize the point: there is no
-uniqueness guarantee for a single instance of a job in the presence of
-network partitions. If a <code>Task</code> requires that guarantee, it should be baked in at
-the application level using a distributed coordination service such as
-ZooKeeper.</p>
-
-<h3 id="task-updates">Task Updates</h3>
-
-<p><code>Job</code> configurations can be updated at any point in their lifecycle.
-Usually updates are done incrementally using a process called a <em>rolling
-upgrade</em>, in which Tasks are upgraded in small groups, one group at a
-time.  Updates are done using various Aurora Client commands.</p>
-
-<p>For a configuration update, the Aurora Client calculates required changes
-by examining the current job config state and the new desired job config.
-It then starts a rolling batched update process by going through every batch
-and performing these operations:</p>
-
-<ul>
-<li>If an instance is present in the scheduler but isn&rsquo;t in the new config,
-then that instance is killed.</li>
-<li>If an instance is not present in the scheduler but is present in
-the new config, then the instance is created.</li>
-<li>If an instance is present in both the scheduler and the new config, then
-the client diffs the two task configs. If it detects any changes, it
-performs an instance update by killing the old-config instance and
-creating the new-config instance.</li>
-</ul>
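-
-<p>Schematically, the per-instance decision looks like the following pseudocode, an illustration of the rules above rather than the client&#39;s actual implementation:</p>
-<pre class="highlight text">for instance_id in scheduler_instances | new_config_instances:
-  if instance_id not in new_config_instances:
-    kill(instance_id)                  # removed from the new config
-  elif instance_id not in scheduler_instances:
-    create(instance_id)                # added by the new config
-  elif scheduler_config(instance_id) != new_config(instance_id):
-    kill(instance_id)                  # changed: replace the old-config instance
-    create(instance_id)                # with a new-config instance
-</pre>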
-
-<p>The Aurora client continues through the instance list until all tasks are
-updated, in <code>RUNNING</code>, and healthy for a configurable amount of time.
-If the client determines the update is not going well (a percentage of health
-checks has failed), it cancels the update.</p>
-
-<p>Update cancellation runs a procedure similar to the update sequence
-described above, but in reverse order. New instance configs are swapped
-with old instance configs and batch updates proceed backwards
-from the point where the update failed. For example, updating batches
-(0,1,2), (3,4,5), (6,7,8) with a failure at instance 8 results in a
-rollback in the order (8,7,6), (5,4,3), (2,1,0).</p>
-
-<h3 id="http-health-checking-and-graceful-shutdown">HTTP Health Checking and Graceful Shutdown</h3>
-
-<p>The Executor implements a protocol for rudimentary control of a task via HTTP.  Tasks subscribe to
-this protocol by declaring a port named <code>health</code>.  Take for example this configuration snippet:</p>
-<pre class="highlight text">nginx = Process(
-  name = &#39;nginx&#39;,
-  cmdline = &#39;./run_nginx.sh -port {{thermos.ports[http]}}&#39;)
-</pre>
-<p>When this Process is included in a job, the job will be allocated a port, and the command line
-will be replaced with something like:</p>
-<pre class="highlight text">./run_nginx.sh -port 42816
-</pre>
-<p>where 42816 happens to be the allocated port.  Typically, the Executor monitors Processes within
-a task only by liveness of the forked process.  However, when a <code>health</code> port is allocated, it will
-also send periodic HTTP health checks.  A task requesting a <code>health</code> port must handle the following
-requests:</p>
-
-<table><thead>
-<tr>
-<th>HTTP request</th>
-<th>Description</th>
-</tr>
-</thead><tbody>
-<tr>
-<td><code>GET /health</code></td>
-<td>Inquires whether the task is healthy.</td>
-</tr>
-<tr>
-<td><code>POST /quitquitquit</code></td>
-<td>Task should initiate graceful shutdown.</td>
-</tr>
-<tr>
-<td><code>POST /abortabortabort</code></td>
-<td>Final warning that the task is being killed.</td>
-</tr>
-</tbody></table>
-
-<p>Please see the
-<a href="/documentation/latest/configuration-reference/#user-content-healthcheckconfig-objects">configuration reference</a> for
-configuration options for this feature.</p>
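-
-<p>A minimal task-side sketch of this protocol, using Python 2&rsquo;s standard library. The expected body of the <code>/health</code> response (<code>ok</code> here) is an assumption; the configuration reference is authoritative:</p>
-<pre class="highlight text">import sys
-import BaseHTTPServer  # Python 2, matching the python2.6 era of this document
-
-class HealthHandler(BaseHTTPServer.BaseHTTPRequestHandler):
-  def do_GET(self):
-    if self.path == &#39;/health&#39;:
-      self.send_response(200)
-      self.end_headers()
-      self.wfile.write(&#39;ok&#39;)  # assumed response body; see the reference
-
-  def do_POST(self):
-    if self.path in (&#39;/quitquitquit&#39;, &#39;/abortabortabort&#39;):
-      self.send_response(200)
-      self.end_headers()
-      # begin graceful (or immediate) shutdown of the task here
-
-port = int(sys.argv[1])  # pass {{thermos.ports[health]}} on the command line
-BaseHTTPServer.HTTPServer((&#39;&#39;, port), HealthHandler).serve_forever()
-</pre>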
-
-<h4 id="snoozing-health-checks">Snoozing Health Checks</h4>
-
-<p>If you need to pause your health check, you can do so by touching a file inside your sandbox
-named <code>.healthchecksnooze</code>.</p>
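-
-<p>For example, from a shell inside the task&rsquo;s sandbox (the sandbox path itself varies per task):</p>
-<pre class="highlight text">touch .healthchecksnooze   # pause health checks
-rm .healthchecksnooze      # resume health checks
-</pre>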
-
-<p>As long as that file is present, health checks will be disabled, enabling users to gather core dumps
-or other performance measurements without worrying about Aurora&rsquo;s health check killing their
-process.</p>
-
-<p>WARNING: Remember to remove this file when you are done; otherwise your instance&rsquo;s health
-checks will remain disabled.</p>
-
-<h4 id="tearing-a-task-down">Tearing a task down</h4>
-
-<p>The Executor follows an escalation sequence when killing a running task:</p>
-
-<ol>
-<li>If the <code>health</code> port is not present, skip to (5)</li>
-<li>POST /quitquitquit</li>
-<li>wait 5 seconds</li>
-<li>POST /abortabortabort</li>
-<li>Send SIGTERM (<code>kill</code>)</li>
-<li>Send SIGKILL (<code>kill -9</code>)</li>
-</ol>
-
-<p>If the Executor notices that all Processes in a Task have aborted during this sequence, it will
-not proceed with subsequent steps.  Note that graceful shutdown is best-effort, and due to the many
-inevitable realities of distributed systems, it may not be performed.</p>
-
-<h3 id="giving-priority-to-production-tasks:-preempting">Giving Priority to Production Tasks: PREEMPTING</h3>
-
-<p>Sometimes a Task needs to be interrupted, such as when a non-production
-Task&rsquo;s resources are needed by a higher-priority production Task. This
-type of interruption is called <em>preemption</em>. When this happens in
-Aurora, the non-production Task is killed and moved into
-the <code>PREEMPTING</code> state when both of the following are true:</p>
-
-<ul>
-<li>The task being killed is a non-production task.</li>
-<li>The other task is a <code>PENDING</code> production task that hasn&rsquo;t been
-scheduled due to a lack of resources.</li>
-</ul>
-
-<p>Since production tasks are much more important, Aurora kills the
-non-production task to free up resources for the production task. The
-scheduler UI shows that the non-production task was preempted in favor of the
-production task. At some point, tasks in <code>PREEMPTING</code> move to <code>KILLED</code>.</p>
-
-<p>Note that non-production tasks consuming many resources are likely to be
-preempted in favor of production tasks.</p>
-
-<h3 id="natural-termination:-finished,-failed">Natural Termination: FINISHED, FAILED</h3>
-
-<p>A <code>RUNNING</code> <code>Task</code> can terminate without direct user interaction. For
-example, it may be a finite computation that finishes, even something as
-simple as <code>echo hello world</code>. Or it could be an exceptional condition in
-a long-lived service. If the <code>Task</code> is successful (its underlying
-processes have succeeded with exit status <code>0</code> or finished without
-reaching failure limits) it moves into <code>FINISHED</code> state. If it finished
-after reaching a set of failure limits, it goes into <code>FAILED</code> state.</p>
-
-<h3 id="forceful-termination:-killing,-restarting">Forceful Termination: KILLING, RESTARTING</h3>
-
-<p>You can terminate a <code>Task</code> by issuing an <code>aurora kill</code> command, which
-moves it into <code>KILLING</code> state. The scheduler then sends the slave  a
-request to terminate the <code>Task</code>. If the scheduler receives a successful
-response, it moves the Task into <code>KILLED</code> state and never restarts it.</p>
-
-<p>The scheduler has access to a non-public <code>RESTARTING</code> state. If a <code>Task</code>
-is forced into the <code>RESTARTING</code> state, the scheduler kills the
-underlying task but in parallel schedules an identical replacement for
-it.</p>
-
-<h2 id="configuration">Configuration</h2>
-
-<p>You define and configure your Jobs (and their Tasks and Processes) in
-Aurora configuration files. Their filenames end with the <code>.aurora</code>
-suffix, and you write them in Python making use of the Pystachio
-templating language, along
-with specific Aurora, Mesos, and Thermos commands and methods. See the
-<a href="/documentation/latest/configuration-reference/">Configuration Guide and Reference</a> and
-<a href="/documentation/latest/configuration-tutorial/">Configuration Tutorial</a>.</p>
-
-<h2 id="service-discovery">Service Discovery</h2>
-
-<p>It is possible for the Aurora executor to announce tasks into ServerSets for
-the purpose of service discovery.  ServerSets use the ZooKeeper <a href="http://zookeeper.apache.org/doc/trunk/recipes.html#sc_outOfTheBox">group membership pattern</a>,
-of which there are several reference implementations:</p>
-
-<ul>
-<li><a href="https://github.com/apache/mesos/blob/master/src/zookeeper/group.cpp">C++</a></li>
-<li><a href="https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/zookeeper/ServerSetImpl.java#L221">Java</a></li>
-<li><a href="https://github.com/twitter/commons/blob/master/src/python/twitter/common/zookeeper/serverset/serverset.py#L51">Python</a></li>
-</ul>
-
-<p>These can also be used natively in Finagle using the <a href="https://github.com/twitter/finagle/blob/master/finagle-serversets/src/main/scala/com/twitter/finagle/zookeeper/ZookeeperServerSetCluster.scala">ZookeeperServerSetCluster</a>.</p>
-
-<p>For more information about how to configure announcing, see the <a href="/documentation/latest/configuration-reference/">Configuration Reference</a>.</p>
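-
-<p>For illustration, the group membership pattern itself is small. Here is a sketch against the third-party <code>kazoo</code> Python client (not one of the reference implementations above), with an assumed ZooKeeper endpoint and an illustrative path:</p>
-<pre class="highlight text">from kazoo.client import KazooClient
-
-zk = KazooClient(hosts=&#39;192.168.33.7:2181&#39;)  # assumed ZooKeeper endpoint
-zk.start()
-
-# Join the group: the ephemeral znode disappears if this process dies.
-zk.create(&#39;/myservice/members/member_&#39;, b&#39;host:port&#39;,
-          ephemeral=True, sequence=True, makepath=True)
-
-# Discover the current members of the group.
-print(zk.get_children(&#39;/myservice/members&#39;))
-</pre>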
-
-<h2 id="creating-jobs">Creating Jobs</h2>
-
-<p>You create and manipulate Aurora Jobs with the Aurora client, whose
-command-line invocations all start with
-<code>aurora</code>. See <a href="/documentation/latest/client-commands/">Aurora Client Commands</a> for details
-about the Aurora Client.</p>
-
-<h2 id="interacting-with-jobs">Interacting With Jobs</h2>
-
-<p>You interact with Aurora jobs either via:</p>
-
-<ul>
-<li>Read-only Web UIs</li>
-</ul>
-
-<p>Part of the output from creating a new Job is a URL for the Job&rsquo;s scheduler UI page.</p>
-
-<p>For example:</p>
-<pre class="highlight text">  vagrant@precise64:~$ aurora create devcluster/www-data/prod/hello \
-  /vagrant/examples/jobs/hello_world.aurora
-  INFO] Creating job hello
-  INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
-  INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
-</pre>
-<p>The &ldquo;Job url&rdquo; goes to the Job&rsquo;s scheduler UI page. To go to the overall scheduler UI page,
-  stop at the &ldquo;scheduler&rdquo; part of the URL, in this case, <code>http://precise64:8081/scheduler</code></p>
-
-<p>You can also reach the scheduler UI page via the Client command <code>aurora open</code>:</p>
-<pre class="highlight text">  aurora open [&lt;cluster&gt;[/&lt;role&gt;[/&lt;env&gt;/&lt;job_name&gt;]]]
-</pre>
-<p>If only the cluster is specified, it goes directly to that cluster&rsquo;s scheduler main page.
-  If the role is specified, it goes to the top-level role page. If the full job key is specified,
-  it goes directly to the job page where you can inspect individual tasks.</p>
-
-<p>Once you click through to a role page, you see Jobs grouped as pending, active,
-  and finished. Jobs are arranged by role, typically a service account for production
-  jobs and user accounts for test or development jobs.</p>
-
-<ul>
-<li>The Aurora Client&rsquo;s command line interface</li>
-</ul>
-
-<p>Several Client commands have a <code>-o</code> option that automatically opens a window to
-  the specified Job&rsquo;s scheduler UI URL. And, as described above, the <code>open</code> command also takes
-  you there.</p>
-
-<p>For a complete list of Aurora Client commands, use <code>aurora help</code> and, for specific
-  command help, <code>aurora help [command]</code>. <strong>Note</strong>: <code>aurora help open</code>
-  returns <code>&quot;subcommand open not found&quot;</code> due to our reflection tricks not
-  working on words that are also builtin Python function names. Or see the
-  <a href="/documentation/latest/client-commands/">Aurora Client Commands</a> document.</p>
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/documentation/latest/vagrant/index.html b/publish/documentation/latest/vagrant/index.html
deleted file mode 100644
index 65f3a1f..0000000
--- a/publish/documentation/latest/vagrant/index.html
+++ /dev/null
@@ -1,121 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Transparent Apache Aurora logo with dark background"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h1 id="getting-started">Getting Started</h1>
-
-<p>To replicate a real cluster environment as closely as possible, we use
-<a href="http://www.vagrantup.com/">Vagrant</a> to launch a complete Aurora cluster in a virtual machine.</p>
-
-<h2 id="prerequisites">Prerequisites</h2>
-
-<ul>
-<li><a href="https://www.virtualbox.org/">VirtualBox</a></li>
-<li><a href="http://www.vagrantup.com/">Vagrant</a></li>
-<li>A clone of the Aurora repository, or source distribution.</li>
-</ul>
-
-<p>You can start a local cluster by running:</p>
-<pre class="highlight text">vagrant up
-</pre>
-<p>Once started, several services should be running:</p>
-
-<ul>
-<li>scheduler is listening on <a href="http://192.168.33.7:8081">http://192.168.33.7:8081</a></li>
-<li>observer is listening on <a href="http://192.168.33.7:1338">http://192.168.33.7:1338</a></li>
-<li>master is listening on <a href="http://192.168.33.7:5050">http://192.168.33.7:5050</a></li>
-<li>slave is listening on <a href="http://192.168.33.7:5051">http://192.168.33.7:5051</a></li>
-</ul>
-
-<p>You can SSH into the machine with <code>vagrant ssh</code> and execute Aurora client commands using the
-<code>aurora</code> command.  A pre-installed <code>clusters.json</code> file refers to your local cluster as
-<code>devcluster</code>, which you will use in client commands.</p>
-
-<h1 id="deleting-your-local-cluster">Deleting your local cluster</h1>
-
-<p>Once you are finished with your local cluster, or if you would otherwise like to start from scratch,
-you can use the command <code>vagrant destroy</code> to power off and delete the virtual machine.</p>
-
-<h1 id="rebuilding-components">Rebuilding components</h1>
-
-<p>If you are changing Aurora code and would like to rebuild a component, you can use the <code>aurorabuild</code>
-command on your vagrant machine to build and restart a component.  This is considerably faster than
-destroying and rebuilding your VM.</p>
-
-<p><code>aurorabuild</code> accepts a list of components to build and update.  You may invoke the command with
-no arguments to get a list of supported components.</p>
-<pre class="highlight text"> vagrant ssh -c &#39;aurorabuild client&#39;
-</pre>
-<h1 id="troubleshooting">Troubleshooting</h1>
-
-<p>Most Vagrant-related problems can be fixed by the following steps:</p>
-
-<ul>
-<li>Destroying the Vagrant environment with <code>vagrant destroy</code></li>
-<li>Killing any orphaned VMs (see AURORA-499) with the <code>virtualbox</code> UI or the <code>VBoxManage</code> command line tool</li>
-<li>Cleaning the repository of build artifacts and other intermediate output with <code>git clean -fdx</code></li>
-<li>Bringing up the Vagrant environment with <code>vagrant up</code></li>
-</ul>
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/downloads/index.html b/publish/downloads/index.html
deleted file mode 100644
index 72e96df..0000000
--- a/publish/downloads/index.html
+++ /dev/null
@@ -1,96 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-        <div class="container-fluid section-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Transparent Apache Aurora logo with dark background"/></a>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-</div>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <h1 id="download-aurora">Download Aurora</h1>
-
-<h2 id="current-release">Current Release</h2>
-
-<p>The current released version is <em>0.6.0-incubating</em>. <a href="https://dist.apache.org/repos/dist/release/incubator/aurora/0.6.0/apache-aurora-0.6.0-incubating.tar.gz">(.tar.gz)</a>
-<a href="https://dist.apache.org/repos/dist/release/incubator/aurora/0.6.0/apache-aurora-0.6.0-incubating.tar.gz.md5">(md5)</a>
-<a href="https://dist.apache.org/repos/dist/release/incubator/aurora/0.6.0/apache-aurora-0.6.0-incubating.tar.gz.sha">(sha)</a> 
-<a href="https://dist.apache.org/repos/dist/release/incubator/aurora/0.6.0/apache-aurora-0.6.0-incubating.tar.gz.asc">(sig)</a></p>
-
-<p>To quickly get started, we recommend using Vagrant and following the <a href="/documentation/latest/vagrant/">Getting Started guide</a>.</p>
-
-<h2 id="previous-releases">Previous Releases</h2>
-
-<p><em>0.5.0-incubating</em> <a href="https://dist.apache.org/repos/dist/release/incubator/aurora/0.5.0/apache-aurora-0.5.0-incubating.tar.gz">(.tar.gz)</a>
-<a href="https://dist.apache.org/repos/dist/release/incubator/aurora/0.5.0/apache-aurora-0.5.0-incubating.tar.gz.md5">(md5)</a>
-<a href="https://dist.apache.org/repos/dist/release/incubator/aurora/0.5.0/apache-aurora-0.5.0-incubating.tar.gz.sha">(sha)</a> 
-<a href="https://dist.apache.org/repos/dist/release/incubator/aurora/0.5.0/apache-aurora-0.5.0-incubating.tar.gz.asc">(sig)</a></p>
-
-<h2 id="git">Git</h2>
-
-<p>The latest code is available in git via:</p>
-<pre class="highlight text">git clone http://git.apache.org/incubator-aurora.git
-</pre>
-<p>You can browse the repo on
-<a href="https://git-wip-us.apache.org/repos/asf?p=incubator-aurora.git">Apache Git</a>
-and the <a href="https://github.com/apache/incubator-aurora">Github mirror</a>.</p>
-
-  		</div>
-  	  </div>
-	  
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/publish/index.html b/publish/index.html
deleted file mode 100644
index 8e9a2aa..0000000
--- a/publish/index.html
+++ /dev/null
@@ -1,105 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-	<title>Apache Aurora</title>
-    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
-    <link href="/assets/css/main.css" rel="stylesheet">
-	<!-- Analytics -->
-	<script type="text/javascript">
-		  var _gaq = _gaq || [];
-		  _gaq.push(['_setAccount', 'UA-45879646-1']);
-		  _gaq.push(['_setDomainName', 'apache.org']);
-		  _gaq.push(['_trackPageview']);
-
-		  (function() {
-		    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
-		    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
-		    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
-		  })();
-	</script>
-  </head>
-  <body>
-	  
-	    <div class="container-fluid section-homepage-header">
-  <div class="container">
-    <div class="nav nav-bar">
-    <img src="/assets/img/aurora_logo_dkbkg.svg" width="300" alt="Transparent Apache Aurora logo with dark background"/>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
-    </div>
-  </div>
-<div class="container">
-<div class="row text-center">
-<div class="col-md-8 col-md-offset-2">
-  <br /><br /><br /><br /><br /><br /><br />
-  <h1>Apache Aurora is a Mesos framework for long-running services and cron jobs.</h1>
-  <br /><br /><br /><br /><br /><br /><br />
-</div>
-</div>
-</div>
-</div>
-		<div class="container-fluid section-ltgreen buffer">
-  <div class="container">
-  <div class="row">
-      <div class="col-md-5 col-md-offset-1"><h2>What does Aurora do?</h2><p>Aurora runs applications and services across a shared pool of machines, and is responsible for keeping them running, forever. When machines experience failure, Aurora intelligently reschedules those jobs onto healthy machines.</p></div>
-      <div class="col-md-4 col-md-offset-1">
-    <iframe
-    width="420" height="236"
-    src="http://www.youtube-nocookie.com/embed/asd_h6VzaJc?rel=0&amp;start=119&amp;controls=0"
-    allowfullscreen></iframe></div>
-  </div>
-  </div>
-</div>
-
-<div class="container-fluid buffer">
-  <div class="container">
-  <h2 class="text-center">Key Aurora Features</h2>
-  <div class="row">
-      <div class="col-md-2 text-center"><p><span class="glyphicon glyphicon-tasks"></span></p></div>
-      <div class="col-md-4"><h3>Rolling Updates with Automatic Rollback</h3><p>When updating a job, Aurora will detect the health and status of a deployment and automatically rollback if necessary.</p></div>
-      <div class="col-md-2 text-center"><p><span class="glyphicon glyphicon-th"></span></p></div>
-      <div class="col-md-4"><h3>Resource Quota and Multi-User Support</h3><p>Aurora has a quota system to provide guaranteed resources for specific applications, and can support multiple users to deploy services.</p></div>
-  </div>
-  <div class="row">
-      <div class="col-md-2 text-center"><p><span class="glyphicon glyphicon-list-alt"></span></p></div>
-      <div class="col-md-4"><h3>Sophisticated DSL</h3><p>Services are highly-configurable via a <a href="/documentation/latest/configuration-tutorial">DSL</a> which supports templating, allowing you to establish common patterns and avoid redundant configurations.</p></div>
-      <div class="col-md-2 text-center"><p><span class="glyphicon glyphicon-cloud-upload"></span></p></div>
-      <div class="col-md-4"><h3>Service Registration</h3><p>Aurora <a href="/documentation/latest/configuration-reference/#announcer-objects">announces</a> services to Apache ZooKeeper for discovery by clients like Finagle.</p></div>
-  </div>
- </div>
-</div>
-
-      
-      	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
-	</body>
-</html>
\ No newline at end of file
diff --git a/source/_footer.erb b/source/_footer.erb
new file mode 100644
index 0000000..56cb7ea
--- /dev/null
+++ b/source/_footer.erb
@@ -0,0 +1,24 @@
+	<div class="container-fluid section-footer buffer">
+      <div class="container">
+        <div class="row">
+		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
+		  <ul>
+		    <li><a href="/downloads/">Downloads</a></li>
+            <li><a href="/community/">Mailing Lists</a></li>
+			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
+			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
+		  </ul>
+	      </div>
+		  <div class="col-md-2"><h3>The ASF</h3>
+          <ul>
+            <li><a href="http://www.apache.org/licenses/">License</a></li>
+            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
+            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
+            <li><a href="http://www.apache.org/security/">Security</a></li>
+          </ul>
+		  </div>
+		  <div class="col-md-6">
+			<p class="disclaimer">&copy; 2014-2017 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
+        </div>
+      </div>
+    </div>
diff --git a/source/_footer.md.erb b/source/_footer.md.erb
deleted file mode 100644
index 7014b43..0000000
--- a/source/_footer.md.erb
+++ /dev/null
@@ -1,25 +0,0 @@
-	<div class="container-fluid section-footer buffer">
-      <div class="container">
-        <div class="row">
-		  <div class="col-md-2 col-md-offset-1"><h3>Quick Links</h3>
-		  <ul>
-		    <li><a href="/downloads/">Downloads</a></li>
-            <li><a href="/community/">Mailing Lists</a></li>
-			<li><a href="http://issues.apache.org/jira/browse/AURORA">Issue Tracking</a></li>
-			<li><a href="/documentation/latest/contributing/">How To Contribute</a></li>     
-		  </ul>
-	      </div>
-		  <div class="col-md-2"><h3>The ASF</h3>
-          <ul>
-            <li><a href="http://www.apache.org/licenses/">License</a></li>
-            <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>  
-            <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
-            <li><a href="http://www.apache.org/security/">Security</a></li>
-          </ul>
-		  </div>
-		  <div class="col-md-6">
-		    <p class="disclaimer">Apache Aurora is an effort undergoing incubation at The Apache Software Foundation (ASF), sponsored by the Apache Incubator. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</p>
-			<p class="disclaimer">Copyright 2014 <a href="http://www.apache.org/">Apache Software Foundation</a>. Licensed under the <a href="http://www.apache.org/licenses/">Apache License v2.0</a>. The <a href="https://www.flickr.com/photos/trondk/12706051375/">Aurora Borealis IX photo</a> displayed on the homepage is available under a <a href="https://creativecommons.org/licenses/by-nc-nd/2.0/">Creative Commons BY-NC-ND 2.0 license</a>. Apache, Apache Aurora, and the Apache feather logo are trademarks of The Apache Software Foundation.</p>
-        </div>
-      </div>
-    </div>
\ No newline at end of file
diff --git a/source/_header_homepage.md.erb b/source/_header_homepage.md.erb
index 72de9ba..4c23a00 100644
--- a/source/_header_homepage.md.erb
+++ b/source/_header_homepage.md.erb
@@ -1,22 +1,21 @@
 <div class="container-fluid section-homepage-header">
   <div class="container">
     <div class="nav nav-bar">
-    <img src="/assets/img/aurora_logo_dkbkg.svg" width="300" alt="Transparent Apache Aurora logo with dark background"/>
-	<ul class="nav navbar-nav navbar-right">
-      <li><a href="/documentation/latest/">Documentation</a></li>
-      <li><a href="/community/">Community</a></li>
-      <li><a href="/downloads/">Downloads</a></li>
-      <li><a href="/blog/">Blog</a></li>
-    </ul>
+      <img src="/assets/img/aurora_logo_dkbkg.svg" width="300"
+           alt="Transparent Apache Aurora logo with dark background"/>
+      <ul class="nav navbar-nav navbar-right">
+        <li><a href="/documentation/latest/">Documentation</a></li>
+        <li><a href="/community/">Community</a></li>
+        <li><a href="/downloads/">Downloads</a></li>
+        <li><a href="/blog/">Blog</a></li>
+      </ul>
     </div>
   </div>
-<div class="container">
-<div class="row text-center">
-<div class="col-md-8 col-md-offset-2">
-  <br /><br /><br /><br /><br /><br /><br />
-  <h1>Apache Aurora is a Mesos framework for long-running services and cron jobs.</h1>
-  <br /><br /><br /><br /><br /><br /><br />
+  <div class="container">
+    <div class="row text-center">
+      <div class="col-md-8 col-md-offset-2">
+        <h1>Aurora is a Mesos framework for <br />long-running services and cron jobs.</h1>
+      </div>
+    </div>
+  </div>
 </div>
-</div>
-</div>
-</div>
\ No newline at end of file
diff --git a/source/_header_normal.md.erb b/source/_header_normal.md.erb
index 08e8f29..fa97d2e 100644
--- a/source/_header_normal.md.erb
+++ b/source/_header_normal.md.erb
@@ -1,7 +1,7 @@
 <div class="container-fluid section-header">
   <div class="container">
     <div class="nav nav-bar">
-    <a href="/"><img src="/assets/img/aurora_logo_white_bkg.svg" width="300" alt="Transparent Apache Aurora logo with dark background"/></a>
+    <a href="/"><img src="/assets/img/aurora_logo_dkbkg.svg" width="300" alt="Transparent Apache Aurora logo with dark background"/></a>
 	<ul class="nav navbar-nav navbar-right">
       <li><a href="/documentation/latest/">Documentation</a></li>
       <li><a href="/community/">Community</a></li>
diff --git a/source/assets/css/main.css b/source/assets/css/main.css
index 825e45d..c260e93 100644
--- a/source/assets/css/main.css
+++ b/source/assets/css/main.css
@@ -11,13 +11,13 @@
     font-size: 6em;
 	padding-top: 25px;
 }
-.section-ltgreen {
-	background: #40c3b0;
+.section-dark-flow {
+	background: #0A0A08;
 	color: #fff;
 	font-size: 1.3em;
 	text-shadow: .5px .5px 0px #1b5d3e;
 }
-.section-ltgreen h2 {
+.section-dark-flow h2 {
 	color: #fff;
 }
 .section-footer {
@@ -26,6 +26,7 @@
 .section-homepage-header {
 	background-image: url("/assets/img/aurora_image_1600.jpg");
 	background-size: cover;
+        height: 375px;
 	background-position: center bottom;
 }
 .section-homepage-header h1 {
@@ -60,6 +61,9 @@
 	font-size: .9em;
 	color: #AAA;
 }
+.documentation img {
+	max-width: 600px;
+}
 iframe {
 	border: none;
 }
@@ -76,4 +80,4 @@
 code, pre {
 	color: #000;
 	border: none;
-}
\ No newline at end of file
+}
diff --git a/source/assets/img/aurora_image_1600.jpg b/source/assets/img/aurora_image_1600.jpg
index b97a801..13d00cb 100644
--- a/source/assets/img/aurora_image_1600.jpg
+++ b/source/assets/img/aurora_image_1600.jpg
Binary files differ
diff --git a/source/assets/img/aurora_logo_on_blue.svg b/source/assets/img/aurora_logo_on_blue.svg
new file mode 100644
index 0000000..df01164
--- /dev/null
+++ b/source/assets/img/aurora_logo_on_blue.svg
@@ -0,0 +1,75 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Generator: Adobe Illustrator 19.2.1, SVG Export Plug-In . SVG Version: 6.00 Build 0)  -->
+<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
+	 viewBox="0 0 841.9 595.3" style="enable-background:new 0 0 841.9 595.3;" xml:space="preserve">
+<style type="text/css">
+	.st0{fill:#004764;}
+	.st1{fill:#5CC3AF;}
+	.st2{fill:#FFFFFF;}
+	.st3{font-family:'MyriadPro-Regular';}
+	.st4{font-size:12.5px;}
+	.st5{font-size:15px;}
+</style>
+<rect x="256" y="222" class="st0" width="330" height="150"/>
+<g>
+	<polygon class="st1" points="345.4,273.7 327.1,273.7 336.3,289.5 	"/>
+	<polygon class="st1" points="325.3,274.7 316.1,290.5 334.5,290.5 	"/>
+	<polygon class="st1" points="334.5,292.6 316.1,292.6 325.3,308.5 	"/>
+	<polygon class="st1" points="314.4,293.6 305.1,309.5 323.5,309.5 	"/>
+	<polygon class="st1" points="347.2,274.7 338,290.5 356.4,290.5 	"/>
+	<polygon class="st1" points="356.4,292.6 338,292.6 347.2,308.5 	"/>
+	<polygon class="st1" points="336.3,255.7 327.1,271.6 345.4,271.6 	"/>
+	<polygon class="st1" points="323.5,311.5 305.1,311.5 314.4,327.5 	"/>
+	<polygon class="st1" points="358.1,293.6 349,309.5 367.3,309.5 	"/>
+	<polygon class="st1" points="367.3,311.5 349,311.5 358.1,327.5 	"/>
+</g>
+<polygon class="st2" points="336.3,293.6 327.1,309.5 345.4,309.5 "/>
+<polygon class="st2" points="323.5,273.7 305.1,273.7 314.4,289.5 "/>
+<polyline class="st2" points="294.2,290.5 312.6,290.5 303.4,274.7 "/>
+<polyline class="st2" points="303.4,308.5 312.6,292.6 294.2,292.6 "/>
+<polygon class="st2" points="369.1,274.7 359.9,290.5 378.3,290.5 "/>
+<polygon class="st2" points="378.3,292.6 359.9,292.6 369.1,308.5 "/>
+<polygon class="st2" points="356.4,254.7 338,254.7 347.2,270.5 "/>
+<polygon class="st2" points="345.4,311.5 327,311.5 336.3,327.5 "/>
+<polygon class="st2" points="347.2,312.6 338,328.5 356.4,328.5 "/>
+<polygon class="st2" points="334.5,254.7 316.1,254.7 325.3,270.5 "/>
+<polygon class="st2" points="314.4,255.7 305.1,271.6 323.5,271.6 "/>
+<polygon class="st2" points="358.1,255.7 349,271.6 367.3,271.6 "/>
+<polygon class="st2" points="367.3,273.7 349,273.7 358.1,289.5 "/>
+<polygon class="st2" points="325.3,312.6 316.1,328.5 334.5,328.5 "/>
+<g>
+	<path class="st2" d="M436.6,312.5c-1.3,0-2.5-0.2-3.6-0.5c-1.1-0.4-2.1-0.9-2.9-1.6c-0.8-0.7-1.5-1.6-1.9-2.7
+		c-0.5-1.1-0.7-2.3-0.7-3.6v-17.1h3v17c0,0.9,0.1,1.7,0.5,2.4c0.3,0.7,0.7,1.3,1.3,1.8c0.6,0.5,1.2,0.8,1.9,1.1
+		c0.8,0.3,1.6,0.4,2.5,0.4c0.9,0,1.7-0.1,2.5-0.4c0.8-0.2,1.4-0.6,1.9-1.1c0.5-0.5,1-1.1,1.3-1.8c0.3-0.7,0.5-1.5,0.5-2.4v-17h3V304
+		c0,1.4-0.2,2.6-0.7,3.6c-0.5,1.1-1.1,1.9-1.9,2.7c-0.8,0.7-1.8,1.2-2.9,1.6C439.1,312.3,437.9,312.5,436.6,312.5z"/>
+	<path class="st2" d="M454,312.2v-25.3h6c0.8,0,1.7,0.1,2.7,0.4c1,0.3,1.9,0.7,2.8,1.2c0.9,0.6,1.6,1.3,2.2,2.1
+		c0.6,0.9,0.9,1.9,0.9,3.1c0,1.8-0.4,3.3-1.4,4.5c-0.9,1.2-2.2,2-4,2.4l5.4,11.5h-3.3l-5.3-11.3h-0.6H457v11.3H454z M457,298h3.1
+		c1.1,0,1.9-0.1,2.6-0.4c0.7-0.3,1.2-0.6,1.6-1c0.4-0.4,0.7-0.9,0.9-1.4c0.2-0.5,0.3-1,0.3-1.5c0-0.5-0.1-1-0.4-1.5
+		c-0.3-0.5-0.7-0.9-1.1-1.3c-0.5-0.4-1-0.7-1.7-0.9c-0.7-0.2-1.4-0.3-2.1-0.3H457V298z"/>
+	<path class="st2" d="M476.5,299.6c0-1.8,0.3-3.4,1-5c0.7-1.6,1.6-3,2.8-4.1c1.2-1.2,2.5-2.1,4.1-2.8c1.6-0.7,3.2-1,5-1
+		c1.8,0,3.4,0.3,5,1c1.6,0.7,3,1.6,4.1,2.8c1.2,1.2,2.1,2.6,2.8,4.1c0.7,1.6,1,3.3,1,5c0,1.8-0.3,3.5-1,5c-0.7,1.6-1.6,2.9-2.8,4.1
+		c-1.2,1.2-2.6,2.1-4.1,2.8c-1.6,0.7-3.2,1-5,1c-1.8,0-3.5-0.3-5-1c-1.6-0.7-2.9-1.6-4.1-2.8c-1.2-1.2-2.1-2.5-2.8-4.1
+		C476.8,303.1,476.5,301.4,476.5,299.6z M479.5,299.6c0,1.4,0.3,2.7,0.8,3.9c0.5,1.2,1.2,2.3,2.1,3.2c0.9,0.9,2,1.6,3.2,2.1
+		c1.2,0.5,2.5,0.8,3.9,0.8c1.4,0,2.7-0.3,3.9-0.8c1.2-0.5,2.3-1.2,3.2-2.1c0.9-0.9,1.6-2,2.1-3.2c0.5-1.2,0.8-2.5,0.8-3.9
+		c0-1.4-0.3-2.7-0.8-3.9c-0.5-1.2-1.2-2.3-2.1-3.2c-0.9-0.9-2-1.6-3.2-2.2c-1.2-0.5-2.5-0.8-3.9-0.8c-1.4,0-2.7,0.3-3.9,0.8
+		c-1.2,0.5-2.3,1.3-3.2,2.2c-0.9,0.9-1.6,2-2.1,3.2C479.8,296.9,479.5,298.2,479.5,299.6z"/>
+	<path class="st2" d="M510.3,312.2v-25.3h6c0.8,0,1.7,0.1,2.7,0.4c1,0.3,1.9,0.7,2.8,1.2c0.9,0.6,1.6,1.3,2.2,2.1
+		c0.6,0.9,0.9,1.9,0.9,3.1c0,1.8-0.5,3.3-1.4,4.5c-0.9,1.2-2.2,2-4,2.4l5.4,11.5h-3.3l-5.3-11.3h-0.6h-2.5v11.3H510.3z M513.3,298
+		h3.1c1.1,0,1.9-0.1,2.6-0.4c0.7-0.3,1.2-0.6,1.6-1c0.4-0.4,0.7-0.9,0.9-1.4c0.2-0.5,0.3-1,0.3-1.5c0-0.5-0.1-1-0.4-1.5
+		c-0.3-0.5-0.6-0.9-1.1-1.3c-0.5-0.4-1-0.7-1.7-0.9c-0.7-0.2-1.4-0.3-2.1-0.3h-3.2V298z"/>
+	<g>
+		<path class="st2" d="M413.4,299c-1.9-4.4-3.7-8.8-5.6-13.2c-0.5,1.2-1,2.4-1.5,3.6l3.6,8.5l0,0l1.9,4.4h0c0.7,1.7,1.4,3.3,2.1,5
+			c0.7,1.7,1.4,3.3,2.1,5h3C417.1,307.8,415.2,303.4,413.4,299z"/>
+		<path class="st2" d="M403.8,302.2l1.7-4.1h0c0.3-0.6,0.5-1.1,0.7-1.7l-1.5-3.6c-0.9,2-1.7,4.1-2.6,6.1c-1.8,4.4-3.7,8.8-5.6,13.2
+			h3c0.7-1.7,1.4-3.3,2.1-5C402.4,305.5,403.1,303.9,403.8,302.2L403.8,302.2z"/>
+	</g>
+	<g>
+		<path class="st2" d="M549.6,299c-1.9-4.4-3.7-8.8-5.6-13.2c-0.5,1.2-1,2.4-1.5,3.6l3.6,8.5l0,0l1.9,4.4h0c0.7,1.7,1.4,3.3,2.1,5
+			c0.7,1.7,1.4,3.3,2.1,5h3C553.3,307.8,551.5,303.4,549.6,299z"/>
+		<path class="st2" d="M540,302.2l1.7-4.1h0c0.2-0.6,0.5-1.1,0.7-1.7l-1.5-3.6c-0.9,2-1.7,4.1-2.6,6.1c-1.8,4.4-3.7,8.8-5.6,13.2h3
+			c0.7-1.7,1.4-3.3,2.1-5C538.6,305.5,539.3,303.9,540,302.2L540,302.2z"/>
+	</g>
+</g>
+<text transform="matrix(1 0 0 1 552.2891 279.3828)" class="st2 st3 st4">™</text>
+<text transform="matrix(1 0 0 1 396.5518 279.3828)" class="st2 st3 st5">Apache</text>
+</svg>
diff --git a/source/assets/img/aurora_logo_on_white.svg b/source/assets/img/aurora_logo_on_white.svg
new file mode 100644
index 0000000..63043d2
--- /dev/null
+++ b/source/assets/img/aurora_logo_on_white.svg
@@ -0,0 +1,77 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Generator: Adobe Illustrator 19.2.1, SVG Export Plug-In . SVG Version: 6.00 Build 0)  -->
+<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
+	 viewBox="0 0 841.9 595.3" style="enable-background:new 0 0 841.9 595.3;" xml:space="preserve">
+<style type="text/css">
+	.st0{fill:#5CC3AF;}
+	.st1{fill:#004865;}
+	.st2{fill:#004764;}
+	.st3{fill:#004664;}
+	.st4{font-family:'MyriadPro-Regular';}
+	.st5{font-size:12.5px;}
+	.st6{font-size:15px;}
+</style>
+<g>
+	<polygon class="st0" points="334.2,279.6 315.9,279.6 325.1,295.4 	"/>
+	<polygon class="st0" points="314.1,280.6 304.9,296.4 323.3,296.4 	"/>
+	<polygon class="st0" points="323.3,298.5 304.9,298.5 314.1,314.4 	"/>
+	<polygon class="st0" points="303.2,299.5 293.9,315.4 312.3,315.4 	"/>
+	<polygon class="st0" points="336,280.6 326.8,296.4 345.2,296.4 	"/>
+	<polygon class="st0" points="345.2,298.5 326.8,298.5 336,314.4 	"/>
+	<polygon class="st0" points="325.1,261.6 315.9,277.5 334.2,277.5 	"/>
+	<polygon class="st0" points="312.3,317.4 293.9,317.4 303.2,333.4 	"/>
+	<polygon class="st0" points="347,299.5 337.8,315.4 356.1,315.4 	"/>
+	<polygon class="st0" points="356.1,317.4 337.8,317.4 347,333.4 	"/>
+</g>
+<g>
+	<polygon class="st1" points="325.1,299.5 315.9,315.4 334.2,315.4 	"/>
+	<polygon class="st1" points="312.3,279.6 293.9,279.6 303.2,295.4 	"/>
+	<polygon class="st1" points="292.2,280.6 283,296.4 301.4,296.4 	"/>
+	<polygon class="st1" points="301.4,298.5 283,298.5 292.2,314.4 	"/>
+	<polygon class="st1" points="357.9,280.6 348.8,296.4 367.1,296.4 	"/>
+	<polygon class="st1" points="367.1,298.5 348.8,298.5 357.9,314.4 	"/>
+	<polygon class="st1" points="345.2,260.6 326.8,260.6 336,276.4 	"/>
+	<polygon class="st1" points="334.2,317.4 315.9,317.4 325.1,333.4 	"/>
+	<polygon class="st1" points="336,318.5 326.8,334.5 345.2,334.5 	"/>
+	<polygon class="st1" points="323.3,260.6 304.9,260.6 314.1,276.4 	"/>
+	<polygon class="st1" points="303.2,261.6 293.9,277.5 312.3,277.5 	"/>
+	<polygon class="st1" points="347,261.6 337.8,277.5 356.1,277.5 	"/>
+	<polygon class="st1" points="356.1,279.6 337.8,279.6 347,295.4 	"/>
+	<polygon class="st1" points="314.1,318.5 304.9,334.5 323.3,334.5 	"/>
+</g>
+<g>
+	<path class="st2" d="M426,319c-1.3,0-2.5-0.2-3.6-0.5c-1.1-0.4-2.1-0.9-2.9-1.6c-0.8-0.7-1.5-1.6-1.9-2.7c-0.5-1.1-0.7-2.3-0.7-3.6
+		v-17.1h3v17c0,0.9,0.1,1.7,0.5,2.4c0.3,0.7,0.7,1.3,1.3,1.8c0.6,0.5,1.2,0.8,1.9,1.1c0.8,0.3,1.6,0.4,2.5,0.4
+		c0.9,0,1.7-0.1,2.5-0.4c0.8-0.3,1.4-0.6,1.9-1.1c0.5-0.5,1-1.1,1.3-1.8c0.3-0.7,0.5-1.5,0.5-2.4v-17h3v17.1c0,1.4-0.2,2.6-0.7,3.6
+		c-0.5,1.1-1.1,1.9-1.9,2.7c-0.8,0.7-1.8,1.2-2.9,1.6C428.5,318.8,427.3,319,426,319z"/>
+	<path class="st2" d="M443.4,318.7v-25.3h6c0.8,0,1.7,0.1,2.7,0.4c1,0.3,1.9,0.7,2.8,1.2c0.9,0.6,1.6,1.3,2.2,2.1
+		c0.6,0.9,0.9,1.9,0.9,3.1c0,1.8-0.4,3.3-1.4,4.5c-0.9,1.2-2.2,2-4,2.4l5.4,11.5h-3.3l-5.3-11.3h-0.6h-2.5v11.3H443.4z M446.4,304.5
+		h3.1c1.1,0,1.9-0.1,2.6-0.4c0.7-0.3,1.2-0.6,1.6-1c0.4-0.4,0.7-0.9,0.9-1.4c0.2-0.5,0.3-1,0.3-1.5c0-0.5-0.1-1-0.4-1.5
+		c-0.3-0.5-0.7-0.9-1.1-1.3c-0.5-0.4-1-0.7-1.7-0.9c-0.7-0.2-1.4-0.3-2.1-0.3h-3.2V304.5z"/>
+	<path class="st2" d="M465.9,306.1c0-1.8,0.3-3.4,1-5c0.7-1.6,1.6-3,2.8-4.1c1.2-1.2,2.5-2.1,4.1-2.8c1.6-0.7,3.2-1,5-1
+		c1.8,0,3.4,0.3,5,1c1.6,0.7,3,1.6,4.1,2.8c1.2,1.2,2.1,2.6,2.8,4.1c0.7,1.6,1,3.2,1,5c0,1.8-0.3,3.5-1,5c-0.7,1.6-1.6,2.9-2.8,4.1
+		c-1.2,1.2-2.6,2.1-4.1,2.8c-1.6,0.7-3.3,1-5,1c-1.8,0-3.5-0.3-5-1c-1.6-0.7-2.9-1.6-4.1-2.8c-1.2-1.2-2.1-2.5-2.8-4.1
+		C466.3,309.6,465.9,307.9,465.9,306.1z M468.9,306.1c0,1.4,0.3,2.7,0.8,3.9c0.5,1.2,1.2,2.3,2.1,3.2c0.9,0.9,2,1.6,3.2,2.1
+		c1.2,0.5,2.5,0.8,3.9,0.8c1.4,0,2.7-0.3,3.9-0.8c1.2-0.5,2.3-1.2,3.2-2.1c0.9-0.9,1.6-2,2.1-3.2c0.5-1.2,0.8-2.5,0.8-3.9
+		c0-1.4-0.3-2.7-0.8-3.9c-0.5-1.2-1.2-2.3-2.1-3.2c-0.9-0.9-2-1.6-3.2-2.2c-1.2-0.5-2.5-0.8-3.9-0.8c-1.4,0-2.7,0.3-3.9,0.8
+		c-1.2,0.5-2.3,1.2-3.2,2.2c-0.9,0.9-1.6,2-2.1,3.2C469.2,303.4,468.9,304.7,468.9,306.1z"/>
+	<path class="st2" d="M499.7,318.7v-25.3h6c0.8,0,1.7,0.1,2.7,0.4c1,0.3,1.9,0.7,2.8,1.2c0.9,0.6,1.6,1.3,2.2,2.1
+		c0.6,0.9,0.9,1.9,0.9,3.1c0,1.8-0.5,3.3-1.4,4.5c-0.9,1.2-2.2,2-4,2.4l5.4,11.5h-3.3l-5.3-11.3h-0.6h-2.5v11.3H499.7z M502.7,304.5
+		h3.1c1.1,0,1.9-0.1,2.6-0.4c0.7-0.3,1.2-0.6,1.6-1c0.4-0.4,0.7-0.9,0.9-1.4c0.2-0.5,0.2-1,0.2-1.5c0-0.5-0.1-1-0.4-1.5
+		c-0.3-0.5-0.6-0.9-1.1-1.3c-0.5-0.4-1-0.7-1.7-0.9c-0.7-0.2-1.4-0.3-2.1-0.3h-3.2V304.5z"/>
+	<g>
+		<path class="st2" d="M402.8,305.5c-1.9-4.4-3.7-8.8-5.6-13.2c-0.5,1.2-1,2.4-1.5,3.6l3.6,8.5l0,0l1.9,4.4h0c0.7,1.7,1.4,3.3,2.1,5
+			c0.7,1.7,1.4,3.3,2.1,5h3C406.5,314.3,404.6,309.9,402.8,305.5z"/>
+		<path class="st2" d="M393.2,308.7l1.7-4.1h0c0.3-0.6,0.5-1.1,0.7-1.7l-1.5-3.6c-0.9,2-1.7,4.1-2.6,6.1c-1.8,4.4-3.7,8.8-5.6,13.2
+			h3c0.7-1.7,1.4-3.3,2.1-5C391.8,312,392.5,310.4,393.2,308.7L393.2,308.7z"/>
+	</g>
+	<g>
+		<path class="st2" d="M539,305.5c-1.9-4.4-3.7-8.8-5.6-13.2c-0.5,1.2-1,2.4-1.5,3.6l3.6,8.5l0,0l1.9,4.4h0c0.7,1.7,1.4,3.3,2.1,5
+			c0.7,1.7,1.4,3.3,2.1,5h3C542.7,314.3,540.9,309.9,539,305.5z"/>
+		<path class="st2" d="M529.5,308.7l1.7-4.1h0c0.2-0.6,0.5-1.1,0.7-1.7l-1.5-3.6c-0.9,2-1.7,4.1-2.6,6.1c-1.8,4.4-3.7,8.8-5.6,13.2
+			h3c0.7-1.7,1.4-3.3,2.1-5C528,312,528.8,310.4,529.5,308.7L529.5,308.7z"/>
+	</g>
+</g>
+<text transform="matrix(1 0 0 1 544.0156 285.3135)" class="st3 st4 st5">™</text>
+<text transform="matrix(1 0 0 1 385.9736 285.3135)" class="st3 st4 st6">Apache</text>
+</svg>
diff --git a/source/blog.html.erb b/source/blog.html.erb
index 7c5b7f1..085844c 100644
--- a/source/blog.html.erb
+++ b/source/blog.html.erb
@@ -3,16 +3,21 @@
 breadcrumb: Blog
 ---
 <div class="row-fluid">
-	<div class="col-md-3">
-		<h4>Apache Aurora Blog</h4>
-		<p>The Aurora blog is a place for release announcements and for project committers to highlight features of the software.</p>
-	</div>
-	<div class="col-md-9">
-		<% blog.articles.each do |post| %>
-		  <article>
-		  	<h2><%= link_to(post.title, post.url)%></h2>
-			<p><em>Posted by <%= post.data.post_author.display_name %>, <%= post.date.strftime("%B %e, %Y") %></em></p>
-		  </article>
-		<% end %>
-	</div>
-</div>
\ No newline at end of file
+  <div class="col-md-3">
+    <h4>Apache Aurora Blog</h4>
+    <p>The Aurora blog is a place for release announcements and for project committers to highlight
+       features of the software.</p>
+  </div>
+  <div class="col-md-9">
+  <% blog.articles.each do |post| %>
+  <article>
+    <h3>
+      <a href="<%= post.url %>"><%= post.title %></a>
+    </h3>
+    <em><time><%= post.date.strftime('%b %e %Y') %></time></em>
+    <%= post.summary %>
+    <a href="<%= post.url %>">Read more</a>
+  </article>
+  <% end %>
+  </div>
+</div>
diff --git a/source/blog/2014-12-08-aurora-0-6-0-incubating-released.md b/source/blog/2014-12-08-aurora-0-6-0-incubating-released.md
index 9114159..dc5b329 100644
--- a/source/blog/2014-12-08-aurora-0-6-0-incubating-released.md
+++ b/source/blog/2014-12-08-aurora-0-6-0-incubating-released.md
@@ -1,6 +1,6 @@
 ---
 layout: post
-title: Aurora 0.6.0-incubating Released
+title: 0.6.0-incubating Released
 permalink: /blog/aurora-0-6-0-incubating-released/
 published: true
 post_author:
@@ -15,35 +15,35 @@
  * Improvements to the Aurora web UI, including visualization of job updates
  * Added automatic service registration in ZooKeeper ([AURORA-587](https://issues.apache.org/jira/browse/AURORA-587))
  * Updates to Aurora client version two, deprecation of v1 coming in future release
- * Lots of new  [documentation](http://aurora.incubator.apache.org/documentation/latest/). Documentation now includes pages for [deploying the Aurora scheduler](http://aurora.incubator.apache.org/documentation/latest/deploying-aurora-scheduler/), [cron jobs](http://aurora.incubator.apache.org/documentation/latest/cron-jobs/), [SLA measurement](http://aurora.incubator.apache.org/documentation/latest/sla/), [storage](http://aurora.incubator.apache.org/documentation/latest/storage/), and [storage configuration and measurement](http://aurora.incubator.apache.org/documentation/latest/storage-config/).
+ * Lots of new [documentation](http://aurora.apache.org/documentation/0.6.0-incubating/). Documentation now includes pages for [deploying the Aurora scheduler](http://aurora.apache.org/documentation/0.6.0-incubating/deploying-aurora-scheduler/), [cron jobs](http://aurora.apache.org/documentation/0.6.0-incubating/cron-jobs/), [SLA measurement](http://aurora.apache.org/documentation/0.6.0-incubating/sla/), [storage](http://aurora.apache.org/documentation/0.6.0-incubating/storage/), and [storage configuration and measurement](http://aurora.apache.org/documentation/0.6.0-incubating/storage-config/).
 
-Full release notes are available in the release [CHANGELOG](https://git-wip-us.apache.org/repos/asf?p=incubator-aurora.git&f=CHANGELOG&hb=0.6.0-rc2).
+Full release notes are available in the release [CHANGELOG](https://gitbox.apache.org/repos/asf?p=aurora.git&f=CHANGELOG&hb=refs/tags/rel/0.6.0-incubating).
 
 ## Highly-available, scheduler-driven updates
 Rolling updates of services is a crucial feature in Aurora. As such, we
 want to take great care when changing its behavior. Previously, Aurora operated
 by delegating this functionality to the client (or any API client, for that
-matter). In this version, the scheduler can take over the 
-responsibility of application update orchestration. Further details were discussed on the [Aurora mailing list](http://mail-archives.apache.org/mod_mbox/incubator-aurora-dev/201407.mbox/%3CCAGRA8uMxwVDokp_iHXhNru2gd-x_nM%2BDYAurpfAO6wuX7%3DnHFw%40mail.gmail.com%3E).
+matter). In this version, the scheduler can take over the
+responsibility of application update orchestration. Further details were discussed on the [Aurora mailing list](http://mail-archives.apache.org/mod_mbox/aurora-dev/201407.mbox/%3CCAGRA8uMxwVDokp_iHXhNru2gd-x_nM%2BDYAurpfAO6wuX7%3DnHFw%40mail.gmail.com%3E).
 
 ## Aurora Web UI Improvements
 Since the scheduler can now orchestrate job updates, it has awareness of the progress and outcome of updates.  This means you can see a progress bar for in-flight updates, and the history of updates for your jobs.  Additionally, the performance of the UI was improved, especially for large roles and jobs [AURORA-458](https://issues.apache.org/jira/browse/AURORA-458).
 
 ## Service Announcement and Management
-Job configurations can now supply an [`announce` parameter](http://aurora.incubator.apache.org/documentation/latest/configuration-reference/#announcer-objects), which is meant to be a way to opt-in for registration in a service discovery system. This has been implemented in the Aurora executor, and will automatically announce tasks via ZooKeeper.
+Job configurations can now supply an [`announce` parameter](http://aurora.apache.org/documentation/0.6.0-incubating/configuration-reference/#announcer-objects), which is meant to be a way to opt in to registration in a service discovery system. This has been implemented in the Aurora executor, and will automatically announce tasks via ZooKeeper.
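+
+As a minimal sketch of opting in (following the pystachio DSL in the configuration reference; the job fields and port name here are illustrative, not taken from this post):
+
+```python
+# Hypothetical hello-world service that announces its 'http' port in ZooKeeper.
+hello = Process(
+  name = 'hello',
+  cmdline = 'python -m SimpleHTTPServer {{thermos.ports[http]}}')
+
+jobs = [Service(
+  cluster = 'devcluster',
+  role = 'www-data',
+  environment = 'prod',
+  name = 'hello_service',
+  task = Task(processes = [hello],
+              resources = Resources(cpu = 0.5, ram = 128*MB, disk = 128*MB)),
+  announce = Announcer(primary_port = 'http')  # opt in to service discovery
+)]
+```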
 
 ## Aurora Client Improvements
-Progress was made in features to [v2](http://aurora.incubator.apache.org/documentation/latest/clientv2/) of the [Aurora client](http://aurora.incubator.apache.org/documentation/latest/client-commands/). We will support version 1 and version 2 for 0.6.0, with version 1 to be removed in 0.7.0.
+Progress was made on features in v2 of the [Aurora client](http://aurora.apache.org/documentation/0.6.0-incubating/client-commands/). We will support version 1 and version 2 for 0.6.0, with version 1 to be removed in 0.7.0.
 
 ## Improved Project Documentation
 New documentation pages including:
 
- * [Deploying the Aurora scheduler](http://aurora.incubator.apache.org/documentation/latest/deploying-aurora-scheduler/)
- * [Cron jobs](http://aurora.incubator.apache.org/documentation/latest/cron-jobs/)
- * [SLA measurement](http://aurora.incubator.apache.org/documentation/latest/sla/)
- * [Storage](http://aurora.incubator.apache.org/documentation/latest/storage/)
- * [Storage configuration and measurement](http://aurora.incubator.apache.org/documentation/latest/storage-config/)
- * [Monitoring](http://aurora.incubator.apache.org/documentation/latest/monitoring/)
+ * [Deploying the Aurora scheduler](http://aurora.apache.org/documentation/0.6.0-incubating/deploying-aurora-scheduler/)
+ * [Cron jobs](http://aurora.apache.org/documentation/0.6.0-incubating/cron-jobs/)
+ * [SLA measurement](http://aurora.apache.org/documentation/0.6.0-incubating/sla/)
+ * [Storage](http://aurora.apache.org/documentation/0.6.0-incubating/storage/)
+ * [Storage configuration and measurement](http://aurora.apache.org/documentation/0.6.0-incubating/storage-config/)
+ * [Monitoring](http://aurora.apache.org/documentation/0.6.0-incubating/monitoring/)
 
 ## Getting Involved
 We encourage you to try out this release and let us know what you think. If you run into any issues, please let us know on the [user mailing list and IRC](https://aurora.apache.org/community). The community also holds weekly IRC meetings at 11AM Pacific every Monday that you are welcome to join.
@@ -51,4 +51,19 @@
 ## Thanks
 Thanks to the 16 contributors who made 0.6.0-incubating possible:
 
-Bill Farner, Maxim Khutornenko, Kevin Sweeney, Mark Chu-Carroll, Joshua Cohen, Zameer Manji, David McLaughlin, Brian Wickman, Joe Smith, Jake Farrell, Matthew Jeffryes, Dominic Hamon, Bjoern Metzdorf, Joseph Glanville, David Robinson, David Pan.
\ No newline at end of file
+* Bill Farner
+* Maxim Khutornenko
+* Kevin Sweeney
+* Mark Chu-Carroll
+* Joshua Cohen
+* Zameer Manji
+* David McLaughlin
+* Brian Wickman
+* Joe Smith
+* Jake Farrell
+* Matthew Jeffryes
+* Dominic Hamon
+* Bjoern Metzdorf
+* Joseph Glanville
+* David Robinson
+* David Pan
diff --git a/source/blog/2015-02-12-aurora-0-7-0-incubating-released.md b/source/blog/2015-02-12-aurora-0-7-0-incubating-released.md
new file mode 100644
index 0000000..2b7338c
--- /dev/null
+++ b/source/blog/2015-02-12-aurora-0-7-0-incubating-released.md
@@ -0,0 +1,62 @@
+---
+layout: post
+title: 0.7.0-incubating Released, Adding Support for Docker Containers
+permalink: /blog/aurora-0-7-0-incubating-released/
+published: true
+post_author:
+  display_name: Maxim Khutornenko
+  twitter: hutorrr
+tags: Release
+---
+
+The latest Apache Aurora release, 0.7.0-incubating, is now [available for download](http://aurora.apache.org/downloads/). This version marks the third Aurora release since becoming part of the Apache Incubator, and includes the following features, improvements, and announcements:
+
+* Beta support for running Docker containers
+* Official support for the Aurora command-line client v2
+* Performance improvements for running Aurora at scale
+* Progress made toward Apache Incubator graduation
+
+Full release notes are available in the release [CHANGELOG](https://gitbox.apache.org/repos/asf?p=aurora.git&f=CHANGELOG&hb=refs/tags/rel/0.7.0-incubating).
+
+
+## Docker support within Aurora
+
+Docker has quickly become a popular technology for packaging and managing applications, making it simple to create a snapshot of your app and ship it to different machines in your datacenter. In the Aurora 0.7.0 release, we’ve added a container abstraction that supports Docker out of the box (and potentially future container technologies that may emerge), allowing you to specify a Docker image and schedule it to run within your Aurora cluster.
+
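+As a quick sketch (a hypothetical job fragment; the image name is illustrative):
+
+```python
+# Run the task inside a Docker image via the new container abstraction.
+jobs = [Service(
+  cluster = 'devcluster',
+  role = 'www-data',
+  environment = 'prod',
+  name = 'hello_docker',
+  task = hello_task,  # a Task defined as usual
+  container = Container(docker = Docker(image = 'python:2.7'))
+)]
+```
+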
+This feature is marked as experimental in its first release, but we hope to see companies running it in production and helping improve it moving forward. Native Docker support within the Mesos core was added in the [Mesos 0.20.0 release](http://mesos.apache.org/blog/mesos-0-20-0-released/).
+
+## Official support for command-line client v2
+
+Over several releases, Aurora has included a new command-line client version two, rewritten from the ground up to be simpler and easier to use. In the Aurora 0.7.0 release, we now provide official support for version two of the client, and have fully removed the original version. The previous client version was deprecated in the 0.6.0-incubating release.
+
+We’ve also made the client even easier to use by improving the help output that you receive. You can simply type `aurora -h` for a full list of available commands and helpful guidance. In addition, we’ve documented [usage of the client commands](http://aurora.apache.org/documentation/0.7.0-incubating/client-commands/) to provide helpful information about managing jobs.
+
+## Performance Improvements
+
+Performance is continually an area of focus and improvement for the Aurora team, with production Aurora clusters measuring tens of thousands of machines and tens-to-hundreds of thousands of tasks. Two specific changes in this release yield significant improvements on large clusters: [AURORA-121](https://issues.apache.org/jira/browse/AURORA-121), which made task preemption more efficient, and [AURORA-930](https://issues.apache.org/jira/browse/AURORA-930), which fixed GC performance issues with snapshot deduplication.
+
+## Progress toward becoming Top-Level Project
+
+As the Apache Aurora project continues to grow in adopters and contributors, we’ve begun conversations about next steps toward graduating from the Apache Incubator. A graduation resolution is expected to be presented to the Aurora community on the developer mailing list (dev@aurora.apache.org; subscribe by emailing dev-subscribe@aurora.apache.org) and voted on in the coming weeks, then hopefully shared with the Apache board shortly after.
+
+## Getting Involved
+
+We encourage you to try out this release and let us know what you think. If you run into any issues, please let us know on the [user mailing list and IRC](https://aurora.apache.org/community). The community also holds weekly IRC meetings at 11AM Pacific every Monday that we encourage you to join. If you're using Aurora, we'd love to hear from you on how to make it better.
+
+## Thanks
+
+Thanks to the 13 contributors who made 0.7.0-incubating possible:
+
+* Bill Farner
+* Brian Wickman
+* Dave Lester
+* David McLaughlin
+* Florian Pfeiffer
+* Issam EL ATIF
+* Jake Farrell
+* Jeffrey Schroeder
+* Joshua Cohen
+* Kevin Sweeney
+* Maxim Khutornenko
+* Steve Niemitz
+* Zameer Manji
diff --git a/source/blog/2015-03-18-2015-upcoming-apache-aurora-meetups.md b/source/blog/2015-03-18-2015-upcoming-apache-aurora-meetups.md
new file mode 100644
index 0000000..e5f442d
--- /dev/null
+++ b/source/blog/2015-03-18-2015-upcoming-apache-aurora-meetups.md
@@ -0,0 +1,37 @@
+---
+layout: post
+title: Upcoming Apache Aurora Meetups in NYC and the Bay Area
+permalink: /blog/2015-upcoming-apache-aurora-meetups/
+published: true
+post_author:
+  display_name: Dave Lester
+  twitter: davelester
+tags: Community, Meetups
+---
+
+Meetups are opportunities to bring together members of an open source community to learn about the project, share best practices, and meet one another; naturally we're excited to see a growing number of events featuring Apache Aurora, and we hope you'll join us at a meetup near you!
+
+There are two upcoming Apache Aurora meetups on our radar:
+
+ * New York City, March 25th, 2015 - [NYC MUG, Apache Aurora meetup](http://www.meetup.com/Apache-Mesos-NYC-Meetup/events/220801840/)
+ * Bay Area, April 30th, 2015 - [Bay Area Apache Aurora Users meetup, From Monolith to Microservices w/ Aurora](http://www.meetup.com/Bay-Area-Apache-Aurora-Users-Group/events/221219480/)
+
+## Apache Aurora Users Groups
+
+Live in San Francisco or the surrounding Bay Area? Join the recently-established [Bay Area Apache Aurora Users Group](http://www.meetup.com/Bay-Area-Apache-Aurora-Users-Group/), which will be home to future Apache Aurora meetups, in addition to the existing [Apache Mesos User Group](http://www.meetup.com/Bay-Area-Mesos-User-Group/).
+
+If you're interested in establishing an Apache Aurora Users Group in a city near you or you’re hosting an Apache Aurora talk, please let us know on the project mailing list (dev@aurora.apache.org) or IRC (#aurora on freenode.net) and we'll help you promote your events.
+
+## Recent Meetup Videos
+
+On February 19th, the Bay Area Apache Mesos User Group hosted a meetup on Apache Aurora, and videos from the two talks are now online. Check them out:
+
+_Using Apache Aurora + Docker to migrate to a microservice architecture by Steve Niemitz (TellApart)_
+
+<iframe width="560" height="315" src="https://www.youtube.com/embed/ZZXtXLvTXAE" frameborder="0" allowfullscreen></iframe>
+
+_Running Apache Aurora and Mesos at Twitter by Joe Smith (Twitter)_
+
+<iframe width="560" height="315" src="https://www.youtube.com/embed/E4lxX6epM_U" frameborder="0" allowfullscreen></iframe>
+
+Hope to see you at a future meetup!
\ No newline at end of file
diff --git a/source/blog/2015-05-14-aurora-0-8-0-released.md b/source/blog/2015-05-14-aurora-0-8-0-released.md
new file mode 100644
index 0000000..63d3539
--- /dev/null
+++ b/source/blog/2015-05-14-aurora-0-8-0-released.md
@@ -0,0 +1,75 @@
+---
+layout: post
+title: 0.8.0 Released
+permalink: /blog/aurora-0-8-0-released/
+published: true
+post_author:
+  display_name: Jake Farrell
+  twitter: eatfresh
+tags: Release
+---
+
+The latest Apache Aurora release, 0.8.0, is now available for [download](http://aurora.apache.org/downloads/). This version marks the first Aurora release since becoming a top-level project at the Apache Software Foundation, and includes the following features, improvements, and announcements:
+
+* Added support for external update coordination
+* Security for scheduler operations, using Apache Shiro
+* Scheduler performance improvements
+* 0.8.0 deprecations
+
+Full release notes are available in the release [CHANGELOG](https://gitbox.apache.org/repos/asf?p=aurora.git&f=CHANGELOG&hb=refs/tags/rel/0.8.0).
+
+## Support for external update coordination
+
+With the introduction of scheduler-driven job update orchestration, Aurora now optionally supports an inverted dependency model where the updater pauses job update progress upon reaching certain checkpoints and waits for the client or an external service to explicitly "ack" before proceeding. Further details are outlined in the [coordinated job updates](http://aurora.apache.org/documentation/0.8.0/client-commands/#coordinated-job-updates) documentation.
+
+This feature may also be useful for building external update coordination services, where Aurora service job upgrades are controlled by application-specific health tracking systems that throttle individual job updates based on internal health/traffic metrics.
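+
+A minimal sketch of opting in (assuming the `pulse_interval_secs` field described in the coordinated job updates documentation linked above):
+
+```python
+# With a positive pulse interval, the scheduler pauses the update at checkpoints
+# unless an external coordinator keeps acknowledging it via the pulseJobUpdate RPC.
+update_config = UpdateConfig(
+  batch_size = 1,
+  pulse_interval_secs = 60  # pause the update if no pulse arrives within 60s
+)
+```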
+
+## Security for scheduler operations, using Apache Shiro
+
+Aurora now uses [Apache Shiro](http://shiro.apache.org) to manage security for scheduler operations, including authenticated write access (HTTP Basic Auth or Kerberos), and fine-grained ACLs for scheduler RPCs.
+
+Further details are outlined in the [security documentation](http://aurora.apache.org/documentation/0.8.0/security/).
+
+## Scheduler performance improvements
+
+Performance is continually an area of focus and improvement for the Aurora team, with production Aurora clusters measuring tens of thousands of machines and tens-to-hundreds of thousands of tasks. Two specific [scheduler performance improvements](https://issues.apache.org/jira/browse/AURORA-999) were a focus of the 0.8.0 release, including [improved preemption efficiency](https://issues.apache.org/jira/browse/AURORA-1219), and creating a [scheduler performance benchmark framework](https://issues.apache.org/jira/browse/AURORA-969).
+
+## 0.8.0 deprecations
+
+The latest Aurora release includes [several removals and deprecations](https://issues.apache.org/jira/browse/AURORA-905), among them:
+
+* [AURORA-975](https://issues.apache.org/jira/browse/AURORA-975), Removing populatedDEPRECATED from PopulateJobResult
+* [AURORA-1190](https://issues.apache.org/jira/browse/AURORA-1190), Added a deprecation warning when the client-side updater is used; will be removed in 0.9.0
+* [AURORA-1240](https://issues.apache.org/jira/browse/AURORA-1240), Added a deprecation warning for the `restart_threshold` setting
+* [AURORA-1189](https://issues.apache.org/jira/browse/AURORA-1189), Added a warning in the scheduler when the JVM version is < 1.8
+* Removal of the custom security API in favor of integration with Apache Shiro
+
+## Getting Involved
+
+We encourage you to try out this release and let us know what you think. If you run into any issues, please let us know on the [user mailing list and IRC](https://aurora.apache.org/community/). The community also holds weekly IRC meetings at 11AM Pacific every Monday that you are welcome to join.
+
+## Thanks
+
+Thanks to the 21 contributors who made Apache Aurora 0.8.0 possible:
+
+* Ben Mahler
+* Bhuvan Arumugam
+* Bill Farner
+* Brian Brazil
+* Brian Wickman
+* Dave Lester
+* David Robinson
+* Florian Pfeiffer
+* Jake Farrell
+* Joe Smith
+* Joshua Cohen
+* Kevin Sweeney
+* Maxim Khutornenko
+* Ricardo Cervera-Navarro
+* Sanyogeeta Lawande
+* Stephan Erb
+* Steve Niemitz
+* Thorhallur Sverrisson
+* Tony Dong
+* Zameer Manji
+* Zeke Harris
diff --git a/source/blog/2015-07-24-aurora-0-9-0-released.md b/source/blog/2015-07-24-aurora-0-9-0-released.md
new file mode 100644
index 0000000..e119f15
--- /dev/null
+++ b/source/blog/2015-07-24-aurora-0-9-0-released.md
@@ -0,0 +1,59 @@
+---
+layout: post
+title: 0.9.0 Released
+permalink: /blog/aurora-0-9-0-released/
+published: true
+post_author:
+  display_name: Jake Farrell
+  twitter: eatfresh
+tags: Release
+---
+
+The latest Apache Aurora release, 0.9.0, is now available for [download](http://aurora.apache.org/downloads/). This version includes many new features and improvements including:
+
+* Now requires JRE 8 or greater.
+* The GC executor is fully replaced by task state reconciliation (AURORA-1047).
+* The scheduler command line argument 'enable_legacy_constraints' has been
+  removed, and the scheduler no longer automatically injects 'host' and 'rack'
+  constraints for production services. (AURORA-1074)
+* SLA metrics for non-production jobs have been disabled by default. They can
+  be enabled via the scheduler command line. Metric names have changed from
+  '...nonprod_ms' to '...ms_nonprod' (AURORA-1350).
+
+Full release notes are available in the release [CHANGELOG](https://gitbox.apache.org/repos/asf?p=aurora.git&f=CHANGELOG&hb=refs/tags/rel/0.9.0).
+
+
+## 0.9.0 deprecations
+
+The latest Aurora release includes [several removals and deprecations](https://issues.apache.org/jira/browse/AURORA-1079), among them:
+
+* AURORA-1074 - Remove the "enable_legacy_constraints" flag.
+* AURORA-1139 - Remove backwards compatibility shims from the JobUpdateKey introduction.
+
+
+## Getting Involved
+
+We encourage you to try out this release and let us know what you think. If you run into any issues, please let us know on the [user mailing list and IRC](https://aurora.apache.org/community/). The community also holds weekly IRC meetings at 11AM Pacific every Monday that you are welcome to join.
+
+## Thanks
+
+Thanks to the 18 contributors who made Apache Aurora 0.9.0 possible:
+
+* Bill Farner
+* Jake Farrell
+* Kevin Sweeney
+* Joe Smith
+* Andrew Jorgensen
+* Steve Salevan
+* Brian Wickman
+* Brian Brazil
+* Stephan Erb
+* Zameer Manji
+* Maxim Khutornenko
+* David McLaughlin
+* Michael Leinartas
+* Benjamin Staffin
+* Dave Lester
+* Jeffrey Schroeder
+* Will Swank
+* Ben Mahler
diff --git a/source/blog/2015-08-26-aurora-at-mesoscon-seattle.md b/source/blog/2015-08-26-aurora-at-mesoscon-seattle.md
new file mode 100644
index 0000000..408be38
--- /dev/null
+++ b/source/blog/2015-08-26-aurora-at-mesoscon-seattle.md
@@ -0,0 +1,26 @@
+---
+layout: post
+title: "Recap: Aurora at MesosCon Seattle"
+permalink: /blog/aurora-at-mesoscon-seattle/
+published: true
+post_author:
+  display_name: Dave Lester
+  twitter: davelester
+tags: Presentations, Video
+---
+
+Last week was the second-ever #MesosCon in Seattle, WA, and the Apache Aurora project was represented in several talks by committers and adopters. Check them out! We look forward to participating in future Mesos community events.
+
+READMORE
+## MesosCon Presentations
+
+<iframe width="560" height="315" src="https://www.youtube.com/embed/nNrh-gdu9m4" frameborder="0" allowfullscreen></iframe>
+
+<iframe width="560" height="315" src="https://www.youtube.com/embed/ZHABassUOlo" frameborder="0" allowfullscreen></iframe>
+
+<iframe width="560" height="315" src="https://www.youtube.com/embed/y1hi7K1lPkk" frameborder="0" allowfullscreen></iframe>
+
+<iframe width="560" height="315" src="https://www.youtube.com/embed/ZQenXkoG_7o" frameborder="0" allowfullscreen></iframe>
+
+## Hackathon Winners and Demos
+Congrats to the Paypal team, who won second place at the MesosCon hackathon for their [Docker Compose Executor](https://github.com/mohitsoni/compose-executor). The team [demonstrated this project onstage](https://www.youtube.com/watch?v=KES1Ud4MtDE&t=10m33s), running on Apache Aurora.
diff --git a/source/blog/2015-12-12-aurora-0-10-0-released.md b/source/blog/2015-12-12-aurora-0-10-0-released.md
new file mode 100644
index 0000000..fac8c0a
--- /dev/null
+++ b/source/blog/2015-12-12-aurora-0-10-0-released.md
@@ -0,0 +1,56 @@
+---
+layout: post
+title: 0.10.0 Released
+permalink: /blog/aurora-0-10-0-released/
+published: true
+post_author:
+  display_name: Bill Farner
+  twitter: wfarner
+tags: Release
+---
+
+The latest Apache Aurora release, 0.10.0, is now available for
+[download](http://aurora.apache.org/downloads/). Here are some highlights in this release:
+
+* Upgraded Mesos to 0.23.0. Note: the Aurora executor now requires openssl runtime dependencies that
+  were not previously required. You will need libcurl available on every Mesos slave (or Docker
+  container) to successfully launch the Aurora executor. See
+  https://github.com/apache/mesos/blob/ebcf8cc2f2f6c236f6e9315447c247e6348141e1/docs/getting-started.md
+  for more details on Mesos runtime dependencies.
+* Resource quota is no longer consumed by production jobs with a dedicated constraint (AURORA-1457).
+* In the scheduler API, the `ConfigGroup.instanceIds` field has been deprecated, please use
+  `ConfigGroup.instances` instead.
+* In the scheduler API, all `SessionKey` arguments are now ignored by the scheduler, as `SessionKey`
+  has been replaced by [security features](/documentation/0.10.0/security/) added in 0.8.0.
+* The `--root` argument has been removed from the Thermos observer.
+
+Full release notes are available in the release [CHANGELOG](https://gitbox.apache.org/repos/asf?p=aurora.git&f=CHANGELOG&hb=refs/tags/rel/0.10.0).
+
+
+## Getting Involved
+
+We encourage you to try out this release and let us know what you think. If you run into any issues, please let us know on the [user mailing list and IRC](https://aurora.apache.org/community/). The community also holds weekly IRC meetings at 11AM Pacific every Monday that you are welcome to join.
+
+## Thanks
+
+Thanks to the 18 contributors who made Apache Aurora 0.10.0 possible:
+
+* Andrew Jorgensen
+* Bill Farner
+* Brian Brazil
+* David Robinson
+* Dmitriy Shirchenko
+* Jake Farrell
+* Jeffrey Schroeder
+* Joe Smith
+* John Sirois
+* Joshua Cohen
+* Kevin Sweeney
+* Mauricio Garavaglia
+* Maxim Khutornenko
+* Stephan Erb
+* Steve Niemitz
+* Thorhallur Sverrisson
+* Zameer Manji
+* Zane Silver
diff --git a/source/blog/2015-12-23-aurora-0-11-0-released.md b/source/blog/2015-12-23-aurora-0-11-0-released.md
new file mode 100644
index 0000000..653d7ff
--- /dev/null
+++ b/source/blog/2015-12-23-aurora-0-11-0-released.md
@@ -0,0 +1,67 @@
+---
+layout: post
+title: 0.11.0 Released
+permalink: /blog/aurora-0-11-0-released/
+published: true
+post_author:
+  display_name: Bill Farner
+  twitter: wfarner
+tags: Release
+---
+
+The latest Apache Aurora release, 0.11.0, is now available for
+[download](http://aurora.apache.org/downloads/). Here are some highlights in this release:
+
+**New/updated**
+
+* Added built-in support for
+  [log rotation](/documentation/0.11.0/configuration-reference/#logger) of task stderr/stdout.
+* Added support for performing task health checks using a
+  [shell command](/documentation/0.11.0/configuration-reference/#healthcheckconfig-objects); see the
+  sketch after this list.
+* Added support for accepting an executor configuration in JSON via the scheduler command line
+  argument `--custom_executor_config`, which will override all other command line arguments and
+  default values pertaining to the executor.
+* Added a new scheduler flag `--framework_announce_principal` to support use of authorization and
+  rate limiting in Mesos.
+* Upgraded Mesos to 0.24.1.
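+
+As a minimal sketch of the new shell-based health checks (assuming the restructured `HealthCheckConfig` schema described under deprecations below; the command itself is illustrative):
+
+```python
+# Hypothetical snippet: swap the default HTTP health check for a shell command.
+# Attach this to a Service/Job via its health_check_config field.
+health_check_config = HealthCheckConfig(
+  health_checker = HealthCheckerConfig(
+    shell = ShellHealthChecker(shell_command = 'stat /tmp/my_service.ready')
+  )
+)
+```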
+
+**Deprecations and removals**
+
+* The client-side updater has been removed, along with the CLI commands that used it:
+  `aurora job update` and `aurora job cancel-update`.  Users are encouraged to take
+  advantage of scheduler-driven updates (see `aurora update -h` for usage), a feature that has
+  been stable for several releases.
+* To support configuration of shell-based health checkers, the `HealthCheckConfig` schema has been
+  restructured to more cleanly allow configuring varied types of health checkers.  The following
+  fields from `HealthCheckConfig` are now deprecated in favor of setting them as part of an
+  `HttpHealthChecker`: `endpoint`, `expected_response`, and `expected_response_code`.
+* In the scheduler API, the field `JobUpdateSettings.maxWaitToInstanceRunningMs`
+  (`UpdateConfig.restart_threshold` in client-side configuration) is now deprecated.
+  This setting was brittle in practice, and is ignored by the 0.11.0 scheduler.
+
+Full release notes are available in the release
+[CHANGELOG](https://gitbox.apache.org/repos/asf?p=aurora.git&f=CHANGELOG&hb=refs/tags/rel/0.11.0).
+
+## Getting Involved
+
+We encourage you to try out this release and let us know what you think. If you run into any issues,
+please let us know on the [user mailing list and IRC](https://aurora.apache.org/community/). The
+community also holds weekly IRC meetings at 11AM Pacific every Monday that you are welcome to join.
+
+## Thanks
+
+Thanks to the 13 contributors who made Apache Aurora 0.11.0 possible:
+
+* Bill Farner
+* Chris Bannister
+* Dmitriy Shirchenko
+* George Sirois
+* John Sirois
+* Joshua Cohen
+* Kevin Sweeney
+* Maxim Khutornenko
+* Paul Cavallaro
+* R.B. Boyer
+* Renan DelValle
+* Tengfei Mu
+* Zameer Manji
diff --git a/source/blog/2016-03-03-aurora-0-12-0-released.md b/source/blog/2016-03-03-aurora-0-12-0-released.md
new file mode 100644
index 0000000..c6252ec
--- /dev/null
+++ b/source/blog/2016-03-03-aurora-0-12-0-released.md
@@ -0,0 +1,98 @@
+---
+layout: post
+title: 0.12.0 Released
+permalink: /blog/aurora-0-12-0-released/
+published: true
+post_author:
+  display_name: Bill Farner
+  twitter: wfarner
+tags: Release
+---
+
+The latest Apache Aurora release, 0.12.0, is now available for
+[download](http://aurora.apache.org/downloads/). Here are some highlights in this release:
+
+  - Upgraded Mesos to 0.25.0.
+  - Upgraded the scheduler ZooKeeper client from 3.3.4 to 3.4.6.
+  - Added support for configuring the Mesos role by passing `-mesos_role` to the Aurora scheduler at start time.
+    This enables resource reservation for Aurora when running in a shared Mesos cluster.
+  - Aurora task metadata is now mapped to Mesos task labels. Labels are prefixed with
+    `org.apache.aurora.metadata.` to prevent clashes with other, external label sources.
+  - Added new scheduler flag `-default_docker_parameters` to allow a cluster operator to specify a
+    universal set of parameters that should be used for every container that does not have parameters
+    explicitly configured at the job level.
+  - Added support for jobs to specify arbitrary ZooKeeper paths for service registration.  See
+    [here](https://github.com/apache/aurora/blob/master/docs/configuration-reference.md#announcer-objects)
+    for details.
+  - The log destination is configurable for the thermos runner. See the configuration reference for
+    details on how to configure the destination per process. Command line options may also be passed
+    through the scheduler in order to configure the global default behavior.
+  - Environment variables can be passed through to task processes by passing `--preserve_env`
+    to thermos.
+  - Changed scheduler logging to use logback.
+    Operators wishing to customize logging may do so with standard logback configuration as described
+    [here](http://logback.qos.ch/manual/configuration.html).
+  - When using `--read-json`, `aurora` can now load multiple jobs from one JSON file,
+    similar to the usual pystachio structure: `{"jobs": [job1, job2, ...]}`. The
+    older single-job JSON format is also still supported.
+  - The `aurora config list` command now supports `--read-json`.
+  - Added scheduler command line argument `-shiro_after_auth_filter`. Optionally specify a class
+    implementing javax.servlet.Filter that will be included in the Filter chain following the Shiro
+    auth filters.
+  - The `addInstances` thrift RPC now increases the job instance count (scale out) based on the
+    task template pointed to by the instance `key`.
+
+Deprecations and removals:
+
+  - Deprecated `AddInstancesConfig` argument in `addInstances` thrift RPC.
+  - Deprecated `TaskQuery` argument in `killTasks` thrift RPC to disallow killing tasks across
+    multiple roles. The new safer approach is using `JobKey` with `instances` instead.
+  - Removed the deprecated field `ConfigGroup.instanceIds` from the API.
+  - Removed the following deprecated `HealthCheckConfig` client-side configuration fields: `endpoint`,
+    `expected_response`, `expected_response_code`.  These are now set exclusively in like-named fields
+    of `HttpHealthChecker`.
+  - Removed the deprecated `JobUpdateSettings.maxWaitToInstanceRunningMs` thrift API field
+    (`UpdateConfig.restart_threshold` in client-side configuration). This aspect of job restarts is now
+    controlled exclusively via the client with `aurora job restart --restart-threshold=[seconds]`.
+  - Deprecated the executor flag `--announcer-enable`. Enabling the announcer previously required both
+    flags `--announcer-enable` and `--announcer-ensemble`, but now only `--announcer-ensemble` must be
+    set. `--announcer-enable` is now a no-op flag and will be removed in a future version.
+  - Removed scheduler command line arguments:
+    - `-enable_cors_support`.  Enabling CORS is now implicit by setting the argument
+      `-enable_cors_for`.
+    - `-deduplicate_snapshots` and `-deflate_snapshots`.  These features are good to always enable.
+    - `-enable_job_updates` and `-enable_job_creation`
+    - `-extra_modules`
+    - `-logtostderr`, `-alsologtostderr`, `-vlog`, `-vmodule`, and `-use_glog_formatter`. Removed
+       in favor of the new logback configuration.
+
+Full release notes are available in the release
+[CHANGELOG](https://gitbox.apache.org/repos/asf?p=aurora.git&f=CHANGELOG&hb=refs/tags/rel/0.12.0).
+
+## Getting Involved
+
+We encourage you to try out this release and let us know what you think. If you run into any issues,
+please let us know on the [user mailing list and IRC](https://aurora.apache.org/community/). The
+community also holds weekly IRC meetings at 11AM Pacific every Monday that you are welcome to join.
+
+## Thanks
+
+Thanks to the 17 contributors who made Apache Aurora 0.12.0 possible:
+
+* Amol Deshmukh
+* Anant Vyas
+* Benjamin Staffin
+* Bill Farner
+* Dmitriy Shirchenko
+* George Sirois
+* John Sirois
+* Joshua Cohen
+* Kasisnu Singh
+* Kunal Thakar
+* Martin Hrabovcin
+* Mauricio Garavaglia
+* Maxim Khutornenko
+* Stephan Erb
+* Tony Dong
+* Zameer Manji
+* Zhitao Li
diff --git a/source/blog/2016-04-14-aurora-0-13-0-released.md b/source/blog/2016-04-14-aurora-0-13-0-released.md
new file mode 100644
index 0000000..1fd527b
--- /dev/null
+++ b/source/blog/2016-04-14-aurora-0-13-0-released.md
@@ -0,0 +1,102 @@
+---
+layout: post
+title: 0.13.0 Released
+permalink: /blog/aurora-0-13-0-released/
+published: true
+post_author:
+  display_name: Jake Farrell
+  twitter: eatfresh
+tags: Release
+---
+
+The latest Apache Aurora release, 0.13.0, is now available for
+[download](http://aurora.apache.org/downloads/). Here are some highlights in this release:
+
+  - Upgraded Mesos to 0.26.0
+  - Added a new health endpoint (`/leaderhealth`) which can be used for load balancer health
+    checks to always forward requests to the leading scheduler.
+  - Added a new `aurora job add` client command to scale out an existing job.
+  - Upgraded the scheduler ZooKeeper client from 3.4.6 to 3.4.8.
+  - Added support for dedicated constraints not exclusive to a particular role.
+    See [here](/documentation/0.13.0/features/constraints#dedicated-attribute) for more details.
+  - Added a new argument `--announcer-hostname` to the thermos executor to override the hostname in
+    the service registry endpoint. See [here](/documentation/0.13.0/reference/configuration#announcer-objects) for details.
+  - Descheduling a cron job that was not actually scheduled will no longer return an error.
+  - Added a new argument `-thermos_home_in_sandbox` to the scheduler for optionally changing
+    HOME to the sandbox during thermos executor/runner execution. This is useful in cases
+    where the root filesystem inside of the container is read-only, as it moves PEX extraction into
+    the sandbox. See [here](/documentation/0.13.0/operations/configuration#docker-containers)
+    for more detail.
+  - Support for ZooKeeper authentication in the executor announcer. See
+    [here](/documentation/0.13.0/operations/security#announcer-authentication) for details.
+  - The scheduler H2 in-memory database is now using
+    [MVStore](http://www.h2database.com/html/mvstore.html).
+    In addition, scheduler thrift snapshots now support full DB dumps for faster restarts.
+  - Added scheduler argument `-require_docker_use_executor` that indicates whether the scheduler
+    should accept tasks that use the Docker containerizer without an executor (experimental).
+  - Jobs referencing an invalid tier name will be rejected by the scheduler.
+  - Added a new scheduler argument `--populate_discovery_info`. If set to true, Aurora will start
+    to populate the `DiscoveryInfo` field on the Mesos `TaskInfo`. This can be used for alternative
+    service discovery solutions like Mesos-DNS.
+  - Added support for automatic schema upgrades and downgrades when restoring a snapshot that contains
+    a DB dump.
+
+
+Deprecations and removals:
+
+  - Removed deprecated (now redundant) fields:
+    - `Identity.role`
+    - `TaskConfig.environment`
+    - `TaskConfig.jobName`
+    - `TaskQuery.owner`
+  - Removed deprecated `AddInstancesConfig` parameter to `addInstances` RPC.
+  - Removed deprecated executor argument `-announcer-enable`, which was a no-op in 0.12.0.
+  - Removed deprecated API constructs related to Locks:
+    - removed RPCs that managed locks
+      - `acquireLock`
+      - `releaseLock`
+      - `getLocks`
+    - removed `Lock` parameters to RPCs
+      - `createJob`
+      - `scheduleCronJob`
+      - `descheduleCronJob`
+      - `restartShards`
+      - `killTasks`
+      - `addInstances`
+      - `replaceCronTemplate`
+  - Task ID strings are no longer prefixed by a timestamp.
+  - Changes to the way the scheduler reads command line arguments:
+    - Removed support for reading command line argument values from files.
+    - Removed support for specifying command line argument names with fully-qualified class names.
+
+Full release notes are available in the release
+[CHANGELOG](https://gitbox.apache.org/repos/asf?p=aurora.git;a=blob;f=CHANGELOG;h=7afa22f58052be26d37f85df4c8406c0bf79a57e;hb=refs/heads/master).
+
+## Getting Involved
+
+We encourage you to try out this release and let us know what you think. If you run into any issues,
+please let us know on the [user mailing list and IRC](https://aurora.apache.org/community/). The
+community also holds weekly IRC meetings at 11AM Pacific every Monday that you are welcome to join.
+
+## Thanks
+
+Thanks to the 18 contributors who made Apache Aurora 0.13.0 possible:
+
+* Amol Deshmukh
+* Ashwin Murthy
+* Benjamin Staffin
+* Bill Farner
+* Dmitriy Shirchenko
+* Florian Pfeiffer
+* George Sirois
+* Jake Farrell
+* John Sirois
+* Joshua Cohen
+* Krish
+* Kunal Thakar
+* Maxim Khutornenko
+* Se Choi
+* Stephan Erb
+* Tony Dong
+* Zameer Manji
+* Zhitao Li
diff --git a/source/blog/2016-06-14-aurora-0-14-0-released.md b/source/blog/2016-06-14-aurora-0-14-0-released.md
new file mode 100644
index 0000000..ebfe583
--- /dev/null
+++ b/source/blog/2016-06-14-aurora-0-14-0-released.md
@@ -0,0 +1,89 @@
+---
+layout: post
+title: 0.14.0 Released
+permalink: /blog/aurora-0-14-0-released/
+published: true
+post_author:
+  display_name: Stephan Erb
+  twitter: ErbStephan
+tags: Release
+---
+
+The latest Apache Aurora release, 0.14.0, is now available for
+[download](http://aurora.apache.org/downloads/). Here are some highlights in this release:
+
+  - Upgraded Mesos to 0.27.2
+  - Added a new optional [Apache Curator](https://curator.apache.org/) backend for performing
+    scheduler leader election. You can enable this with the new `-zk_use_curator` scheduler argument.
+  - Added a `--nosetuid-health-checks` flag to control whether the executor runs health checks as
+    the job's role's user.
+  - New scheduler command line argument `-offer_filter_duration` to control the time after which we
+    expect Mesos to re-offer unused resources. A short duration improves scheduling performance in
+    smaller clusters, but might lead to resource starvation for other frameworks if you run multiple
+    ones in your cluster. Uses the Mesos default of 5s.
+  - New scheduler command line option `-framework_name` to change the name used for registering
+    the Aurora framework with Mesos. The current default value is 'TwitterScheduler'.
+  - Added experimental support for launching tasks using filesystem images and the Mesos [unified
+    containerizer](https://github.com/apache/mesos/blob/master/docs/container-image.md). See that
+    linked documentation for details on configuring Mesos to use the unified containerizer. Note that
+    earlier versions of Mesos do not fully support the unified containerizer. Mesos 0.28.x or later is
+    recommended for anyone adopting task images via the Mesos containerizer.
+  - Upgraded to pystachio 0.8.1 to pick up support for the new [Choice type](https://github.com/wickman/pystachio/blob/v0.8.1/README.md#choices).
+  - The `container` property of a `Job` is now a Choice of either a `Container` holder, or a direct
+    reference to either a `Docker` or `Mesos` container; see the sketch after this list.
+  - New scheduler command line argument `-ip` to control which IP address to bind the scheduler's
+    HTTP server to.
+  - Added experimental support for the Mesos GPU resource. This feature will be available in Mesos 1.0
+    and is disabled by default. Use the `-allow_gpu_resource` flag to enable it. Once this feature is
+    enabled, creating jobs with GPU resources will make the scheduler snapshot backwards incompatible.
+    For further details, please see the full release notes.
+  - Experimental support for a webhook feature which POSTs all task state changes to a user-defined
+    endpoint.
+  - Added support for specifying the default tier name in tier configuration file (`tiers.json`). The
+    `default` property is required and is initialized with the `preemptible` tier (`preemptible` tier
+    tasks can be preempted but their resources cannot be revoked).
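+
+A rough sketch of the container changes above (a hypothetical job fragment; the type and field names follow the Choice-based `container` property and image support noted in this list, and the image name is illustrative):
+
+```python
+# Direct Mesos container reference carrying a task filesystem image,
+# for use with the Mesos unified containerizer.
+jobs = [Service(
+  cluster = 'devcluster',
+  role = 'www-data',
+  environment = 'prod',
+  name = 'hello_image',
+  task = hello_task,  # a Task defined as usual
+  container = Mesos(image = DockerImage(name = 'myorg/hello', tag = 'latest'))
+)]
+```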
+
+Deprecations and removals:
+
+  - Deprecated the `--restart-threshold` option in the `aurora job restart` command to match the job
+    updater behavior. This option has no effect now and will be removed in a future release.
+  - Deprecated the `-framework_name` default value 'TwitterScheduler'. In a future release this
+    will change to 'aurora'. Please be aware that depending on your usage of Mesos, this will
+    be a backward incompatible change. For details, see MESOS-703.
+  - The `-thermos_observer_root` command line arg has been removed from the scheduler. This was a
+    relic from the time when executor checkpoints were written globally, rather than into a task's
+    sandbox.
+  - Setting the `container` property of a `Job` to a `Container` holder is deprecated in favor of
+    setting it directly to the appropriate (i.e. `Docker` or `Mesos`) container type.
+  - Deprecated `numCpus`, `ramMb` and `diskMb` fields in `TaskConfig` and `ResourceAggregate` thrift
+    structs. Use `set<Resource> resources` to specify task resources or quota values.
+  - The endpoint `/slaves` is deprecated. Please use `/agents` instead.
+  - Deprecated `production` field in `TaskConfig` thrift struct. Use `tier` field to specify task
+    scheduling and resource handling behavior.
+  - The scheduler `resources_*_ram_gb` and `resources_*_disk_gb` metrics have been renamed to
+    `resources_*_ram_mb` and `resources_*_disk_mb` respectively. Note the unit change: GB -> MB.
+
+Full release notes are available in the release
+[CHANGELOG](https://gitbox.apache.org/repos/asf?p=aurora.git&f=CHANGELOG&hb=rel/0.14.0).
+
+## Getting Involved
+
+We encourage you to try out this release and let us know what you think. If you run into any issues,
+please let us know on the [user mailing list and IRC](https://aurora.apache.org/community/). The
+community also holds weekly IRC meetings at 11AM Pacific every Monday that you are welcome to join.
+
+## Thanks
+
+Thanks to the 11 contributors who made Apache Aurora 0.14.0 possible:
+
+* Chris Bannister
+* Dmitriy Shirchenko
+* John Sirois
+* Joshua Cohen
+* Maxim Khutornenko
+* Mehrdad Nurolahzade
+* Raymond Khalife
+* Renan DelValle
+* Stephan Erb
+* Zameer Manji
+* se choi
diff --git a/source/blog/2016-07-06-aurora-0-15-0-released.md b/source/blog/2016-07-06-aurora-0-15-0-released.md
new file mode 100644
index 0000000..831e231
--- /dev/null
+++ b/source/blog/2016-07-06-aurora-0-15-0-released.md
@@ -0,0 +1,43 @@
+---
+layout: post
+title: 0.15.0 Released
+permalink: /blog/aurora-0-15-0-released/
+published: true
+post_author:
+  display_name: Maxim Khutornenko
+  twitter: hutorrr
+tags: Release
+---
+
+The latest Apache Aurora release, 0.15.0, is now available for
+[download](http://aurora.apache.org/downloads/). The main scope of this release is to catch up
+with the Mesos release cadence. As such, no major development or deprecation changes were accepted.
+Here are some highlights in this release:
+
+  - New scheduler commandline argument `-enable_mesos_fetcher` to allow job submissions
+    to contain URIs which will be passed to the Mesos Fetcher and subsequently downloaded into
+    the sandbox. Please note that enabling job submissions to download resources from arbitrary
+    URIs may have security implications.
+  - Upgraded Mesos to 0.28.2.
+
+Full release notes are available in the release
+[CHANGELOG](https://gitbox.apache.org/repos/asf?p=aurora.git&f=CHANGELOG&hb=rel/0.15.0).
+
+## Getting Involved
+
+We encourage you to try out this release and let us know what you think. If you run into any issues,
+please let us know on the [user mailing list and IRC](https://aurora.apache.org/community/).
+
+## Thanks
+
+Thanks to the 8 contributors who made Apache Aurora 0.15.0 possible:
+
+* Benjamin Staffin
+* John Sirois
+* Joshua Cohen
+* Martin Hrabovcin
+* Maxim Khutornenko
+* Mehrdad Nurolahzade
+* Renan DelValle
+* Stephan Erb
diff --git a/source/blog/2016-09-28-aurora-0-16-0-released.md b/source/blog/2016-09-28-aurora-0-16-0-released.md
new file mode 100644
index 0000000..a373bf7
--- /dev/null
+++ b/source/blog/2016-09-28-aurora-0-16-0-released.md
@@ -0,0 +1,99 @@
+---
+layout: post
+title: 0.16.0 Released
+permalink: /blog/aurora-0-16-0-released/
+published: true
+post_author:
+  display_name: Joshua Cohen
+  twitter: heyjoshua
+tags: Release
+---
+
+The latest Apache Aurora release, 0.16.0, is now available for
+[download](http://aurora.apache.org/downloads/). Here are some highlights in this release:
+
+- Upgraded Mesos to 1.0.0. Note: as part of this upgrade we have switched from depending on
+  the mesos.native egg for Thermos in favor of the stripped down mesos.executor egg. This means
+  users launching Docker tasks with the Mesos DockerContainerizer are no longer required to use
+  images that include all of Mesos's dependencies.
+- Scheduler command line behavior has been modified to warn users of the deprecation of `production`
+  attribute in `Job` thrift struct. The scheduler is queried for tier configurations and the user's
+  choice of `tier` and `production` attributes is revised, if necessary. If `tier` is already set,
+  the `production` attribute might be adjusted to match the `tier` selection. Otherwise, `tier` is
+  selected based on the value of `production` attribute. If a matching tier is not found, the
+  `default` tier from tier configuration file (`tiers.json`) is used.
+- The `/offers` endpoint has been modified to display attributes of resource offers as received
+  from Mesos. This has affected rendering of some of the existing attributes. Furthermore, it now
+  dumps additional offer attributes including [reservations](http://mesos.apache.org/documentation/latest/reservation/)
+  and [persistent volumes](http://mesos.apache.org/documentation/latest/persistent-volume/).
+- The scheduler API now accepts both thrift JSON and binary thrift. If a request is sent without a
+  `Content-Type` header, or with a `Content-Type` header of `application/x-thrift`,
+  `application/json`, or `application/vnd.apache.thrift.json`, the request is treated as thrift
+  JSON. If a request is sent with a `Content-Type` header of `application/vnd.apache.thrift.binary`,
+  the request is treated as binary thrift. If the `Accept` header of the request is
+  `application/vnd.apache.thrift.binary`, the response will be binary thrift. Any other value for
+  `Accept` will result in thrift JSON. (A sketch of a client request appears after this list.)
+- Scheduler is now able to launch jobs using more than one executor at a time. To use this feature
+  the `-custom_executor_config` flag must point to a JSON file which contains at least one valid
+  executor configuration as detailed in the [configuration](docs/features/custom-executors.md)
+  documentation.
+- Add rollback API to the scheduler and a new client command to support rolling back
+  active job updates to their initial state.
+- <a name="zk_use_curator_upgrade"></a> The scheduler flag `-zk_use_curator` now defaults to `true`
+  and care should be taken when upgrading from a configuration that does not pass the flag. The
+  scheduler upgrade should be performed by bringing all schedulers down, and then bringing upgraded
+  schedulers up. A rolling upgrade would result in no leading scheduler for the duration of the
+  roll which could be confusing to monitor and debug.
+- A new command `aurora_admin reconcile_tasks` is now available on the Aurora admin client that can trigger
+  implicit and explicit task reconciliations.
+- Add a new MTTS (Median Time To Starting) metric in addition to MTTA and MTTR.
+- In addition to CPU resources, RAM resources can now be treated as revocable via the scheduler
+  commandline flag `-enable_revocable_ram`.
+- Introduce UpdateMetadata fields in JobUpdateRequest to allow clients to store metadata on update.
+- Changed cronSchedule field inside of JobConfiguration schema to be optional for compatibility with Go.
+- Update default value of command line option `-framework_name` to 'Aurora'.
+- Tasks launched with filesystem images and the Mesos unified containerizer are now fully isolated from
+  the host's filesystem. As such they are no longer required to include any of the executor's
+  dependencies (e.g. Python 2.7) within the task's filesystem.
+
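+A sketch of how a client might opt into binary thrift for both request and response (the scheduler
+address and payload below are placeholders; the Aurora scheduler serves its API over HTTP):
+
+```python
+import requests
+
+SCHEDULER_API = 'http://aurora-scheduler.example.com:8081/api'  # hypothetical address
+
+headers = {
+    'Content-Type': 'application/vnd.apache.thrift.binary',  # request body is binary thrift
+    'Accept': 'application/vnd.apache.thrift.binary',        # ask for a binary thrift response
+}
+payload = b''  # a serialized Thrift RPC request would go here
+response = requests.post(SCHEDULER_API, data=payload, headers=headers)
+```
+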
+Deprecations and removals:
+
+- The job configuration flag `production` is now deprecated. To achieve the same scheduling behavior
+  that `production=true` used to provide, users should elect a `tier` for the job with attributes
+  `preemptible=false` and `revocable=false`. For example, the `preferred` tier in the default tier
+  configuration file (`tiers.json`) matches the above criteria.
+- The `ExecutorInfo.source` field is deprecated and has been replaced with a label named `source`.
+  It will be removed from Mesos in a future release.
+- The scheduler flag `-zk_use_curator` has been deprecated. If you have never set the flag and are
+  upgrading you should take care as described in the [note](#zk_use_curator_upgrade) above.
+- The `key` argument of `getJobUpdateDetails` has been deprecated. Use the `query` argument instead.
+- The `--restart-threshold` option on `aurora job restart` has been removed.
+
+Full release notes are available in the release
+[CHANGELOG](https://gitbox.apache.org/repos/asf?p=aurora.git&f=CHANGELOG&hb=rel/0.16.0).
+
+## Getting Involved
+
+We encourage you to try out this release and let us know what you think. If you run into any issues,
+please let us know on the [user mailing list and IRC](https://aurora.apache.org/community/).
+
+## Thanks
+
+Thanks to the 16 contributors who made Apache Aurora 0.16.0 possible:
+
+* David McLaughlin
+* David Robinson
+* Dmitriy Shirchenko
+* Igor Morozov
+* John Sirois
+* Joshua Cohen
+* Kai Huang
+* Karthik Anantha Padmanabhan
+* Maxim Khutornenko
+* Mehrdad Nurolahzade
+* Renan DelValle
+* Santhosh Kumar Shanmugham
+* Stephan Erb
+* Tarun Gogineni
+* Zameer Manji
+* Zhitao Li
diff --git a/source/blog/2017-02-06-aurora-0-17-0-released.md b/source/blog/2017-02-06-aurora-0-17-0-released.md
new file mode 100644
index 0000000..216444f
--- /dev/null
+++ b/source/blog/2017-02-06-aurora-0-17-0-released.md
@@ -0,0 +1,93 @@
+---
+layout: post
+title: 0.17.0 Released
+permalink: /blog/aurora-0-17-0-released/
+published: true
+post_author:
+  display_name: Stephan Erb
+  twitter: ErbStephan
+tags: Release
+---
+
+The latest Apache Aurora release, 0.17.0, is now available for
+[download](http://aurora.apache.org/downloads/). Here are some highlights in this release.
+
+Major new features:
+
+- Use the `RUNNING` state of a task to indicate it is healthy and behaving as expected.
+  A service will remain in the `STARTING` state until a configurable number of consecutive health
+  checks have passed. By setting `watch_secs` to 0, job updates can therefore rely purely on
+  health checks rather than the `watch_secs` timeout when deciding an individual instance's update
+  state. (A sketch appears after this list.)
+- The Aurora Scheduler API supports volume mounts per task for the Mesos
+  Containerizer if the scheduler is running with the `-allow_container_volumes`
+  flag.
+- Resolve docker tags to concrete identifiers for DockerContainerizer, so that job configuration
+  is immutable across restarts. The feature introduces a new `{{docker.image[name][tag]}}` binder that
+  can be used in the Aurora job configuration to resolve a docker image specified by its `name:tag`
+  to a concrete identifier specified by its `registry/name@digest`. It requires version 2 of the
+  Docker Registry.
+
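+A minimal sketch of a health-check-driven update in the Aurora DSL (the
+`min_consecutive_successes` field follows the release notes; the values are illustrative):
+
+```python
+update_config = UpdateConfig(
+  watch_secs = 0)  # rely purely on health checks instead of a fixed timeout
+
+health_check_config = HealthCheckConfig(
+  initial_interval_secs = 10,
+  interval_secs = 5,
+  min_consecutive_successes = 2)  # the task leaves STARTING only after 2 passing checks
+```
+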
+Performance improvements:
+
+- Scheduling performance has been improved by scheduling multiple tasks per scheduling round.
+- Preemption slot search logic is modified to improve its performance.
+  - Multiple reservations are made per task group per round.
+  - Multiple reservations are evaluated per round.
+- The default logging output has been changed to remove line numbers and inner class information in
+  exchange for faster logging.
+- The Aurora client is now using the Thrift binary protocol to communicate with the scheduler.
+- Added a new flag `--snapshot_hydrate_stores` that controls which H2-backed stores to write fully
+  hydrated into the Scheduler snapshot. Can lead to significantly lower snapshot times for large
+  clusters if you set this flag to an empty list. Old behavior is preserved by default, but see
+  org.apache.aurora.scheduler.storage.log.SnapshotStoreImpl for which stores we currently have
+  duplicate writes for.
+- New scheduler metrics are added to facilitate monitoring and performance studies (AURORA-1832).
+
+Additional changes:
+
+- Upgraded Mesos to 1.1.0.
+- A task's tier is now mapped to a label on the Mesos `TaskInfo` proto.
+- Introduce a new `--ip` option to bind the Thermos observer to a specific interface rather than
+  all interfaces.
+- Fix error that prevents the scheduler from being launched with `-enable_revocable_ram`.
+- The executor will send SIGTERM to processes that self-daemonize via double forking.
+- Support the deployment of the Aurora scheduler behind HTTPS-enabled reverse proxies: by launching
+  the scheduler with `-serverset_endpoint_name=https` you can ensure the Aurora client will correctly
+  discover HTTPS support via the ZooKeeper-based discovery mechanism.
+
+Full release notes are available in the release
+[CHANGELOG](https://gitbox.apache.org/repos/asf?p=aurora.git&f=CHANGELOG&hb=rel/0.17.0).
+
+## Getting Involved
+
+We encourage you to try out this release and let us know what you think. If you run into any issues,
+please let us know on the [user mailing list and IRC](https://aurora.apache.org/community/).
+
+## Thanks
+
+Thanks to the 22 contributors who made Apache Aurora 0.17.0 possible:
+
+* Andrew Jorgensen
+* Bing-Qian Luan
+* David McLaughlin
+* Dmitriy Shirchenko
+* Jake Smullin
+* Jing Chen
+* John Sirois
+* Joshua Cohen
+* Justin Pinkul
+* Kai Huang
+* Maxim Khutornenko
+* Mehrdad Nurolahzade
+* Nicolás Donatucci
+* Pierre Cheynier
+* Pradyumna Kaushik
+* Renan DelValle
+* Reza Motamedi
+* Rogier Dikkes
+* Santhosh Kumar Shanmugham
+* Stephan Erb
+* Steve Niemitz
+* Zameer Manji
+
diff --git a/source/blog/2017-06-20-aurora-0-18-0-released.md b/source/blog/2017-06-20-aurora-0-18-0-released.md
new file mode 100644
index 0000000..5ef21cb
--- /dev/null
+++ b/source/blog/2017-06-20-aurora-0-18-0-released.md
@@ -0,0 +1,86 @@
+---
+layout: post
+title: 0.18.0 Released
+permalink: /blog/aurora-0-18-0-released/
+published: true
+post_author:
+  display_name: Santhosh Kumar Shanmugham
+  twitter: santhk 
+tags: Release
+---
+
+The latest Apache Aurora release, 0.18.0, is now available for
+[download](http://aurora.apache.org/downloads/). Here are some highlights in this release.
+
+Major new features:
+
+- Added the `-mesos_driver` flag to the scheduler with three possible options:
+  `SCHEDULER_DRIVER`, `V0_MESOS`, `V1_MESOS`. The first uses the original driver
+  and the latter two use two new drivers from `libmesos`. `V0_MESOS` uses the
+  `SCHEDULER_DRIVER` under the hood and `V1_MESOS` uses a new HTTP API aware
+  driver. Users that want to use the HTTP API should use `V1_MESOS`.
+  Performance sensitive users should stick with the `SCHEDULER_DRIVER` or
+  `V0_MESOS` drivers.
+- Add observer command line options to control the resource collection interval
+  for observed tasks. See [here](docs/reference/observer-configuration.md) for details.
+- Added support for reserving agents during job updates, which can substantially reduce update times
+  in clusters with high contention for resources. Disabled by default, but can be enabled with
+  `enable_update_affinity` option, and the reservation timeout can be controlled via
+  `update_affinity_reservation_hold_time`.
+- Add `task scp` command to the CLI client for easy transferring of files to/from/between task
+  instances. See [here](docs/reference/client-commands.md#scping-with-task-machines) for details.
+  Currently only fully supported for Mesos containers (you can copy files from the Docker container
+  sandbox but you cannot send files to it).
+- Added ability to inject your own scheduling logic, via a lazy Guice module binding. This is an
+  alpha-level feature and not subject to backwards compatibility considerations. You can specify
+  your custom modules using the `task_assigner_modules` and `preemption_slot_finder_modules` options.
+- Added support for resource bin-packing via the `-offer_order` option. You can choose from `CPU`,
+  `MEMORY`, `DISK`, `RANDOM` or `REVOCABLE_CPU`. You can also compose secondary sorts by combining
+  orders together: e.g. to bin-pack by CPU and MEMORY you could supply `CPU,MEMORY`. The current
+  default is `RANDOM`, which has the strong advantage that users can (usually) escape noisy
+  neighbors or machine issues with a task restart. With deterministic bin-packing, tasks may
+  always end up on the same agent, so be careful enabling this without proper monitoring and
+  remediation of host failures.
+- Modified job update behavior to create new instances, then update existing instances, and then
+  kill unwanted instances. Previously, a job update would modify each instance in the order of
+  their instance ID.
+- Added ability to whitelist TaskStateChanges in the webhook configuration file. You can specify
+  a list of desired TaskStateChanges (represented by their task statuses) to be sent to a
+  configured endpoint.
+- Add message parameter to `killTasks` RPC.
+- Add `prune_tasks` endpoint to `aurora_admin`. See `aurora_admin prune_tasks -h` for usage information.
+- Add support for per-task volume mounts for Mesos containers to the Aurora config DSL. (A sketch
+  appears after this list.)
+
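+A sketch of what a per-task volume mount might look like in the Aurora DSL (the `Volume` field
+spellings are assumptions based on the release notes; consult the configuration reference for the
+authoritative names):
+
+```python
+job = Job(
+  # ... other job fields elided ...
+  container = Mesos(
+    image = DockerImage(name = 'myapp', tag = 'latest'),
+    volumes = [
+      # Mount a read-only host directory into the task's filesystem.
+      Volume(container_path = 'etc/config', host_path = '/etc/myapp', mode = 'RO'),
+    ]))
+```
+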
+Additional changes:
+
+- Update to Mesos 1.2.0. Please upgrade Aurora to 0.18 before upgrading Mesos to 1.2.0 if you rely
+  on Mesos filesystem images.
+  
+**NOTE: In this release, the Aurora client will need to be updated before the scheduler
+can be deployed.**
+
+Full release notes are available in the release
+[CHANGELOG](https://gitbox.apache.org/repos/asf?p=aurora.git&f=CHANGELOG&hb=rel/0.18.0).
+
+## Getting Involved
+
+We encourage you to try out this release and let us know what you think. If you run into any issues,
+please let us know on the [user mailing list and IRC](https://aurora.apache.org/community/).
+
+## Thanks
+
+Thanks to the 12 contributors who made Apache Aurora 0.18.0 possible:
+
+* Abhishek Jain
+* Charles Raimbert
+* Cody Gibb
+* David McLaughlin
+* Kai Huang
+* Mehrdad Nurolahzade
+* Nicolás Donatucci
+* Reza Motamedi
+* Santhosh Kumar Shanmugham
+* Stephan Erb
+* Takuya Kuwahara
+* Zameer Manji
+
diff --git a/source/blog/2017-11-01-aurora-0-18-1-released.md b/source/blog/2017-11-01-aurora-0-18-1-released.md
new file mode 100644
index 0000000..1e6fbc2
--- /dev/null
+++ b/source/blog/2017-11-01-aurora-0-18-1-released.md
@@ -0,0 +1,16 @@
+---
+layout: post
+title: 0.18.1 Released
+permalink: /blog/aurora-0-18-1-released/
+published: true
+post_author:
+  display_name: Bill Farner
+tags: Release
+---
+
+The latest Apache Aurora release, 0.18.1, is now available for
+[download](http://aurora.apache.org/downloads/). This is a patch release to
+update the Shiro library to version 1.2.5.
+
+Full release notes are available in the release
+[CHANGELOG](https://gitbox.apache.org/repos/asf?p=aurora.git&f=CHANGELOG&hb=rel/0.18.1).
diff --git a/source/blog/2017-11-10-aurora-0-19-0-released.md b/source/blog/2017-11-10-aurora-0-19-0-released.md
new file mode 100644
index 0000000..b2d151f
--- /dev/null
+++ b/source/blog/2017-11-10-aurora-0-19-0-released.md
@@ -0,0 +1,83 @@
+---
+layout: post
+title: 0.19.0 Released
+permalink: /blog/aurora-0-19-0-released/
+published: true
+post_author:
+  display_name: Bill Farner
+tags: Release
+---
+
+The latest Apache Aurora release, 0.19.0, is now available for
+[download](http://aurora.apache.org/downloads/). Here are some highlights in this release:
+
+- Added the ability to configure the executor's stop timeout, which is the maximum amount of time
+  the executor will wait during a graceful shutdown sequence before continuing the 'Forceful
+  Termination' process (see
+  [here](http://aurora.apache.org/documentation/latest/reference/task-lifecycle/) for details).
+- Added the ability to configure the wait period after calling the graceful shutdown endpoint and
+  the shutdown endpoint using the `graceful_shutdown_wait_secs` and `shutdown_wait_secs` fields in
+  `HttpLifecycleConfig` respectively. Previously, the executor would only wait 5 seconds between
+  steps (adding up to a total of 10 seconds as there are 2 steps). The overall waiting period is
+  bounded by the executor's stop timeout, which can be configured using the executor's
+  `stop_timeout_in_secs` flag. (A sketch of `HttpLifecycleConfig` appears after this list.)
+- Added the `thrift_method_interceptor_modules` scheduler flag that lets cluster operators inject
+  custom Thrift method interceptors.
+- Increase default ZooKeeper session timeout from 4 to 15 seconds.
+- Added option `-zk_connection_timeout` to control the connection timeout of ZooKeeper connections.
+- Added scheduler command line argument `-hold_offers_forever`, suitable for use in clusters where
+  Aurora is the only framework.  This setting disables other options such as `-min_offer_hold_time`,
+  and allows the scheduler to more efficiently cache scheduling attempts.
+- The scheduler no longer uses an internal H2 database for storage.
+- There is a new Scheduler UI which, in addition to the facelift, provides the ability to inject your
+  own custom UI components.
+
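+A minimal sketch of these lifecycle settings in the Aurora DSL (endpoint behavior as described
+above; the values are illustrative):
+
+```python
+lifecycle = Lifecycle(
+  http = HttpLifecycleConfig(
+    graceful_shutdown_wait_secs = 10,  # wait after the graceful shutdown endpoint is hit
+    shutdown_wait_secs = 10))          # wait after the forceful shutdown endpoint is hit
+```
+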
+Deprecations and removals:
+
+- Removed the deprecated command line argument `-zk_use_curator`, removing the choice to use the
+  legacy ZooKeeper client.
+- Removed the `rewriteConfigs` thrift API call in the scheduler. This was a last-ditch mechanism
+  to modify scheduler state on the fly. It was considered extremely risky to use since its
+  inception, and is safer to abandon due to its lack of use and likelihood for code rot.
+- Removed the Job environment validation from the command line client. Validation was moved to
+  the scheduler side through the `allowed_job_environments` option. By default it allows any of
+  `devel`, `test`, `production`, and any value matching the regular expression `staging[0-9]*`.
+- Removed scheduler command line arguments related to the internal H2 database, which is no longer
+  used:
+  - `-use_beta_db_task_store`
+  - `-enable_db_metrics`
+  - `-slow_query_log_threshold`
+  - `-db_row_gc_interval`
+  - `-db_lock_timeout`
+  - `-db_max_active_connection_count`
+  - `-db_max_idle_connection_count`
+  - `-snapshot_hydrate_stores`
+  - `-enable_h2_console`
+
+Full release notes are available in the release
+[CHANGELOG](https://gitbox.apache.org/repos/asf?p=aurora.git&f=CHANGELOG&hb=rel/0.19.0).
+
+## Thanks
+
+Thanks to the 14 contributors who made Apache Aurora 0.19.0 possible:
+
+* Bill Farner
+* David McLaughlin
+* Derek Slager
+* Jordan Ly
+* Kai Huang
+* Keisuke Nishimoto
+* Mauricio Garavaglia
+* Renan DelValle
+* Reza Motamedi
+* Robert Allen
+* Ruben D. Porras
+* Santhosh Kumar Shanmugham
+* Stephan Erb
+* Zameer Manji
+
diff --git a/source/blog/2018-02-11-aurora-0-19-1-released.md b/source/blog/2018-02-11-aurora-0-19-1-released.md
new file mode 100644
index 0000000..482a021
--- /dev/null
+++ b/source/blog/2018-02-11-aurora-0-19-1-released.md
@@ -0,0 +1,22 @@
+---
+layout: post
+title: 0.19.1 Released
+permalink: /blog/aurora-0-19-1-released/
+published: true
+post_author:
+  display_name: Renan DelValle
+tags: Release
+---
+
+The latest Apache Aurora release, 0.19.1, is now available for
+[download](http://aurora.apache.org/downloads/).
+
+Full release notes are available in the release
+[CHANGELOG](https://gitbox.apache.org/repos/asf?p=aurora.git&f=CHANGELOG&hb=rel/0.19.1).
+
diff --git a/source/blog/2018-04-03-aurora-0-20-0-released.md b/source/blog/2018-04-03-aurora-0-20-0-released.md
new file mode 100644
index 0000000..ec44deb
--- /dev/null
+++ b/source/blog/2018-04-03-aurora-0-20-0-released.md
@@ -0,0 +1,63 @@
+---
+layout: post
+title: 0.20.0 Released
+permalink: /blog/aurora-0-20-0-released/
+published: true
+post_author:
+  display_name: Renan DelValle
+tags: Release
+---
+
+The latest Apache Aurora release, 0.20.0, is now available for
+[download](http://aurora.apache.org/downloads/). Here are some highlights in this release:
+
+- Updated to Mesos 1.4.0.
+- Added experimental support for the Mesos partition-aware APIs. The key idea is a new ScheduleStatus
+  PARTITIONED that represents a task in an unknown state. Users of Aurora can have per-job policies
+  of whether or not to reschedule and how long to wait for the partition to heal. Backwards
+  compatibility with existing behavior (i.e. transition to LOST immediately on a partition) is
+  maintained. The support is experimental due to bugs found in Mesos that would cause issues in
+  a production cluster. For that reason, the functionality is behind a new flag `-partition_aware`
+  that is disabled by default. When Mesos support is improved and the new behavior is vetted in
+  production clusters, we'll enable this by default. (A sketch of a per-job policy appears after
+  this list.)
+- Added the ability to inject custom offer holding and scheduling logic via the `-offer_set_module`
+  scheduler flag. To take advantage of this feature, you will need to implement the `OfferSet`
+  interface.
+- Added `executor_config` field to the Job object of the DSL which will populate
+  `JobConfiguration.TaskConfig.ExecutorConfig`. This allows for using custom executors defined
+  through the `--custom_executor_config` scheduler flag. See our
+  [custom-executors](https://aurora.apache.org/documentation/0.20.0/features/custom-executors/) documentation for more information.
+- Added support in Thermos Observer for delegating disk usage monitoring to Mesos agent. The feature
+  can be enabled via `--enable_mesos_disk_collector` flag, in which case Observer will use the
+  agent's containers HTTP API to query the amount of used bytes for each container. Note that disk
+  isolation should be enabled in Mesos agent. This feature is not compatible with authentication
+  enabled agents.
+
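+A sketch of what a per-job partition policy might look like in the Aurora DSL (the
+`PartitionPolicy` field names are assumptions based on the release notes; the values are
+illustrative):
+
+```python
+job = Service(
+  # ... other job fields elided ...
+  partition_policy = PartitionPolicy(
+    reschedule = True,   # reschedule tasks that become PARTITIONED
+    delay_secs = 300))   # give the partition five minutes to heal first
+```
+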
+Deprecations and removals:
+
+- Removed the ability to recover from SQL-based backups and snapshots.  An 0.20.0 scheduler
+  will not be able to recover backups or replicated log data created prior to 0.19.0.
+- Removed task level resource fields (`numCpus`, `ramMb`, `diskMb`, `requestedPorts`).
+- Removed the `-offer_order_modules` scheduler flag related to custom injectable offer orderings,
+  since this will now be subsumed under custom `OfferSet` implementations (see the comment above).
+
+Full release notes are available in the release
+[CHANGELOG](https://gitbox.apache.org/repos/asf?p=aurora.git&f=CHANGELOG&hb=rel/0.20.0).
+
+## Thanks
+
+Thanks to the 12 contributors who made Apache Aurora 0.20.0 possible:
+
+* Jing Chen
+* Franck Cuny
+* Renan DelValle
+* Nicolás Donatucci
+* Stephan Erb
+* Bill Farner
+* Juan Manuel Fresia
+* Jordan Ly
+* David McLaughlin
+* Reza Motamedi
+* Ruben D. Porras
+* John Sirois
+
diff --git a/source/blog/2018-09-10-aurora-0-21-0-released.md b/source/blog/2018-09-10-aurora-0-21-0-released.md
new file mode 100644
index 0000000..de036e4
--- /dev/null
+++ b/source/blog/2018-09-10-aurora-0-21-0-released.md
@@ -0,0 +1,74 @@
+---
+layout: post
+title: 0.21.0 Released
+permalink: /blog/aurora-0-21-0-released/
+published: true
+post_author:
+  display_name: Renan DelValle
+tags: Release
+---
+
+The latest Apache Aurora release, 0.21.0, is now available for
+[download](http://aurora.apache.org/downloads/). Here are some highlights in this release:
+
+- Updated to Mesos 1.5.0.
+- Introduce ability for tasks to specify custom SLA requirements via the new `SlaPolicy` structs.
+  There are 3 different SLA Policies that are currently supported - `CountSlaPolicy`,
+  `PercentageSlaPolicy` and `CoordinatorSlaPolicy`. SLA policies based on count and percentage
+  express the required number of `RUNNING` instances as either a count or percentage in addition to
+  allowing the duration-window for which these requirements have to be satisfied. For applications
+  that need more control over how SLA is determined, a custom SLA calculator (a.k.a. the
+  Coordinator) can be configured. Any action that can affect SLA will first check with the
+  Coordinator before proceeding.
+
+    **IMPORTANT: The storage changes required for this feature will make scheduler
+    snapshot backwards incompatible. The scheduler will be unable to read the snapshot if rolled
+    back to a previous version. If a rollback is absolutely necessary, perform the following
+    steps:**
+    1. Stop all host maintenance requests via `aurora_admin host_activate`.
+    2. Ensure a new snapshot is created by running `aurora_admin scheduler_snapshot <cluster>`.
+    3. Roll back to the previous version.
+
+  Note: The `Coordinator` interface required for the `CoordinatorSlaPolicy` is experimental at
+  this juncture and is bound to change in the future.
+- Introduced ability for updates to be 'SLA-aware', i.e. to only update an instance if doing so
+  keeps the job within SLA, using the new `sla_aware` field in `UpdateConfig`. See the bullet
+  point above for an explanation of custom SLA requirements, and the sketch after this list.
+
+  **NOTE**: SLA-aware updates will use the desired config's SLA, not the existing config.
+
+  Three additional scheduler options have been added to support this feature:
+
+    1. `max_update_action_batch_size (default: 300)`: the number of update actions to process in a
+    batch.
+    2. `sla_aware_kill_retry_min_delay (default: 1mins)`: the minimum amount of time to wait before
+    retrying an SLA-aware kill (using a truncated binary backoff).
+    3. `sla_aware_kill_retry_max_delay (default: 5mins)`: the maximum amount of time to wait before
+    retrying an SLA-aware kill (using a truncated binary backoff).
+
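+A sketch combining an SLA policy with an SLA-aware update in the Aurora DSL (the `sla_policy`
+field name and the `PercentageSlaPolicy` arguments are assumptions based on the release notes):
+
+```python
+job = Service(
+  # ... other job fields elided ...
+  sla_policy = PercentageSlaPolicy(
+    percentage = 95,        # require at least 95% of instances RUNNING...
+    duration_secs = 1800),  # ...over a 30-minute window
+  update_config = UpdateConfig(sla_aware = True))
+```
+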
+Deprecations and removals:
+
+- Deprecated the `aurora_admin host_drain` command used for maintenance. With this release the SLA
+  computations are moved to the scheduler and it is no longer required for the client to compute
+  SLAs and watch the drains. The scheduler persists any host maintenance request and performs
+  SLA-aware drain of the tasks, before marking the host as `DRAINED`. So maintenance requests
+  survive across scheduler fail-overs. Use the newly introduced `aurora_admin sla_host_drain`
+  to skip the SLA computations on the admin client.
+- Removed resource fields (`numCpus`, `ramMb`, `diskMb`) from ResourceAggregate.
+
+Full release notes are available in the release
+[CHANGELOG](https://gitbox.apache.org/repos/asf?p=aurora.git&f=CHANGELOG&hb=rel/0.21.0).
+
+## Thanks
+
+Thanks to the 9 contributors who made Apache Aurora 0.21.0 possible:
+
+* Jing Chen
+* Renan DelValle
+* Stephan Erb
+* Jordan Ly
+* David McLaughlin
+* John Sirois
+* Sameer Parekh
+* Reza Motamedi
+* Ezequiel Torres Feyuk
+
diff --git a/source/blog/2019-12-12-aurora-0-22-0-released.md b/source/blog/2019-12-12-aurora-0-22-0-released.md
new file mode 100644
index 0000000..9d3175f
--- /dev/null
+++ b/source/blog/2019-12-12-aurora-0-22-0-released.md
@@ -0,0 +1,69 @@
+---
+layout: post
+title: 0.22.0 Released
+permalink: /blog/aurora-0-22-0-released/
+published: true
+post_author:
+  display_name: Renan DelValle
+tags: Release
+---
+
+The latest Apache Aurora release, 0.22.0, is now available for
+[download](http://aurora.apache.org/downloads/). Here are some highlights in this release:
+
+- Updated to Mesos 1.6.1.
+- New update strategy added: Variable Batch Update. With this strategy, a job may be updated in
+  batches of different sizes. For example, an update which modifies a total of 10 instances may
+  be done in batch sizes of 2, 3, and 5. The number of updated instances must equal the current
+  group size in order to move to the next group size. If the number of updated instances is
+  greater than the sum of all group sizes, the last group size will be used in perpetuity until
+  all instances are updated. A new field has been added to `UpdateConfig` called
+  `update_strategy`. Update strategy may take a `QueueUpdateStrategy`, `BatchUpdateStrategy`,
+  or a `VariableBatchUpdateStrategy` object. `QueueUpdateStrategy` and `BatchUpdateStrategy` take
+  a single integer argument while `VariableBatchUpdateStrategy` takes a list of positive integers
+  as an argument. (A sketch appears after this list.)
+- Users may now specify a name for the URI fetcher to give a downloaded artifact after it
+  has been downloaded.
+- Auto pause feature added to VariableBatch strategy and Batch strategy. With this feature enabled,
+  when an update is `ROLLING_FORWARD`, the update will automatically pause itself right before
+  a new batch is started. (This feature is being released as tested but in beta state. We are
+  looking to collect feedback before we consider it fully stable.)
+- `loader.load()` now uses memoization on the config file path so that we only load and process
+  each config file once.
+- Instances run with custom executors will no longer show links to thermos observer.
+- Add observer command line option `--disable_task_resource_collection` to disable the collection of
+  CPU, memory, and disk metrics for observed tasks. This is useful in setups where metrics cannot
+  be gathered reliably (e.g. when using PID namespaces) or when it is expensive due to hundreds of
+  active tasks per host.
+- Added flag `-sla_aware_kill_non_prod` which allows operators to enable SLA-aware killing
+  for non-production jobs. Jobs are considered non-production when they are preemptable and/or
+  revocable.
+
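+A sketch of a variable batch strategy in the Aurora DSL (the field name `batch_sizes` is an
+assumption based on the release notes):
+
+```python
+# A 10-instance job updated in batches of 2, then 3, then 5.
+update_config = UpdateConfig(
+  update_strategy = VariableBatchUpdateStrategy(batch_sizes = [2, 3, 5]))
+```
+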
+Deprecations and removals:
+
+- Deprecated use of Thrift fields `JobUpdateSettings.waitForBatchCompletion` and
+  `JobUpdateSettings.updateGroupSize`. Please set the proper `JobUpdateSettings.updateStrategy`
+  instead. Note that these same constructs, as represented in the Aurora DSL, are still valid
+  as they will be converted to the new field automatically by the client
+  for backwards compatibility.
+- Backfill code for finding a matching tier for a Job has been removed. Tier will now be set
+  when a Job is received by the scheduler. If no tier is provided, the default tier defined in
+  `-tier_config` is used.
+
+Full release notes are available in the release
+[CHANGELOG](https://github.com/apache/aurora/milestone/2?closed=1).
+
+## Thanks
+
+Thanks to the 8 contributors who made Apache Aurora 0.22.0 possible:
+
+* Se Choi
+* Raúl Cuza
+* Renan DelValle
+* Stephan Erb
+* Mauricio Garavaglia
+* Daniel Knightly
+* Philipp Sontag
+* Justin Venus
+
diff --git a/source/blog/feed.xml.erb b/source/blog/feed.xml.erb
index 51bc2b9..32f821a 100644
--- a/source/blog/feed.xml.erb
+++ b/source/blog/feed.xml.erb
@@ -2,13 +2,13 @@
 
 <feed xmlns="http://www.w3.org/2005/Atom">
   <title>Apache Aurora Blog</title>
-  <id>http://aurora.incubator.apache.org/blog</id>
-  <link href="http://aurora.incubator.apache.org/blog" />
-  <link href="http://aurora.incubator.apache.org/blog/feed.xml" rel="self"/>
+  <id>http://aurora.apache.org/blog</id>
+  <link href="http://aurora.apache.org/blog" />
+  <link href="http://aurora.apache.org/blog/feed.xml" rel="self"/>
   <updated><%= blog.articles.first.date.to_time.iso8601  %></updated>
   <% blog.articles.each do |article| %>
   <entry>
-    <id>http://aurora.incubator.apache.org<%= article.url %></id>
+    <id>http://aurora.apache.org<%= article.url %></id>
     <link href="<%= article.url %>" />
     <title>
       <%= article.title %>
diff --git a/source/community.html.md b/source/community.html.md
deleted file mode 100644
index 7abc682..0000000
--- a/source/community.html.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# Community
-
-<div class="row-fluid">
-		<div class="col-md-4">
-		<h3>Contributing to Aurora</h3>
-		<h4 name="reportbugs">Report or track a bug</h4>
-		<p>Bugs can be reported on our <a href="http://issues.apache.org/jira/browse/AURORA">JIRA issue tracker</a>. In order to create a new issue, you'll need register for an account.</p>
-
-		<h4 name="contribute">Submit a core patch</h4>
-		<p>Please follow our <a href="/docs/howtocontribute/">contribution guidelines</a> when submitting a patch. We welcome them!</p>
-	</div>
-	<div class="col-md-4">
-		<h3>Stay in Touch</h3>
-		<h4>Mailing lists</h4>
-		<p><strong>Developers</strong> - <a href="mailto:dev-subscribe@aurora.incubator.apache.org">Subscribe</a> | <a href="mailto:dev-unsubscribe@aurora.incubator.apache.org">Unsubscribe</a> | <a href="http://mail-archives.apache.org/mod_mbox/incubator-aurora-dev/">Archive</a><br />
-
-			<strong>Issues</strong> - <a href="mailto:issues-subscribe@aurora.incubator.apache.org">Subscribe</a> | <a href="mailto:issues-unsubscribe@aurora.incubator.apache.org">Unsubscribe</a> | <a href="http://mail-archives.apache.org/mod_mbox/incubator-aurora-issues/">Archive</a><br />
-
-			<strong>Commits</strong> - <a href="mailto:commits-subscribe@aurora.incubator.apache.org">Subscribe</a> | <a href="mailto:commits-unsubscribe@aurora.incubator.apache.org">Unsubscribe</a> | <a href="http://mail-archives.apache.org/mod_mbox/incubator-aurora-commits/">Archive</a><br />
-
-     	<strong>Reviews</strong> - <a href="mailto:reviews-subscribe@aurora.incubator.apache.org">Subscribe</a> | <a href="mailto:reviews-unsubscribe@aurora.incubator.apache.org">Unsubscribe</a> | <a href="http://mail-archives.apache.org/mod_mbox/incubator-aurora-reviews/">Archive</a></p>
-
-		<h4 name="ircchannel">IRC</h4>
-		<p>Developers and users hang out in <code>#aurora</code> on <code>irc.freenode.net</code>. If you're new to IRC, we suggest trying a <a href="http://webchat.freenode.net/?channels=#aurora">web-based client</a>.</p>
-		
-		<h4>Join our weekly IRC meetings</h4>
-		<p>We hold weekly IRC meetings as an opportunity for users and developers to engage.  Meetings are every Monday at <a href="http://www.timeanddate.com/worldclock/fixedtime.html?msg=Aurora+Weekly+Meeting&iso=20140421T11&p1=224&ah=1">11AM Pacific Time</a>.</p>
-		</div>
-	<div class="col-md-4">
-		<h3>Follow the Project</h3>		
-		<a class="twitter-timeline" href="https://twitter.com/ApacheAurora" data-widget-id="512693636127920129">Tweets by @ApacheMesos</a>
-		<script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+"://platform.twitter.com/widgets.js";fjs.parentNode.insertBefore(js,fjs);}}(document,"script","twitter-wjs");</script>
-	</div>
-</div>
\ No newline at end of file
diff --git a/source/community.html.md.erb b/source/community.html.md.erb
new file mode 100644
index 0000000..9e780d8
--- /dev/null
+++ b/source/community.html.md.erb
@@ -0,0 +1,42 @@
+# Community
+
+<div class="row-fluid">
+  <div class="col-md-4">
+    <h3>Contributing</h3>
+    <h4 name="reportbugs">Report or track a bug</h4>
+    <p>Bugs can be reported on our <a href="http://issues.apache.org/jira/browse/AURORA">JIRA</a>
+       or raised as an issue on our <a href="https://github.com/apache/aurora/issues">GitHub</a> repository.</p>
+    <p>In order to create a new issue on JIRA you'll need to register for an account. A GitHub
+       account is required to raise issues on our repository.</p>
+
+    <h4 name="contribute">Submit a patch</h4>
+    <p>Please read our <a href="/documentation/latest/contributing/">contribution guide</a>
+       when submitting a patch. We welcome them!
+    </p>
+
+    <h3>Chat</h3>
+    <h4 name="slack">Slack</h4>
+    <p>Developers and users hang out in <code>#aurora</code> on <a href="https://mesos.slack.com/">
+       <code>mesos.slack.com</code></a>.</p>
+    <p>To request an invite for Slack please click <a href="https://mesos-slackin.herokuapp.com/">here</a>.</p>
+    <p>All Slack communication is publicly archived <a href="http://mesos.slackarchive.io/aurora/">here</a>.</p>
+  </div>
+  <div class="col-md-4">
+    <h3>Mailing lists</h3>
+    <p>
+    <% data.mailing_lists.lists.each do |list| %>
+      <strong><%= list.title %></strong><br />
+      <%= list.description %><br />
+      <a href="mailto:<%= list.address %>-subscribe@aurora.apache.org">Subscribe</a> |
+      <a href="mailto:<%= list.address %>-unsubscribe@aurora.apache.org">Unsubscribe</a> |
+      <a href="https://lists.apache.org/list.html?<%= list.address %>@aurora.apache.org">Archive</a>
+      <br /><br />
+    <% end %>
+    </p>
+  </div>
+  <div class="col-md-4">
+    <h3>Follow the Project</h3>
+    <a class="twitter-timeline" href="https://twitter.com/ApacheAurora">Tweets by ApacheAurora</a>
+    <script async src="//platform.twitter.com/widgets.js" charset="utf-8"></script>
+  </div>
+</div>
diff --git a/source/developers.html.md b/source/developers.html.md
deleted file mode 100644
index 4fe641b..0000000
--- a/source/developers.html.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# Developers
-<div class="row-fluid">
-  <h4 name="reportbugs">Report or track a bug</h4>
-  <p>New bugs can be reported on our <a href="http://issues.apache.org/jira/browse/AURORA">Jira issue tracker</a>. In order to create a new issue, you'll need to signup for an account.</p>
-
-  <h4 name="contribute">Contribute a core patch</h4>
-  <p>Follow our <a href="/docs/howtocontribute/">contribution guidelines</a> when submitting a patch.</p>
-
-  <h4 name="ircchannel">IRC Channel</h4>
-	<p>Many of the Aurora developers and users chat in the #aurora channel on irc.freenode.net.</p>
-	
-	<p>If you are new to IRC, you can use a <a href="http://webchat.freenode.net/?channels=#aurora">web-based client</a>.</p>
-</div>
diff --git a/source/docs/gettingstarted.html.md b/source/docs/gettingstarted.html.md
deleted file mode 100644
index 6355e7c..0000000
--- a/source/docs/gettingstarted.html.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Getting Started
-
-The Aurora 'Getting Started' guide is [currently available on github](https://github.com/apache/incubator-aurora/blob/master/README.md) while the project moves its documentation to the Apache Software Foundation.
\ No newline at end of file
diff --git a/source/docs/howtocontribute.html.md b/source/docs/howtocontribute.html.md
deleted file mode 100644
index f20db29..0000000
--- a/source/docs/howtocontribute.html.md
+++ /dev/null
@@ -1,50 +0,0 @@
-How To Contribute
-=================
-
-Getting your ReviewBoard Account
---------------------------------
-Go to https://reviews.apache.org and create an account.
-
-Setting up your ReviewBoard Environment
----------------------------------------
-Run `./rbt status`. The first time this runs it will bootstrap and you will be asked to login.
-Subsequent runs will cache your login credentials.
-
-Submitting a Patch for Review
------------------------------
-Post a review with `rbt`, fill out the fields in your browser and hit Publish.
-
-    ./rbt post -o -g
-
-Updating an Existing Review
----------------------------
-Incorporate review feedback, make some more commits, update your existing review, fill out the
-fields in your browser and hit Publish.
-
-    ./rbt post -o -r <RB_ID>
-
-Merging Your Own Review (Committers)
-------------------------------------
-Once you have shipits from the right committers, merge your changes in a single commit and mark
-the review as submitted. The typical workflow is:
-
-    git checkout master
-    git pull origin master
-    ./rbt patch -c <RB_ID>  # Verify the automatically-generated commit message looks sane,
-                            # editing if necessary.
-    git show master         # Verify everything looks sane
-    git push origin master
-    ./rbt close <RB_ID>
-
-Note that even if you're developing using feature branches you will not use `git merge` - each
-commit will be an atomic change accompanied by a ReviewBoard entry.
-
-Merging Someone Else's Review
------------------------------
-Sometimes you'll need to merge someone else's RB. The typical workflow for this is
-
-    git checkout master
-    git pull origin master
-    ./rbt patch -c <RB_ID>
-    git show master  # Verify everything looks sane, author is correct
-    git push origin master
\ No newline at end of file
diff --git a/source/documentation/0.10.0/build-system.md b/source/documentation/0.10.0/build-system.md
new file mode 100644
index 0000000..39c231d
--- /dev/null
+++ b/source/documentation/0.10.0/build-system.md
@@ -0,0 +1,100 @@
+The Python components of Aurora are built using [Pants](https://pantsbuild.github.io).
+
+Python Build Conventions
+========================
+The Python code is laid out according to the following conventions: 
+
+1. 1 `BUILD` per 3rd level directory. For a list of current top-level packages run:
+
+        % find src/main/python -maxdepth 3 -mindepth 3 -type d |\
+        while read dname; do echo $dname |\
+            sed 's@src/main/python/\(.*\)/\(.*\)/\(.*\).*@\1.\2.\3@'; done
+
+2.  Each `BUILD` file exports 1 
+    [`python_library`](https://pantsbuild.github.io/build_dictionary.html#bdict_python_library)
+    that provides a
+    [`setup_py`](https://pantsbuild.github.io/build_dictionary.html#setup_py)
+    containing each
+    [`python_binary`](https://pantsbuild.github.io/build_dictionary.html#python_binary)
+    in the `BUILD` file, named the same as the directory it's in so that it can be referenced
+    without a ':' character. The `sources` field in the `python_library` will almost always be
+    `rglobs('*.py')`.
+
+3.  Other BUILD files may only depend on this single public `python_library`
+    target. Any other target is considered a private implementation detail and
+    should be prefixed with an `_`.
+
+4.  `python_binary` targets are always named the same as the exported console script.
+
+5.  `python_binary` targets must have identical `dependencies` to the `python_library` exported
+    by the package and must use `entry_point`.
+
+    This means a PEX file generated by Pants will contain exactly the same files that will be
+    available on the `PYTHONPATH` in the case of `pip install` of the corresponding library
+    target. This will help our migration off of Pants in the future.
+
+Annotated example - apache.thermos.runner
+-----------------------------------------
+```
+% find src/main/python/apache/thermos/runner
+src/main/python/apache/thermos/runner
+src/main/python/apache/thermos/runner/__init__.py
+src/main/python/apache/thermos/runner/thermos_runner.py
+src/main/python/apache/thermos/runner/BUILD
+% cat src/main/python/apache/thermos/runner/BUILD
+# License boilerplate omitted
+import os
+
+
+# Private target so that a setup_py can exist without a circular dependency. Only targets within
+# this file should depend on this.
+python_library(
+  name = '_runner',
+  # The target covers every python file under this directory and subdirectories.
+  sources = rglobs('*.py'),
+  dependencies = [
+    '3rdparty/python:twitter.common.app',
+    '3rdparty/python:twitter.common.log',
+    # Source dependencies are always referenced without a ':'.
+    'src/main/python/apache/thermos/common',
+    'src/main/python/apache/thermos/config',
+    'src/main/python/apache/thermos/core',
+  ],
+)
+
+# Binary target for thermos_runner.pex. Nothing should depend on this - it's only used as an
+# argument to ./pants binary.
+python_binary(
+  name = 'thermos_runner',
+  # Use entry_point, not source so the files used here are the same ones tests see.
+  entry_point = 'apache.thermos.bin.thermos_runner',
+  dependencies = [
+    # Notice that we depend only on the single private target from this BUILD file here.
+    ':_runner',
+  ],
+)
+
+# The public library that everyone importing the runner symbols uses.
+# The test targets and any other dependent source code should depend on this.
+python_library(
+  name = 'runner',
+  dependencies = [
+    # Again, notice that we depend only on the single private target from this BUILD file here.
+    ':_runner',
+  ],
+  # We always provide a setup_py. This will cause any dependee libraries to automatically
+  # reference this library in their requirements.txt rather than copy the source files into their
+  # sdist.
+  provides = setup_py(
+    # Conventionally named and versioned.
+    name = 'apache.thermos.runner',
+    version = open(os.path.join(get_buildroot(), '.auroraversion')).read().strip().upper(),
+  ).with_binaries({
+    # Every binary in this file should also be repeated here.
+    # Always use the dict-form of .with_binaries so that commands with dashes in their names are
+    # supported.
+    # The console script name is always the same as the PEX with .pex stripped.
+    'thermos_runner': ':thermos_runner',
+  }),
+)
+```
diff --git a/source/documentation/latest/client-cluster-configuration.md b/source/documentation/0.10.0/client-cluster-configuration.md
similarity index 100%
rename from source/documentation/latest/client-cluster-configuration.md
rename to source/documentation/0.10.0/client-cluster-configuration.md
diff --git a/source/documentation/0.10.0/client-commands.md b/source/documentation/0.10.0/client-commands.md
new file mode 100644
index 0000000..7cda8ca
--- /dev/null
+++ b/source/documentation/0.10.0/client-commands.md
@@ -0,0 +1,411 @@
+Aurora Client Commands
+======================
+
+- [Introduction](#introduction)
+- [Cluster Configuration](#cluster-configuration)
+- [Job Keys](#job-keys)
+- [Modifying Aurora Client Commands](#modifying-aurora-client-commands)
+- [Regular Jobs](#regular-jobs)
+    - [Creating and Running a Job](#creating-and-running-a-job)
+    - [Running a Command On a Running Job](#running-a-command-on-a-running-job)
+    - [Killing a Job](#killing-a-job)
+    - [Updating a Job](#updating-a-job)
+        - [Coordinated job updates](#coordinated-job-updates)
+        - [Client-orchestrated updates (deprecated)](#client-orchestrated-updates-deprecated)
+    - [Renaming a Job](#renaming-a-job)
+    - [Restarting Jobs](#restarting-jobs)
+- [Cron Jobs](#cron-jobs)
+- [Comparing Jobs](#comparing-jobs)
+- [Viewing/Examining Jobs](#viewingexamining-jobs)
+    - [Listing Jobs](#listing-jobs)
+    - [Inspecting a Job](#inspecting-a-job)
+    - [Versions](#versions)
+    - [Checking Your Quota](#checking-your-quota)
+    - [Finding a Job on Web UI](#finding-a-job-on-web-ui)
+    - [Getting Job Status](#getting-job-status)
+    - [Opening the Web UI](#opening-the-web-ui)
+    - [SSHing to a Specific Task Machine](#sshing-to-a-specific-task-machine)
+    - [Templating Command Arguments](#templating-command-arguments)
+
+Introduction
+------------
+
+Once you have written an `.aurora` configuration file that describes
+your Job and its parameters and functionality, you interact with Aurora
+using Aurora Client commands. This document describes all of these commands
+and how and when to use them. All Aurora Client commands start with
+`aurora`, followed by the name of the specific command and its
+arguments.
+
+*Job keys* are a very common argument to Aurora commands, as well as the
+gateway to useful information about a Job. Before using Aurora, you
+should read the next section which describes them in detail. The section
+after that briefly describes how you can modify the behavior of certain
+Aurora Client commands, linking to a detailed document about how to do
+that.
+
+This is followed by the Regular Jobs section, which describes the basic
+Client commands for creating, running, and manipulating Aurora Jobs.
+After that are sections on Comparing Jobs and Viewing/Examining Jobs. In
+other words, various commands for getting information and metadata about
+Aurora Jobs.
+
+Cluster Configuration
+---------------------
+
+The client must be able to find a configuration file that specifies available clusters. This file
+declares shorthand names for clusters, which are in turn referenced by job configuration files
+and client commands.
+
+The client will load at most two configuration files, making both of their defined clusters
+available. The first is intended to be a system-installed cluster, using the path specified in
+the environment variable `AURORA_CONFIG_ROOT`, defaulting to `/etc/aurora/clusters.json` if the
+environment variable is not set. The second is a user-installed file, located at
+`~/.aurora/clusters.json`.
+
+A cluster configuration is formatted as JSON.  The simplest cluster configuration is one that
+communicates with a single (non-leader-elected) scheduler.  For example:
+
+```javascript
+[{
+  "name": "example",
+  "scheduler_uri": "localhost:55555",
+}]
+```
+
+A configuration for a leader-elected scheduler would contain something like:
+
+```javascript
+[{
+  "name": "example",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler"
+}]
+```
+
+For more details on cluster configuration see the
+[Client Cluster Configuration](/documentation/0.10.0/client-cluster-configuration/) documentation.
+
+Job Keys
+--------
+
+A job key is a unique system-wide identifier for an Aurora-managed
+Job, for example `cluster1/web-team/test/experiment204`. It is a 4-tuple
+consisting of, in order, *cluster*, *role*, *environment*, and
+*jobname*, separated by /s. Cluster is the name of an Aurora
+cluster. Role is the Unix service account under which the Job
+runs. Environment is a namespace component like `devel`, `test`,
+`prod`, or `stagingN`. Jobname is the Job's name.
+
+The combination of all four values uniquely specifies the Job. If any
+one value is different from that of another job key, the two job keys
+refer to different Jobs. For example, job key
+`cluster1/tyg/prod/workhorse` is different from
+`cluster1/tyg/prod/workcamel` is different from
+`cluster2/tyg/prod/workhorse` is different from
+`cluster2/foo/prod/workhorse` is different from
+`cluster1/tyg/test/workhorse`.
+
+Role names are user accounts existing on the slave machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
+Modifying Aurora Client Commands
+--------------------------------
+
+For certain Aurora Client commands, you can define hook methods that run
+either before or after an action that takes place during the command's
+execution, as well as based on whether the action finished successfully or failed
+during execution. Basically, a hook is code that lets you extend the
+command's actions. The hook executes on the client side, specifically on
+the machine executing Aurora commands.
+
+Hooks can be associated with these Aurora Client commands:
+
+  - `job cancel-update`
+  - `job create`
+  - `job kill`
+  - `job restart`
+  - `job update`
+
+The process for writing and activating them is complex enough
+that we explain it in a devoted document, [Hooks for Aurora Client API](/documentation/0.10.0/hooks/).
+
+Regular Jobs
+------------
+
+This section covers Aurora commands related to running, killing,
+renaming, updating, and restarting a basic Aurora Job.
+
+### Creating and Running a Job
+
+    aurora job create <job key> <configuration file>
+
+Creates and then runs a Job with the specified job key based on a `.aurora` configuration file.
+The configuration file may also contain and activate hook definitions. A minimal configuration is
+sketched below.
+
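+A minimal sketch of such a configuration (the cluster, role, and resource values are illustrative
+and assume a `devcluster` entry in `clusters.json`):
+
+```python
+# hello_world.aurora
+hello = Process(
+  name = 'hello',
+  cmdline = 'echo hello world && sleep 60')
+
+task = SequentialTask(
+  processes = [hello],
+  resources = Resources(cpu = 0.1, ram = 16*MB, disk = 32*MB))
+
+jobs = [Service(
+  cluster = 'devcluster',
+  environment = 'prod',
+  role = 'www-data',
+  name = 'hello',
+  task = task)]
+```
+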
+### Running a Command On a Running Job
+
+    aurora task run CLUSTER/ROLE/ENV/NAME[/INSTANCES] <cmd>
+
+Runs a shell command on all machines currently hosting shards of a
+single Job.
+
+`run` supports the same command line wildcards used to populate a Job's
+commands; i.e. anything in the `{{mesos.*}}` and `{{thermos.*}}`
+namespaces.
+
+### Killing a Job
+
+    aurora job killall CLUSTER/ROLE/ENV/NAME
+
+Kills all Tasks associated with the specified Job, blocking until all
+are terminated. Defaults to killing all instances in the Job.
+
+The `<configuration file>` argument for `kill` is optional. Use it only
+if it contains hook definitions and activations that affect the
+kill command.
+
+### Updating a Job
+
+There are several sub-commands to manage job updates:
+
+    aurora update start <job key> <configuration file>
+    aurora update info <job key>
+    aurora update pause <job key>
+    aurora update resume <job key>
+    aurora update abort <job key>
+    aurora update list <cluster>
+
+When you `start` a job update, the command will return once it has sent the
+instructions to the scheduler.  At that point, you may view detailed
+progress for the update with the `info` subcommand, in addition to viewing
+graphical progress in the web browser.  You may also get a full listing of
+in-progress updates in a cluster with `list`.
+
+Once an update has been started, you can `pause` to keep the update but halt
+progress.  This can be useful for doing things like debugging a partially-updated
+job to determine whether you would like to proceed.  You can `resume` to
+proceed.
+
+You may `abort` a job update regardless of the state it is in. This will
+instruct the scheduler to completely abandon the job update and leave the job
+in the current (possibly partially-updated) state.
+
+#### Coordinated job updates
+
+Some Aurora services may benefit from having more control over updates by explicitly
+acknowledging ("heartbeating") job update progress. This may be helpful for mission-critical
+service updates where explicit job health monitoring is vital during the entire job update
+lifecycle. Such job updates would rely on an external service (or a custom client) periodically
+pulsing an active coordinated job update via a
+[pulseJobUpdate RPC](https://github.com/apache/aurora/blob/#{git_tag}/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+A coordinated update is defined by setting a positive
+[pulse_interval_secs](/documentation/0.10.0/configuration-reference/#updateconfig-objects) value in the job
+configuration file. If no pulses are received within the specified interval, the update will be blocked. A blocked
+update is unable to continue rolling forward (or rolling back) but retains its active status.
+It may only be unblocked by a fresh `pulseJobUpdate` call.
+
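+A one-line sketch in the Aurora DSL (the value is illustrative):
+
+```python
+update_config = UpdateConfig(pulse_interval_secs = 60)  # require a pulse at least every minute
+```
+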
+NOTE: A coordinated update starts in `ROLL_FORWARD_AWAITING_PULSE` state and will not make any
+progress until the first pulse arrives. However, a paused update (`ROLL_FORWARD_PAUSED` or
+`ROLL_BACK_PAUSED`) is still considered active and upon resuming will immediately make progress
+provided the pulse interval has not expired.
+
+#### Client-orchestrated updates (deprecated)
+
+*Note: This feature is deprecated and will be removed in 0.9.0.
+Please use aurora update instead.*
+
+    aurora job update CLUSTER/ROLE/ENV/NAME[/INSTANCES] <configuration file>
+    aurora job cancel-update CLUSTER/ROLE/ENV/NAME
+
+Given a running job, does a rolling update to reflect a new
+configuration version. Only updates Tasks in the Job with a changed
+configuration. You can further restrict the operated on Tasks by specifying
+specific instances that should be updated.
+
+You may want to run `aurora job diff` beforehand to validate which Tasks
+have different configurations.
+
+Jobs being updated are locked to be sure the update finishes without
+disruption. If the update abnormally terminates, the lock may stay
+around and cause failure of subsequent update attempts.
+`aurora job cancel-update` unlocks the Job specified by
+its `job_key` argument. Be sure you don't issue `job cancel-update` when
+another user is working with the specified Job.
+
+The `<configuration file>` argument for `job cancel-update` is optional. Use
+it only if it contains hook definitions and activations that affect the
+`cancel_update` command. The `<configuration file>` argument for
+`update` is required, but in addition to a new configuration it can be
+used to define and activate hooks for `job update`.
+
+### Renaming a Job
+
+Renaming is a tricky operation as downstream clients must be informed of
+the new name. A conservative approach
+to renaming suitable for production services is:
+
+1.  Modify the Aurora configuration file to change the role,
+    environment, and/or name as appropriate to the standardized naming
+    scheme.
+2.  Check that only these naming components have changed
+    with `aurora job diff`.
+
+        aurora job diff CLUSTER/ROLE/ENV/NAME <job_configuration>
+
+3.  Create the (identical) job at the new key. You may need to request a
+    temporary quota increase.
+
+        aurora job create CLUSTER/ROLE/ENV/NEW_NAME <job_configuration>
+
+4.  Migrate all clients over to the new job key. Update all links and
+    dashboards. Ensure that both job keys run identical versions of the
+    code while in this state.
+5.  After verifying that all clients have successfully moved over, kill
+    the old job.
+
+        aurora job killall CLUSTER/ROLE/ENV/NAME
+
+6.  If you received a temporary quota increase, be sure to let the
+    powers that be know you no longer need the additional capacity.
+
+### Restarting Jobs
+
+`restart` restarts all shards of the Job identified by a job key:
+
+    aurora job restart CLUSTER/ROLE/ENV/NAME[/INSTANCES]
+
+Restarts are controlled on the client side, so aborting
+the `job restart` command halts the restart operation.
+
+**Note**: `job restart` only applies its command line arguments; it neither
+uses nor is affected by `update.config`. Restarting
+does ***not*** involve a configuration change. To update the
+configuration, use `update.config`.
+
+The `--config` argument for restart is optional. Use it only
+if it contains hook definitions and activations that affect the
+`job restart` command.
+
+Cron Jobs
+---------
+
+You can manage cron jobs using the `aurora cron` command.  Please see
+[cron-jobs.md](/documentation/0.10.0/cron-jobs/) for more details.
+
+You will see various commands and options relating to cron jobs in
+`aurora -h` and similar. Ignore them, as they're not yet implemented.
+
+Comparing Jobs
+--------------
+
+    aurora job diff CLUSTER/ROLE/ENV/NAME <job configuration>
+
+Compares a job configuration against a running job. By default the diff
+is determined using `diff`, though you may choose an alternate
+diff program by specifying the `DIFF_VIEWER` environment variable.
+
+Viewing/Examining Jobs
+----------------------
+
+Above we discussed creating, killing, and updating Jobs. Here we discuss
+how to view and examine Jobs.
+
+### Listing Jobs
+
+    aurora config list <job configuration>
+
+Lists all Jobs registered with the Aurora scheduler in the named cluster for the named role.
+
+### Inspecting a Job
+
+    aurora job inspect CLUSTER/ROLE/ENV/NAME <job configuration>
+
+`inspect` verifies that its specified job can be parsed from a
+configuration file, and displays the parsed configuration.
+
+### Checking Your Quota
+
+    aurora quota get CLUSTER/ROLE
+
+Prints the production quota allocated to the role in the given
+cluster. Only non-[dedicated](/documentation/0.10.0/deploying-aurora-scheduler/#dedicated-attribute)
+[production](/documentation/0.10.0/configuration-reference/#job-objects) jobs consume quota.
+
+### Finding a Job on Web UI
+
+When you create a job, part of the output response contains a URL that goes
+to the job's scheduler UI page. For example:
+
+    vagrant@precise64:~$ aurora job create devcluster/www-data/prod/hello /vagrant/examples/jobs/hello_world.aurora
+    INFO] Creating job hello
+    INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+    INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+You can go to the scheduler UI page for this job via `http://precise64:8081/scheduler/www-data/prod/hello`.
+You can go to the overall scheduler UI page by trimming that URL down to the part ending at `scheduler`: `http://precise64:8081/scheduler`.
+
+Once you click through to a role page, you see Jobs arranged
+separately by pending jobs, active jobs and finished jobs.
+Jobs are arranged by role, typically a service account for
+production jobs and user accounts for test or development jobs.
+
+### Getting Job Status
+
+    aurora job status <job_key>
+
+Returns the status of recent tasks associated with the Job specified by
+`job_key` in its supplied cluster. Typically this includes
+a mix of active tasks (running or assigned) and inactive tasks
+(successful, failed, and lost.)
+
+### Opening the Web UI
+
+Use the Job's web UI scheduler URL or the `aurora status` command to find out on which
+machines individual tasks are scheduled. You can open the web UI via the
+`open` command if invoked from your machine:
+
+    aurora job open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+If only the cluster is specified, it goes directly to that cluster's
+scheduler main page. If the role is specified, it goes to the top-level
+role page. If the full job key is specified, it goes directly to the job
+page where you can inspect individual tasks.
+
+### SSHing to a Specific Task Machine
+
+    aurora task ssh <job_key> <shard number>
+
+You can have the Aurora client ssh directly to the machine that has been
+assigned a particular Job/shard number. This may be useful for quickly
+diagnosing issues such as performance issues or abnormal behavior on a
+particular machine.
+
+### Templating Command Arguments
+
+    aurora task run [-e] [-t THREADS] <job_key> -- <<command-line>>
+
+Given a job specification, run the supplied command on all hosts and
+return the output. You may use the standard Mustache templating rules:
+
+- `{{thermos.ports[name]}}` substitutes the specific named port of the
+  task assigned to this machine
+- `{{mesos.instance}}` substitutes the shard id of the job's task
+  assigned to this machine
+- `{{thermos.task_id}}` substitutes the task id of the job's task
+  assigned to this machine
+
+For example, the following type of pattern can be a powerful diagnostic
+tool:
+
+    aurora task run -t5 cluster1/tyg/devel/seizure -- \
+      'curl -s -m1 localhost:{{thermos.ports[http]}}/vars | grep uptime'
+
+By default, the command runs in the Task's sandbox. The `-e` option can
+run the command in the executor's sandbox. This is mostly useful for
+Aurora administrators.
+
+You can parallelize the runs by using the `-t` option.
diff --git a/source/documentation/0.10.0/committers.md b/source/documentation/0.10.0/committers.md
new file mode 100644
index 0000000..e6b1646
--- /dev/null
+++ b/source/documentation/0.10.0/committers.md
@@ -0,0 +1,79 @@
+Setting up your email account
+-----------------------------
+Once your Apache ID has been set up, you can configure your account, add SSH keys, and set up an
+email forwarding address at
+
+  http://id.apache.org
+
+Additional instructions for setting up your new committer email can be found at
+
+  http://www.apache.org/dev/user-email.html
+
+The recommended setup is to configure all services (mailing lists, JIRA, ReviewBoard) to send
+emails to your @apache.org email address.
+
+
+Creating a gpg key for releases
+-------------------------------
+In order to create a release candidate you will need a gpg key published to an external key server
+and that key will need to be added to our KEYS file as well.
+
+1. Create a key:
+
+               gpg --gen-key
+
+2. Add your gpg key to the Apache Aurora KEYS file:
+
+               git clone https://gitbox.apache.org/repos/asf/aurora.git
+               (gpg --list-sigs <KEY ID> && gpg --armor --export <KEY ID>) >> KEYS
+               git add KEYS && git commit -m "Adding gpg key for <APACHE ID>"
+               ./rbt post -o -g
+
+3. Publish the key to an external key server:
+
+               gpg --keyserver pgp.mit.edu --send-keys <KEY ID>
+
+4. Propagate the changes to the KEYS file to the Apache Aurora svn dist locations listed below:
+
+               https://dist.apache.org/repos/dist/dev/aurora/KEYS
+               https://dist.apache.org/repos/dist/release/aurora/KEYS
+
+5. Add your key to git config for use with the release scripts:
+
+               git config --global user.signingkey <KEY ID>
+
+
+Creating a release
+------------------
+The following will guide you through the steps to create a release candidate, vote, and finally an
+official Apache Aurora release. Before starting, your gpg key should be in the KEYS file and you
+must have access to commit to the dist.a.o repositories.
+
+1. Ensure that all issues resolved for this release candidate are tagged with the correct Fix
+Version in Jira, the changelog script will use this to generate the CHANGELOG in step #2.
+
+2. Create a release candidate. This will automatically update the CHANGELOG and commit it, create a
+branch and update the current version within the trunk. To create a minor version update and publish
+it, run
+
+               ./build-support/release/release-candidate -l m -p
+
+3. Update, if necessary, the draft email created from the `release-candidate` script in step #2 and
+send the [VOTE] email to the dev@ and private@ mailing lists. You can verify the release signature
+and checksums by running
+
+               ./build-support/release/verify-release-candidate
+
+4. Wait for the vote to complete. If the vote fails, address any issues and go back to step #1 and
+run again; this time use the -r flag to increment the release candidate version. This will
+automatically clean up the release candidate rc0 branch and source distribution.
+
+               ./build-support/release/release-candidate -l m -r 1 -p
+
+5. Once the vote has successfully passed, create the release
+
+               ./build-support/release/release
+
+6. Update the draft email created from the `release` script in step #5 to include the Apache IDs for
+all binding votes and send the [RESULT][VOTE] email to the dev@ and private@ mailing lists.
+
diff --git a/source/documentation/0.10.0/configuration-reference.md b/source/documentation/0.10.0/configuration-reference.md
new file mode 100644
index 0000000..ff613be
--- /dev/null
+++ b/source/documentation/0.10.0/configuration-reference.md
@@ -0,0 +1,634 @@
+Aurora + Thermos Configuration Reference
+========================================
+
+- [Aurora + Thermos Configuration Reference](#aurora--thermos-configuration-reference)
+- [Introduction](#introduction)
+- [Process Schema](#process-schema)
+    - [Process Objects](#process-objects)
+      - [name](#name)
+      - [cmdline](#cmdline)
+      - [max_failures](#max_failures)
+      - [daemon](#daemon)
+      - [ephemeral](#ephemeral)
+      - [min_duration](#min_duration)
+      - [final](#final)
+- [Task Schema](#task-schema)
+    - [Task Object](#task-object)
+      - [name](#name-1)
+      - [processes](#processes)
+        - [constraints](#constraints)
+      - [resources](#resources)
+      - [max_failures](#max_failures-1)
+      - [max_concurrency](#max_concurrency)
+      - [finalization_wait](#finalization_wait)
+    - [Constraint Object](#constraint-object)
+    - [Resource Object](#resource-object)
+- [Job Schema](#job-schema)
+    - [Job Objects](#job-objects)
+    - [Services](#services)
+    - [Revocable Jobs](#revocable-jobs)
+    - [UpdateConfig Objects](#updateconfig-objects)
+    - [HealthCheckConfig Objects](#healthcheckconfig-objects)
+    - [Announcer Objects](#announcer-objects)
+    - [Container Objects](#container)
+    - [LifecycleConfig Objects](#lifecycleconfig-objects)
+- [Specifying Scheduling Constraints](#specifying-scheduling-constraints)
+- [Template Namespaces](#template-namespaces)
+    - [mesos Namespace](#mesos-namespace)
+    - [thermos Namespace](#thermos-namespace)
+- [Basic Examples](#basic-examples)
+    - [hello_world.aurora](#hello_worldaurora)
+    - [Environment Tailoring](#environment-tailoring)
+      - [hello_world_productionized.aurora](#hello_world_productionizedaurora)
+
+Introduction
+============
+
+Don't know where to start? The Aurora configuration schema is very
+powerful, and configurations can become quite complex for advanced use
+cases.
+
+For examples of simple configurations to get something up and running
+quickly, check out the [Tutorial](/documentation/0.10.0/tutorial/). When you feel comfortable with the basics, move
+on to the [Configuration Tutorial](/documentation/0.10.0/configuration-tutorial/) for more in-depth coverage of
+configuration design.
+
+For additional basic configuration examples, see [the end of this document](#basic-examples).
+
+Process Schema
+==============
+
+Process objects consist of required `name` and `cmdline` attributes. You can customize Process
+behavior with its optional attributes. Remember, Processes are handled by Thermos.
+
+### Process Objects
+
+  **Attribute Name**  | **Type**    | **Description**
+  ------------------- | :---------: | ---------------------------------
+   **name**           | String      | Process name (Required)
+   **cmdline**        | String      | Command line (Required)
+   **max_failures**   | Integer     | Maximum process failures (Default: 1)
+   **daemon**         | Boolean     | When True, this is a daemon process. (Default: False)
+   **ephemeral**      | Boolean     | When True, this is an ephemeral process. (Default: False)
+   **min_duration**   | Integer     | Minimum duration between process restarts in seconds. (Default: 15)
+   **final**          | Boolean     | When True, this process is a finalizing one that should run last. (Default: False)
+
+#### name
+
+The name is any valid UNIX filename string (specifically no
+slashes, NULLs or leading periods). Within a Task object, each Process name
+must be unique.
+
+#### cmdline
+
+The command line run by the process. The command line is invoked in a bash
+subshell, so it can involve full-blown bash scripts. However, nothing is
+supplied for command-line arguments, so `$*` is unspecified.
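+
+As a minimal sketch (the name and command are illustrative), a Process needs
+only these two attributes:
+
+    hello = Process(
+      name = 'hello',
+      # Runs in a bash subshell, so shell constructs such as && are fine.
+      cmdline = 'echo hello world && sleep 10')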
+
+#### max_failures
+
+The maximum number of failures (non-zero exit statuses) this process can
+have before being marked permanently failed and not retried. If a
+process permanently fails, Thermos looks at the failure limit of the task
+containing the process (usually 1) to determine if the task has
+failed as well.
+
+Setting `max_failures` to 0 makes the process retry
+indefinitely until it achieves a successful (zero) exit status.
+It retries at most once every `min_duration` seconds to prevent
+an effective denial of service attack on the coordinating Thermos scheduler.
+
+#### daemon
+
+By default, Thermos processes are non-daemon. If `daemon` is set to True, a
+successful (zero) exit status does not prevent future process runs.
+Instead, the process reinvokes after `min_duration` seconds.
+However, the maximum failure limit still applies. A combination of
+`daemon=True` and `max_failures=0` causes a process to retry
+indefinitely regardless of exit status. This should be avoided
+for very short-lived processes because of the accumulation of
+checkpointed state for each process run. When running in Mesos
+specifically, `max_failures` is capped at 100.
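+
+For example, a hypothetical log-rotation helper could be kept alive for the
+life of the task (the script name is illustrative):
+
+    logrotate = Process(
+      name = 'logrotate',
+      cmdline = './rotate_logs.sh',  # hypothetical helper script
+      # Re-run even after successful exits; failures still count
+      # against max_failures.
+      daemon = True,
+      min_duration = 60)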
+
+#### ephemeral
+
+By default, Thermos processes are non-ephemeral. If `ephemeral` is set to
+True, the process' status is not used to determine if its containing task
+has completed. For example, consider a task with a non-ephemeral
+webserver process and an ephemeral logsaver process
+that periodically checkpoints its log files to a centralized data store.
+The task is considered finished once the webserver process has
+completed, regardless of the logsaver's current status.
+
+#### min_duration
+
+Processes may succeed or fail multiple times during a single task's
+duration. Each of these is called a *process run*. `min_duration` is
+the minimum number of seconds the scheduler waits before running the
+same process.
+
+#### final
+
+Processes can be grouped into two classes: ordinary processes and
+finalizing processes. By default, Thermos processes are ordinary. They
+run as long as the task is considered healthy (i.e., no failure
+limits have been reached.) But once all regular Thermos processes
+finish or the task reaches a certain failure threshold, it
+moves into a "finalization" stage and runs all finalizing
+processes. These are typically processes necessary for cleaning up the
+task, such as log checkpointers, or perhaps e-mail notifications that
+the task completed.
+
+Finalizing processes may not depend upon ordinary processes or
+vice-versa, however finalizing processes may depend upon other
+finalizing processes and otherwise run as a typical process
+schedule.
+
+Task Schema
+===========
+
+Tasks fundamentally consist of a `name` and a list of Process objects stored as the
+value of the `processes` attribute. Processes can be further constrained with
+`constraints`. By default, `name`'s value inherits from the first Process in the
+`processes` list, so for simple `Task` objects with one Process, `name`
+can be omitted. In Mesos, `resources` is also required.
+
+### Task Object
+
+   **param**               | **type**                         | **description**
+   ---------               | :---------:                      | ---------------
+   ```name```              | String                           | Process name (Required) (Default: ```processes[0].name```)
+   ```processes```         | List of ```Process``` objects    | List of ```Process``` objects bound to this task. (Required)
+   ```constraints```       | List of ```Constraint``` objects | List of ```Constraint``` objects constraining processes.
+   ```resources```         | ```Resource``` object            | Resource footprint. (Required)
+   ```max_failures```      | Integer                          | Maximum process failures before being considered failed (Default: 1)
+   ```max_concurrency```   | Integer                          | Maximum number of concurrent processes (Default: 0, unlimited concurrency.)
+   ```finalization_wait``` | Integer                          | Amount of time allocated for finalizing processes, in seconds. (Default: 30)
+
+#### name
+`name` is a string denoting the name of this task. It defaults to the name of the first Process in
+the list of Processes associated with the `processes` attribute.
+
+#### processes
+
+`processes` is an unordered list of `Process` objects. To constrain the order
+in which they run, use `constraints`.
+
+##### constraints
+
+A list of `Constraint` objects. Currently it supports only one type,
+the `order` constraint. `order` is a list of process names
+that should run in the order given. For example,
+
+        process = Process(cmdline = "echo hello {{name}}")
+        task = Task(name = "echoes",
+                    processes = [process(name = "jim"), process(name = "bob")],
+                    constraints = [Constraint(order = ["jim", "bob"])])
+
+Constraints can be supplied ad-hoc and in duplicate. Not all
+Processes need be constrained, however Tasks with cycles are
+rejected by the Thermos scheduler.
+
+Use the `order` function as shorthand to generate `Constraint` lists.
+The following:
+
+        order(process1, process2)
+
+is shorthand for
+
+        [Constraint(order = [process1.name(), process2.name()])]
+
+The `order` function accepts Process name strings `('foo', 'bar')` or the processes
+themselves, e.g. `foo=Process(name='foo', ...)`, `bar=Process(name='bar', ...)`,
+`constraints=order(foo, bar)`.
+
+
+#### resources
+
+Takes a `Resource` object, which specifies the amounts of CPU, memory, and disk space resources
+to allocate to the Task.
+
+#### max_failures
+
+`max_failures` is the number of failed processes needed for the `Task` to be
+marked as failed.
+
+For example, assume a Task has two Processes and a `max_failures` value of `2`:
+
+        template = Process(max_failures=10)
+        task = Task(
+          name = "fail",
+          processes = [
+             template(name = "failing", cmdline = "exit 1"),
+             template(name = "succeeding", cmdline = "exit 0")
+          ],
+          max_failures=2)
+
+The `failing` Process could fail 10 times before being marked as permanently
+failed, and the `succeeding` Process could succeed on the first run. However,
+the task would succeed despite only allowing for two failed processes. To be more
+specific, there would be 10 failed process runs yet 1 failed process. Both processes
+would have to fail for the Task to fail.
+
+
+
+#### max_concurrency
+
+For Tasks with a number of expensive but otherwise independent
+processes, you may want to limit the amount of concurrency
+the Thermos scheduler provides rather than artificially constraining
+it via `order` constraints. For example, a test framework may
+generate a task with 100 test run processes, but wants to run it on
+a machine with only 4 cores. You can limit the amount of parallelism to
+4 by setting `max_concurrency=4` in your task configuration.
+
+For example, the following task spawns 180 Processes ("mappers")
+to compute individual elements of a 180 degree sine table, all dependent
+upon one final Process ("reducer") to tabulate the results:
+
+    def make_mapper(id):
+      return Process(
+        name = "mapper%03d" % id,
+        cmdline = "echo 'scale=50;s(%d\*4\*a(1)/180)' | bc -l >
+                   temp.sine_table.%03d" % (id, id))
+
+    def make_reducer():
+      return Process(name = "reducer",
+                     cmdline = "cat temp.* | nl > sine_table.txt && rm -f temp.*")
+
+    processes = map(make_mapper, range(180))
+
+    task = Task(
+      name = "mapreduce",
+      processes = processes + [make_reducer()],
+      constraints = [Constraint(order = [mapper.name(), 'reducer']) for mapper
+                     in processes],
+      max_concurrency = 8)
+
+#### finalization_wait
+
+Tasks have three active stages: `ACTIVE`, `CLEANING`, and `FINALIZING`. The
+`ACTIVE` stage is when ordinary processes run. This stage lasts as
+long as Processes are running and the Task is healthy. The moment either
+all Processes have finished successfully or the Task has reached a
+maximum Process failure limit, it goes into the `CLEANING` stage and sends
+SIGTERMs to all currently running Processes and their process trees.
+Once all Processes have terminated, the Task goes into `FINALIZING` stage
+and invokes the schedule of all Processes with the "final" attribute set to True.
+
+This whole process from the end of `ACTIVE` stage to the end of `FINALIZING`
+must happen within `finalization_wait` seconds. If it does not
+finish during that time, all remaining Processes are sent SIGKILLs
+(or if they depend upon uncompleted Processes, are
+never invoked.)
+
+Client applications with higher priority may force a shorter
+finalization wait (e.g. through parameters to `thermos kill`), so this
+is mostly a best-effort signal.
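+
+As a sketch (the names and commands are illustrative), a task that checkpoints
+its logs during finalization might look like:
+
+    main = Process(name = 'app', cmdline = 'java -jar app.jar')
+    checkpoint = Process(
+      name = 'checkpoint_logs',
+      cmdline = './copy_logs_to_hdfs.sh',  # hypothetical helper script
+      # Runs only once all ordinary processes have terminated.
+      final = True)
+
+    task = Task(
+      name = 'app',
+      processes = [main, checkpoint],
+      resources = Resources(cpu = 1.0, ram = 1*GB, disk = 1*GB),
+      # Allow up to 60 seconds for the CLEANING and FINALIZING stages.
+      finalization_wait = 60)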
+
+
+### Constraint Object
+
+Constraint objects currently support only a single ordering constraint, `order`,
+which specifies that its processes run sequentially in the order given. By
+default, all processes run in parallel when bound to a `Task` without
+ordering constraints.
+
+   param | type           | description
+   ----- | :----:         | -----------
+   order | List of String | List of processes by name (String) that should be run serially.
+
+### Resource Object
+
+Specifies the amount of CPU, RAM, and disk resources the task needs. See the
+[Resource Isolation document](/documentation/0.10.0/resources/) for suggested values and to understand how
+resources are allocated.
+
+  param      | type    | description
+  -----      | :----:  | -----------
+  ```cpu```  | Float   | Fractional number of cores required by the task.
+  ```ram```  | Integer | Bytes of RAM required by the task.
+  ```disk``` | Integer | Bytes of disk required by the task.
+
+
+Job Schema
+==========
+
+### Job Objects
+
+   name | type | description
+   ------ | :-------: | -------
+  ```task``` | Task | The Task object to bind to this job. Required.
+  ```name``` | String | Job name. (Default: inherited from the task attribute's name)
+  ```role``` | String | Job role account. Required.
+  ```cluster``` | String | Cluster in which this job is scheduled. Required.
+   ```environment``` | String | Job environment, default ```devel```. Must be one of ```prod```, ```devel```, ```test``` or ```staging<number>```.
+  ```contact``` | String | Best email address to reach the owner of the job. For production jobs, this is usually a team mailing list.
+  ```instances```| Integer | Number of instances (sometimes referred to as replicas or shards) of the task to create. (Default: 1)
+   ```cron_schedule``` | String | Cron schedule in cron format. May only be used with non-service jobs. See [Cron Jobs](/documentation/0.10.0/cron-jobs/) for more information. Default: None (not a cron job.)
+  ```cron_collision_policy``` | String | Policy to use when a cron job is triggered while a previous run is still active. ```KILL_EXISTING```: kill the previous run and schedule the new run. ```CANCEL_NEW```: let the previous run continue and cancel the new run. (Default: KILL_EXISTING)
+  ```update_config``` | ```UpdateConfig``` object | Parameters for controlling the rate and policy of rolling updates.
+  ```constraints``` | dict | Scheduling constraints for the tasks. See the section on the [constraint specification language](#specifying-scheduling-constraints)
+  ```service``` | Boolean | If True, restart tasks regardless of success or failure. (Default: False)
+  ```max_task_failures``` | Integer | Maximum number of failures after which the task is considered to have failed (Default: 1) Set to -1 to allow for infinite failures
+  ```priority``` | Integer | Preemption priority to give the task (Default 0). Tasks with higher priorities may preempt tasks at lower priorities.
+  ```production``` | Boolean |  Whether or not this is a production task that may [preempt](/documentation/0.10.0/resources/#task-preemption) other tasks (Default: False). Production job role must have the appropriate [quota](/documentation/0.10.0/resources/#resource-quota).
+  ```health_check_config``` | ```HealthCheckConfig``` object | Parameters for controlling a task's health checks via HTTP. Only used if a health port was assigned with a command line wildcard.
+  ```container``` | ```Container``` object | An optional container to run all processes inside of.
+  ```lifecycle``` | ```LifecycleConfig``` object | An optional task lifecycle configuration that dictates commands to be executed on startup/teardown.  HTTP lifecycle is enabled by default if the "health" port is requested.  See [LifecycleConfig Objects](#lifecycleconfig-objects) for more information.
+  ```tier``` | String | Task tier type. When set to `revocable` requires the task to run with Mesos revocable resources. This is work [in progress](https://issues.apache.org/jira/browse/AURORA-1343) and is currently only supported for the revocable tasks. The ultimate goal is to simplify task configuration by hiding various configuration knobs behind a task tier definition. See AURORA-1343 and AURORA-1443 for more details.
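+
+As a sketch of how several of these attributes combine (all values are
+illustrative, and `hello_task` is a hypothetical Task defined elsewhere):
+
+    hello_job = Job(
+      cluster = 'cluster1',
+      role = 'www-data',
+      environment = 'prod',
+      name = 'hello',
+      instances = 4,
+      contact = 'team@example.com',
+      service = True,         # restart tasks regardless of exit status
+      production = True,      # consumes the role's production quota
+      task = hello_task)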
+
+### Services
+
+Jobs with the `service` flag set to True are called Services. The `Service`
+alias can be used as shorthand for `Job` with `service=True`.
+Services are differentiated from non-service Jobs in that tasks
+always restart on completion, whether successful or unsuccessful.
+Jobs without the service bit set only restart up to
+`max_task_failures` times and only if they terminated unsuccessfully
+either due to human error or machine failure.
+
+### Revocable Jobs
+
+**WARNING**: This feature is currently in alpha status. Do not use it in production clusters!
+
+Mesos [supports a concept of revocable tasks](http://mesos.apache.org/documentation/latest/oversubscription/)
+by oversubscribing machine resources by the amount deemed safe to not affect the existing
+non-revocable tasks. Aurora now supports revocable jobs via the `tier` setting, when set to the
+`revocable` value.
+
+More implementation details in this [ticket](https://issues.apache.org/jira/browse/AURORA-1343).
+
+The scheduler must be [configured](/documentation/0.10.0/deploying-aurora-scheduler/#configuring-resource-oversubscription)
+to receive revocable offers from Mesos and accept revocable jobs. If not configured properly,
+revocable tasks will never get assigned to hosts and will stay in PENDING.
+
+### UpdateConfig Objects
+
+Parameters for controlling the rate and policy of rolling updates.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+| ```restart_threshold```      | Integer  | Maximum number of seconds before a shard must move into the ```RUNNING``` state before considered a failure (Default: 60)
+| ```watch_secs```             | Integer  | Minimum number of seconds a shard must remain in ```RUNNING``` state before considered a success (Default: 45)
+| ```max_per_shard_failures``` | Integer  | Maximum number of restarts per shard during update. Increments total failure count when this limit is exceeded. (Default: 0)
+| ```max_total_failures```     | Integer  | Maximum number of shard failures to be tolerated in total during an update. Cannot be greater than or equal to the total number of tasks in a job. (Default: 0)
+| ```rollback_on_failure```    | boolean  | When False, prevents auto rollback of a failed update (Default: True)
+| ```wait_for_batch_completion```| boolean | When True, all threads from a given batch will be blocked from picking up new instances until the entire batch is updated. This essentially simulates the legacy sequential updater algorithm. (Default: False)
+| ```pulse_interval_secs```    | Integer  |  Indicates a [coordinated update](/documentation/0.10.0/client-commands/#coordinated-job-updates). If no pulses are received within the provided interval the update will be blocked. Beta-updater only. Will fail on submission when used with client updater. (Default: None)
+
+### HealthCheckConfig Objects
+
+Parameters for controlling a task's health checks via HTTP.
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```initial_interval_secs```    | Integer   | Initial delay for performing an HTTP health check. (Default: 15)
+| ```interval_secs```            | Integer   | Interval on which to check the task's health via HTTP. (Default: 10)
+| ```max_consecutive_failures``` | Integer   | Maximum number of consecutive failures tolerated before considering a task unhealthy (Default: 0)
+| ```timeout_secs```             | Integer   | HTTP request timeout. (Default: 1)
+| ```endpoint```                 | String    | HTTP endpoint to check (Default: /health)
+| ```expected_response```        | String    | If not empty, fail the health check if the response differs. Case insensitive. (Default: ok)
+| ```expected_response_code```   | Integer   | If not zero, fail the health check if the response code differs. (Default: 0)
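+
+A sketch overriding a few of these defaults (values are illustrative):
+
+    health_check_config = HealthCheckConfig(
+      initial_interval_secs = 30,   # give the app extra time to warm up
+      interval_secs = 15,
+      max_consecutive_failures = 3,
+      timeout_secs = 2)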
+
+### Announcer Objects
+
+If the `announce` field in the Job configuration is set, each task will be
+registered in the ServerSet `/aurora/role/environment/jobname` in the
+zookeeper ensemble configured by the executor.  If no Announcer object is specified,
+no announcement will take place.  For more information about ServerSets, see the [User Guide](/documentation/0.10.0/user-guide/).
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```primary_port```             | String    | Which named port to register as the primary endpoint in the ServerSet (Default: `http`)
+| ```portmap```                  | dict      | A mapping of additional endpoints to be announced in the ServerSet (Default: `{ 'aurora': '{{primary_port}}' }`)
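+
+A minimal sketch of announcing a job with these defaults spelled out (the
+`announce` attribute placement follows the description above; other job
+attributes are elided):
+
+    job = Job(
+      # ... other job attributes ...
+      announce = Announcer(
+        primary_port = 'http',
+        portmap = {'aurora': 'http'}))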
+
+### Port aliasing with the Announcer `portmap`
+
+The primary endpoint registered in the ServerSet is the one allocated to the port
+specified by the `primary_port` in the `Announcer` object, by default
+the `http` port.  This port can be referenced from anywhere within a configuration
+as `{{thermos.ports[http]}}`.
+
+Without the port map, each named port would be allocated a unique port number.
+The `portmap` allows two different named ports to be aliased together.  The default
+`portmap` aliases the `aurora` port (i.e. `{{thermos.ports[aurora]}}`) to
+the `http` port.  Even though the two ports can be referenced independently,
+only one port is allocated by Mesos.  Any port referenced in a `Process` object
+but which is not in the portmap will be allocated dynamically by Mesos and announced as well.
+
+It is possible to use the portmap to alias names to static port numbers, e.g.
+`{'http': 80, 'https': 443, 'aurora': 'http'}`.  In this case, referencing
+`{{thermos.ports[aurora]}}` would look up `{{thermos.ports[http]}}` then
+find a static port 80.  No port would be requested of or allocated by Mesos.
+
+Static ports should be used cautiously as Aurora does nothing to prevent two
+tasks with the same static port allocations from being co-scheduled.
+External constraints such as slave attributes should be used to enforce such
+guarantees should they be needed.
+
+### Container Object
+
+*Note: The only container type currently supported is "docker".  Docker support is currently EXPERIMENTAL.*
+*Note: In order to correctly execute processes inside a job, the Docker container must have python 2.7 installed.*
+
+Describes the container the job's processes will run inside.
+
+  param          | type           | description
+  -----          | :----:         | -----------
+  ```docker```   | Docker         | A docker container to use.
+
+### Docker Object
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```image```      | String          | The name of the docker image to execute.  If the image does not exist locally it will be pulled with ```docker pull```.
+  ```parameters``` | List(Parameter) | Additional parameters to pass to the docker containerizer.
+
+### Docker Parameter Object
+
+Docker CLI parameters. This needs to be enabled by the scheduler `enable_docker_parameters` option.
+See [Docker Command Line Reference](https://docs.docker.com/reference/commandline/run/) for valid parameters. 
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```name```       | String          | The name of the docker parameter. E.g. volume
+  ```value```      | String          | The value of the parameter. E.g. /usr/local/bin:/usr/bin:rw
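+
+A sketch tying these objects together (the image and parameter values are
+illustrative, and `parameters` requires the scheduler's
+`enable_docker_parameters` option):
+
+    job = Job(
+      # ... other job attributes ...
+      container = Container(
+        docker = Docker(
+          image = 'python:2.7',
+          parameters = [Parameter(name = 'volume',
+                                  value = '/usr/local/bin:/usr/bin:rw')])))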
+
+### LifecycleConfig Objects
+
+*Note: The only lifecycle configuration supported is the HTTP lifecycle via the HTTPLifecycleConfig.*
+
+  param          | type                | description
+  -----          | :----:              | -----------
+  ```http```     | HTTPLifecycleConfig | Configure the lifecycle manager to send lifecycle commands to the task via HTTP.
+
+### HTTPLifecycleConfig Objects
+
+  param          | type            | description
+  -----          | :----:          | -----------
+  ```port```     | String          | The named port to send POST commands (Default: health)
+  ```graceful_shutdown_endpoint``` | String | Endpoint to hit to indicate that a task should gracefully shutdown. (Default: /quitquitquit)
+  ```shutdown_endpoint``` | String | Endpoint to hit to give a task its final warning before being killed. (Default: /abortabortabort)
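+
+A sketch with these defaults spelled out explicitly (all values shown are the
+defaults from the tables above):
+
+    lifecycle = LifecycleConfig(
+      http = HTTPLifecycleConfig(
+        port = 'health',
+        graceful_shutdown_endpoint = '/quitquitquit',
+        shutdown_endpoint = '/abortabortabort'))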
+
+#### graceful_shutdown_endpoint
+
+If the Job is listening on the port as specified by the HTTPLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint to request that the task gracefully shut itself down.  This is a
+courtesy call before the `shutdown_endpoint` is invoked a fixed amount of
+time later.
+
+#### shutdown_endpoint
+
+If the Job is listening on the port as specified by the HTTPLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint as a final warning before the task is shut down.  If the task
+does not shut down on its own after this, it will be forcefully killed.
+
+
+Specifying Scheduling Constraints
+=================================
+
+Most users will not need to specify constraints explicitly, as the
+scheduler automatically inserts reasonable defaults that attempt to
+ensure reliability without impacting schedulability. For example, the
+scheduler inserts a `host: limit:1` constraint, ensuring
+that your shards run on different physical machines. Please do not
+set this field unless you are sure of what you are doing.
+
+In the `Job` object there is a map `constraints` from String to String
+allowing the user to tailor the schedulability of tasks within the job.
+
+Each slave in the cluster is assigned a set of string-valued
+key/value pairs called attributes. For example, consider the host
+`cluster1-aaa-03-sr2` and its following attributes (given in key:value
+format): `host:cluster1-aaa-03-sr2` and `rack:aaa`.
+
+The constraint map's key is the attribute name on which we
+constrain Tasks within our Job. The value is how we constrain them.
+There are two types of constraints: *limit constraints* and *value
+constraints*.
+
+| constraint    | description
+| ------------- | --------------
+| Limit         | A string that specifies a limit for a constraint. Starts with <code>'limit:</code> followed by an Integer and closing single quote, such as ```'limit:1'```.
+| Value         | A string that specifies a value for a constraint. To include a list of values, separate the values using commas. To negate the values of a constraint, start with a ```!```.
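+
+For example, a value constraint pinning tasks to one rack, and a negated one
+keeping them off it (the attribute values are illustrative):
+
+    constraints = {
+      'rack': 'aaa',
+    }
+
+    constraints = {
+      'rack': '!aaa',
+    }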
+
+You can also control machine diversity using constraints. The below
+constraint ensures that no more than two instances of your job may run
+on a single host. Think of this as a "group by" limit.
+
+    constraints = {
+      'host': 'limit:2',
+    }
+
+Likewise, you can use constraints to control rack diversity, e.g. at
+most one task per rack:
+
+    constraints = {
+      'rack': 'limit:1',
+    }
+
+Use these constraints sparingly as they can dramatically reduce Tasks' schedulability.
+
+Template Namespaces
+===================
+
+Currently, a few Pystachio namespaces have special semantics. Using them
+in your configuration allows you to tailor application behavior
+through environment introspection or to interact in special ways with the
+Aurora client or Aurora-provided services.
+
+### mesos Namespace
+
+The `mesos` namespace contains the `instance` variable that can be used
+to distinguish between Task replicas.
+
+| variable name     | type       | description
+| --------------- | :--------: | -------------
+| ```instance```    | Integer    | The instance number of the created task. A job with 5 replicas has instance numbers 0, 1, 2, 3, and 4.
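+
+For example (the script name is hypothetical), a per-shard command line might
+use it like this:
+
+    process = Process(
+      name = 'shard_worker',
+      # {{mesos.instance}} expands to 0..instances-1 across the job.
+      cmdline = './worker.sh --shard={{mesos.instance}}')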
+
+### thermos Namespace
+
+The `thermos` namespace contains variables that work directly on the
+Thermos platform in addition to Aurora. This namespace is fully
+compatible with Tasks invoked via the `thermos` CLI.
+
+| variable      | type                     | description                        |
+| :----------:  | ---------                | ------------                       |
+| ```ports```   | map of string to Integer | A map of names to port numbers     |
+| ```task_id``` | string                   | The task ID assigned to this task. |
+
+The `thermos.ports` namespace is automatically populated by Aurora when
+invoking tasks on Mesos. When running the `thermos` command directly,
+these ports must be explicitly mapped with the `-P` option.
+
+For example, if `{{thermos.ports[http]}}` is specified in a `Process`
+configuration, it is automatically extracted and auto-populated by
+Aurora, but must be specified with, for example, `thermos -P http:12345`
+to map `http` to port 12345 when running via the CLI.
+
+Basic Examples
+==============
+
+These are provided to give a basic understanding of simple Aurora jobs.
+
+### hello_world.aurora
+
+Put the following in a file named `hello_world.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    import os
+    hello_world_process = Process(name = 'hello_world', cmdline = 'echo hello world')
+
+    hello_world_task = Task(
+      resources = Resources(cpu = 0.1, ram = 16 * MB, disk = 16 * MB),
+      processes = [hello_world_process])
+
+    hello_world_job = Job(
+      cluster = 'cluster1',
+      role = os.getenv('USER'),
+      task = hello_world_task)
+
+    jobs = [hello_world_job]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world hello_world.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world
+
+### Environment Tailoring
+
+#### hello_world_productionized.aurora
+
+Put the following in a file named `hello_world_productionized.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    include('hello_world.aurora')
+
+    production_resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 2 * GB)
+    staging_resources = Resources(cpu = 0.1, ram = 32 * MB, disk = 512 * MB)
+    hello_world_template = hello_world_job(
+        name = "hello_world-{{cluster}}",
+        task = hello_world_task(resources=production_resources))
+
+    jobs = [
+      # production jobs
+      hello_world_template(cluster = 'cluster1', instances = 25),
+      hello_world_template(cluster = 'cluster2', instances = 15),
+
+      # staging jobs
+      hello_world_template(
+        cluster = 'local',
+        instances = 1,
+        task = hello_world_task(resources=staging_resources)),
+    ]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world-cluster1 hello_world_productionized.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world-cluster1
diff --git a/source/documentation/0.10.0/configuration-tutorial.md b/source/documentation/0.10.0/configuration-tutorial.md
new file mode 100644
index 0000000..7641b86
--- /dev/null
+++ b/source/documentation/0.10.0/configuration-tutorial.md
@@ -0,0 +1,951 @@
+Aurora Configuration Tutorial
+=============================
+
+How to write Aurora configuration files, including feature descriptions
+and best practices. When writing a configuration file, make use of
+`aurora job inspect`. It takes the same job key and configuration file
+arguments as `aurora job create` or `aurora job update`. It first ensures the
+configuration parses, then outputs it in human-readable form.
+
+You should read this after going through the general [Aurora Tutorial](/documentation/0.10.0/tutorial/).
+
+- [Aurora Configuration Tutorial](#aurora-configuration-tutorial)
+	- [The Basics](#the-basics)
+		- [Use Bottom-To-Top Object Ordering](#use-bottom-to-top-object-ordering)
+	- [An Example Configuration File](#an-example-configuration-file)
+	- [Defining Process Objects](#defining-process-objects)
+	- [Getting Your Code Into The Sandbox](#getting-your-code-into-the-sandbox)
+	- [Defining Task Objects](#defining-task-objects)
+		- [SequentialTask: Running Processes in Parallel or Sequentially](#sequentialtask-running-processes-in-parallel-or-sequentially)
+		- [SimpleTask](#simpletask)
+		- [Combining tasks](#combining-tasks)
+	- [Defining Job Objects](#defining-job-objects)
+	- [The jobs List](#the-jobs-list)
+	- [Templating](#templating)
+		- [Templating 1: Binding in Pystachio](#templating-1-binding-in-pystachio)
+		- [Structurals in Pystachio / Aurora](#structurals-in-pystachio--aurora)
+			- [Mustaches Within Structurals](#mustaches-within-structurals)
+		- [Templating 2: Structurals Are Factories](#templating-2-structurals-are-factories)
+			- [A Second Way of Templating](#a-second-way-of-templating)
+		- [Advanced Binding](#advanced-binding)
+			- [Bind Syntax](#bind-syntax)
+			- [Binding Complex Objects](#binding-complex-objects)
+				- [Lists](#lists)
+				- [Maps](#maps)
+				- [Structurals](#structurals)
+		- [Structural Binding](#structural-binding)
+	- [Configuration File Writing Tips And Best Practices](#configuration-file-writing-tips-and-best-practices)
+		- [Use As Few .aurora Files As Possible](#use-as-few-aurora-files-as-possible)
+		- [Avoid Boilerplate](#avoid-boilerplate)
+		- [Thermos Uses bash, But Thermos Is Not bash](#thermos-uses-bash-but-thermos-is-not-bash)
+			- [Bad](#bad)
+			- [Good](#good)
+		- [Rarely Use Functions In Your Configurations](#rarely-use-functions-in-your-configurations)
+			- [Bad](#bad-1)
+			- [Good](#good-1)
+
+The Basics
+----------
+
+To run a job on Aurora, you must specify a configuration file that tells
+Aurora what it needs to know to schedule the job, what Mesos needs to
+run the tasks the job is made up of, and what Thermos needs to run the
+processes that make up the tasks. This file must have
+a `.aurora` suffix.
+
+A configuration file defines a collection of objects, along with parameter
+values for their attributes. An Aurora configuration file contains the
+following three types of objects:
+
+- Job
+- Task
+- Process
+
+A configuration also specifies a list of `Job` objects assigned
+to the variable `jobs`.
+
+- jobs (list of defined Jobs to run)
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a type-checked dictionary
+templating library called *Pystachio*, a powerful tool for
+configuration specification and reuse. Pystachio objects are tailored
+via `{{}}`-surrounded templates.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the [*Aurora+Thermos Configuration
+Reference*](/documentation/0.10.0/configuration-reference/), without `import` statements - the
+Aurora config loader injects them automatically. Other than that, an `.aurora`
+file works like any other Python script.
+
+[*Aurora+Thermos Configuration Reference*](/documentation/0.10.0/configuration-reference/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+### Use Bottom-To-Top Object Ordering
+
+A well-structured configuration starts with structural templates (if
+any). Structural templates encapsulate in their attributes all the
+differences between Jobs in the configuration that are not directly
+manipulated at the `Job` level, but typically at the `Process` or `Task`
+level. One example is when certain processes are invoked with slightly
+different settings or input.
+
+After structural templates, define, in order, `Process`es, `Task`s, and
+`Job`s.
+
+Structural template names should be *UpperCamelCased* and their
+instantiations are typically *UPPER\_SNAKE\_CASED*. `Process`, `Task`,
+and `Job` names are typically *lower\_snake\_cased*. Indentation is typically 2
+spaces.
+
+An Example Configuration File
+-----------------------------
+
+The following is a typical configuration file. Don't worry if there are
+parts you don't understand yet, but you may want to refer back to this
+as you read about its individual parts. Note that names surrounded by
+curly braces {{}} are template variables, which the system replaces with
+bound values for the variables.
+
+    # --- templates here ---
+	class Profile(Struct):
+	  package_version = Default(String, 'live')
+	  java_binary = Default(String, '/usr/lib/jvm/java-1.7.0-openjdk/bin/java')
+	  extra_jvm_options = Default(String, '')
+	  parent_environment = Default(String, 'prod')
+	  parent_serverset = Default(String,
+                                 '/foocorp/service/bird/{{parent_environment}}/bird')
+
+	# --- processes here ---
+	main = Process(
+	  name = 'application',
+	  cmdline = '{{profile.java_binary}} -server -Xmx1792m '
+	            '{{profile.extra_jvm_options}} '
+	            '-jar application.jar '
+	            '-upstreamService {{profile.parent_serverset}}'
+	)
+
+	# --- tasks ---
+	base_task = SequentialTask(
+	  name = 'application',
+	  processes = [
+	    Process(
+	      name = 'fetch',
+	      cmdline = 'curl -O '
+                  'https://packages.foocorp.com/{{profile.package_version}}/application.jar'),
+	  ]
+	)
+
+        # not always necessary but often useful to have separate task
+        # resource classes
+        staging_task = base_task(resources =
+                         Resources(cpu = 1.0,
+                                   ram = 2048*MB,
+                                   disk = 1*GB))
+	production_task = base_task(resources =
+                            Resources(cpu = 4.0,
+                                      ram = 2560*MB,
+                                      disk = 10*GB))
+
+	# --- job template ---
+	job_template = Job(
+	  name = 'application',
+	  role = 'myteam',
+	  contact = 'myteam-team@foocorp.com',
+	  instances = 20,
+	  service = True,
+	  task = production_task
+	)
+
+	# -- profile instantiations (if any) ---
+	PRODUCTION = Profile()
+	STAGING = Profile(
+	  extra_jvm_options = '-Xloggc:gc.log',
+	  parent_environment = 'staging'
+	)
+
+	# -- job instantiations --
+	jobs = [
+          job_template(cluster = 'cluster1', environment = 'prod')
+	               .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster2', environment = 'prod')
+	                .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster1',
+                        environment = 'staging',
+			service = False,
+			task = staging_task,
+			instances = 2)
+			.bind(profile = STAGING),
+	]
+
+## Defining Process Objects
+
+Processes are handled by the Thermos system. A process is a single
+executable step run as a part of an Aurora task, which consists of a
+bash-executable statement.
+
+The key (and required) `Process` attributes are:
+
+-   `name`: Any string which is a valid Unix filename (no slashes,
+    NULLs, or leading periods). The `name` value must be unique relative
+    to other Processes in a `Task`.
+-   `cmdline`: A command line run in a bash subshell, so you can use
+    bash scripts. Nothing is supplied for command-line arguments,
+    so `$*` is unspecified.
+
+Many tiny processes make managing configurations more difficult. For
+example, the following is a bad way to define processes.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip'
+    )
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+Since `cmdline` runs in a bash subshell, you can chain commands
+with `&&` or `||`.
+
+When defining a `Task` that is just a list of Processes run in a
+particular order, use `SequentialTask`, as described in the [*Defining*
+`Task` *Objects*](#defining-task-objects) section. The following simplifies and combines the
+above multiple `Process` definitions into just two.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip && '
+                'unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+`Process` also has optional attributes to customize its behaviour. Details can be found in the [*Aurora+Thermos Configuration Reference*](/documentation/0.10.0/configuration-reference/#process-objects).
+
+
+## Getting Your Code Into The Sandbox
+
+When using Aurora, you need to get your executable code into its "sandbox", specifically
+the Task sandbox where the code executes for the Processes that make up that Task.
+
+Each Task has a sandbox created when the Task starts and garbage
+collected when it finishes. All of a Task's processes run in its
+sandbox, so processes can share state by using a shared current
+working directory.
+
+Typically, you save this code somewhere. You then need to define a Process
+in your `.aurora` configuration file that fetches the code from that somewhere
+to where the slave can see it. For a public cloud, that can be anywhere public on
+the Internet, such as S3. For private cloud internal storage, you need to put
+it on an accessible HDFS cluster or similar storage.
+
+The template for this Process is:
+
+    <name> = Process(
+      name = '<name>',
+      cmdline = '<command to copy and extract code archive into current working directory>'
+    )
+
+Note: Be sure the extracted code archive has an executable.
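+
+For example (the URL and archive name are hypothetical), a fetch Process
+following this template might look like:
+
+    fetch = Process(
+      name = 'fetch',
+      # Download the package and unpack it into the sandbox.
+      cmdline = 'curl -O https://packages.example.com/myapp.tar.gz && '
+                'tar xzf myapp.tar.gz')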
+
+## Defining Task Objects
+
+Tasks are handled by Mesos. A task is a collection of processes that
+runs in a shared sandbox. It's the fundamental unit Aurora uses to
+schedule the datacenter; essentially what Aurora does is find places
+in the cluster to run tasks.
+
+The key (and required) parts of a Task are:
+
+-   `name`: A string giving the Task's name. By default, if a Task is
+    not given a name, it inherits the first name in its Process list.
+
+-   `processes`: An unordered list of Process objects bound to the Task.
+    The value of the optional `constraints` attribute affects the
+    contents as a whole. Currently, the only constraint, `order`, determines if
+    the processes run in parallel or sequentially.
+
+-   `resources`: A `Resource` object defining the Task's resource
+    footprint. A `Resource` object has three attributes:
+    -   `cpu`: A Float, the fractional number of cores the Task
+        requires.
+    -   `ram`: An Integer, RAM bytes the Task requires.
+    -   `disk`: An Integer, disk bytes the Task requires.
+
+A basic Task definition looks like:
+
+    Task(
+        name="hello_world",
+        processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+        resources=Resources(cpu = 1.0,
+                            ram = 1*GB,
+                            disk = 1*GB))
+
+A Task has optional attributes to customize its behaviour. Details can be found in the [*Aurora+Thermos Configuration Reference*](/documentation/0.10.0/configuration-reference/#task-object)
+
+
+### SequentialTask: Running Processes in Parallel or Sequentially
+
+By default, a Task with several Processes runs them in parallel. There
+are two ways to run Processes sequentially:
+
+-   Include an `order` constraint in the Task definition's `constraints`
+    attribute whose arguments specify the processes' run order:
+
+        Task( ... processes=[process1, process2, process3],
+	          constraints = order(process1, process2, process3), ...)
+
+-   Use `SequentialTask` instead of `Task`; it automatically runs
+    processes in the order specified in the `processes` attribute. No
+    `constraint` parameter is needed:
+
+        SequentialTask( ... processes=[process1, process2, process3] ...)
+
+### SimpleTask
+
+For quickly creating simple tasks, use the `SimpleTask` helper. It
+creates a basic task from a provided name and command line using a
+default set of resources. For example, in a `.aurora` configuration
+file:
+
+    SimpleTask(name="hello_world", command="echo hello world")
+
+is equivalent to
+
+    Task(name="hello_world",
+         processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+         resources=Resources(cpu = 1.0,
+                             ram = 1*GB,
+                             disk = 1*GB))
+
+The simplest idiomatic Job configuration thus becomes:
+
+    import os
+    hello_world_job = Job(
+      task=SimpleTask(name="hello_world", command="echo hello world"),
+      role=os.getenv('USER'),
+      cluster="cluster1")
+
+When written to `hello_world.aurora`, you invoke it with a simple
+`aurora job create cluster1/$USER/test/hello_world hello_world.aurora`.
+
+### Combining tasks
+
+`Tasks.concat` (synonym: `concat_tasks`) and
+`Tasks.combine` (synonym: `combine_tasks`) merge multiple Task definitions
+into a single Task. It may be easier to define complex Jobs
+as smaller constituent Tasks. But since a Job only includes a single
+Task, the subtasks must be combined before using them in a Job.
+Smaller Tasks can also be reused between Jobs, instead of having to
+repeat their definition for multiple Jobs.
+
+With both methods, the merged Task takes the first Task's name. The
+difference between the two is the result Task's process ordering.
+
+-   `Tasks.combine` runs its subtasks' processes in no particular order.
+    The new Task's resource consumption is the sum of all its subtasks'
+    consumption.
+
+-   `Tasks.concat` runs its subtasks in the order supplied, with each
+    subtask's processes run serially between tasks. It is analogous to
+    the `order` constraint helper, except at the Task level instead of
+    the Process level. The new Task's resource consumption is the
+    maximum value specified by any subtask for each Resource attribute
+    (cpu, ram and disk).
+
+For example, given the following:
+
+    setup_task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper],
+      # It is important to note that Tasks.concat has
+      # no effect on the ordering of the processes within a task;
+      # hence the necessity of the order() statement below
+      # (otherwise, the order in which download_interpreter
+      # and update_zookeeper run will be non-deterministic)
+      constraints=order(download_interpreter, update_zookeeper),
+      ...
+    )
+
+    run_task = SequentialTask(
+      ...
+      processes=[download_application, start_application],
+      ...
+    )
+
+    combined_task = Tasks.concat(setup_task, run_task)
+
+The `Tasks.concat` command merges the two Tasks into a single Task and
+ensures all processes in `setup_task` run before the processes
+in `run_task`. Conceptually, the task is reduced to:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper,
+                        download_application, start_application),
+      ...
+    )
+
+In the case of `Tasks.combine`, the two schedules run in parallel:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper) +
+                        order(download_application, start_application),
+      ...
+    )
+
+In the latter case, each of the two sequences may operate in parallel.
+Of course, this may not be the intended behavior (for example, if
+the `start_application` Process implicitly relies
+upon `download_interpreter`). Make sure you understand the difference
+between using one or the other.
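+
+To make the resource rules concrete, a small sketch (the figures are
+illustrative):
+
+    task_a = Task( ... resources = Resources(cpu = 1.0, ram = 1*GB, disk = 1*GB) ... )
+    task_b = Task( ... resources = Resources(cpu = 2.0, ram = 1*GB, disk = 2*GB) ... )
+
+    # Tasks.combine(task_a, task_b) sums each attribute:
+    #   cpu = 3.0, ram = 2*GB, disk = 3*GB
+    # Tasks.concat(task_a, task_b) takes the maximum of each attribute:
+    #   cpu = 2.0, ram = 1*GB, disk = 2*GB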
+
+## Defining Job Objects
+
+A job is a group of identical tasks that Aurora can run in a Mesos cluster.
+
+A `Job` object is defined by the values of several attributes, some
+required and some optional. The required attributes are:
+
+-   `task`: Task object to bind to this job. Note that a Job can
+    only take a single Task.
+
+-   `role`: Job's role account; in other words, the user account to run
+    the job as on a Mesos cluster machine. A common value is
+    `os.getenv('USER')`, which uses a Python call to get the user who
+    submits the job request. The other common value is the service
+    account that runs the job, e.g. `www-data`.
+
+-   `environment`: Job's environment, typical values
+    are `devel`, `test`, or `prod`.
+
+-   `cluster`: Aurora cluster to schedule the job in, defined in
+    `/etc/aurora/clusters.json` or `~/.clusters.json`. You can specify
+    jobs where the only difference is the `cluster`, then at run time
+    only run the Job whose job key includes your desired cluster's name.
+
+You usually see a `name` parameter. By default, `name` inherits its
+value from the Job's associated Task object, but you can override this
+default. Combining these parameters, a Job definition might look like:
+
+    foo_job = Job( name = 'foo', cluster = 'cluster1',
+              role = os.getenv('USER'), environment = 'prod',
+              task = foo_task)
+
+In addition to the required attributes, there are several optional
+attributes. Details can be found in the [Aurora+Thermos Configuration Reference](/documentation/0.10.0/configuration-reference/#job-objects).
+
+
+## The jobs List
+
+At the end of your `.aurora` file, you need to specify a list of the
+file's defined Jobs to run in the order listed. For example, the
+following runs first `job1`, then `job2`, then `job3`.
+
+    jobs = [job1, job2, job3]
+
+Templating
+----------
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a templating library called
+*Pystachio*, a powerful tool for configuration specification and reuse.
+
+[Aurora+Thermos Configuration Reference](/documentation/0.10.0/configuration-reference/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the *Aurora+Thermos Configuration
+Reference* without `import` statements - the Aurora config loader
+injects them automatically. Other than that the `.aurora` format
+works like any other Python script.
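+
+Since the file is evaluated as Python, ordinary constructs such as loops
+and helper variables work as expected. A small sketch (role and cluster
+names are illustrative):
+
+    environments = ['devel', 'test']
+    jobs = [
+      Job(name = 'echo', role = 'myteam', cluster = 'cluster1',
+          environment = env,
+          task = SimpleTask(name = 'echo', command = 'echo hello'))
+      for env in environments
+    ]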
+
+### Templating 1: Binding in Pystachio
+
+Pystachio uses the visually distinctive `{{}}` to indicate template
+variables. These are often called "mustache variables" after the
+similarly appearing variables in the Mustache templating system and
+because the curly braces resemble mustaches.
+
+If you are familiar with the Mustache system, note that templates in
+Pystachio differ in significant ways: they have no nesting, joining, or
+inheritance semantics. On the other hand, templates are evaluated
+iteratively, which affords some level of indirection.
+
+Let's start with the simplest template: text with one
+variable, in this case `name`:
+
+    Hello {{name}}
+
+If we evaluate this as is, we'd get back:
+
+    Hello
+
+If a template variable doesn't have a value, when evaluated it's
+replaced with nothing. If we add a binding to give it a value:
+
+    { "name" : "Tom" }
+
+We'd get back:
+
+    Hello Tom
+
+Every Pystachio object has an associated `.bind` method that can bind
+values to `{{}}` variables. Bindings are not immediately evaluated.
+Instead, they are evaluated only when the interpolated value of the
+object is necessary, e.g. for performing an equality check or serializing
+a message over the wire.
+
+Objects with and without mustache templated variables behave
+differently:
+
+    >>> Float(1.5)
+    Float(1.5)
+
+    >>> Float('{{x}}.5')
+    Float({{x}}.5)
+
+    >>> Float('{{x}}.5').bind(x = 1)
+    Float(1.5)
+
+    >>> Float('{{x}}.5').bind(x = 1) == Float(1.5)
+    True
+
+    >>> contextual_object = String('{{metavar{{number}}}}').bind(
+    ... metavar1 = "first", metavar2 = "second")
+
+    >>> contextual_object
+    String({{metavar{{number}}}})
+
+    >>> contextual_object.bind(number = 1)
+    String(first)
+
+    >>> contextual_object.bind(number = 2)
+    String(second)
+
+You usually bind simple key-value pairs, but you can also bind three
+other objects: lists, dictionaries, and structurals. These will be
+described in detail later.
+
+### Structurals in Pystachio / Aurora
+
+Most Aurora/Thermos users don't ever (knowingly) interact with `String`,
+`Float`, or `Integer` Pystachio objects directly. Instead they interact
+with derived structural (`Struct`) objects that are collections of
+fundamental and structural objects. The structural object components are
+called *attributes*. Aurora's most used structural objects are `Job`,
+`Task`, and `Process`:
+
+    class Process(Struct):
+      cmdline = Required(String)
+      name = Required(String)
+      max_failures = Default(Integer, 1)
+      daemon = Default(Boolean, False)
+      ephemeral = Default(Boolean, False)
+      min_duration = Default(Integer, 5)
+      final = Default(Boolean, False)
+
+Construct default objects by following the object's type with `()`. If you
+want an attribute to have a value different from its default, include
+the attribute name and value inside the parentheses.
+
+    >>> Process()
+    Process(daemon=False, max_failures=1, ephemeral=False,
+      min_duration=5, final=False)
+
+Attribute values can be template variables, which then receive specific
+values when creating the object.
+
+    >>> Process(cmdline = 'echo {{message}}')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo {{message}}, final=False)
+
+    >>> Process(cmdline = 'echo {{message}}').bind(message = 'hello world')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+A powerful binding property is that all of an object's children inherit its
+bindings:
+
+    >>> List(Process)([
+    ... Process(name = '{{prefix}}_one'),
+    ... Process(name = '{{prefix}}_two')
+    ... ]).bind(prefix = 'hello')
+    ProcessList(
+      Process(daemon=False, name=hello_one, max_failures=1, ephemeral=False, min_duration=5, final=False),
+      Process(daemon=False, name=hello_two, max_failures=1, ephemeral=False, min_duration=5, final=False)
+      )
+
+Remember that an Aurora Job contains Tasks which contain Processes. A
+Job level binding is inherited by its Tasks and all their Processes.
+Similarly a Task level binding is available to that Task and its
+Processes but is *not* visible at the Job level (inheritance is a
+one-way street).
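+
+As a sketch (names and paths are illustrative), a single Task level
+binding can feed every Process underneath it:
+
+    logged_task = Task(
+      name = 'logged_task',
+      processes = [
+        Process(name = 'start', cmdline = 'echo start >> {{log_dir}}/out'),
+        Process(name = 'stop', cmdline = 'echo stop >> {{log_dir}}/out')
+      ],
+      resources = Resources(cpu = 1.0, ram = 1*GB, disk = 1*GB)
+    ).bind(log_dir = '/var/log/myapp')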
+
+#### Mustaches Within Structurals
+
+When you define a `Struct` schema, one powerful, but confusing, feature
+is that all of that structure's attributes are Mustache variables within
+the enclosing scope *once they have been populated*.
+
+For example, when `Process` is defined above, all its attributes, such as
+`{{name}}`, `{{cmdline}}`, and `{{max_failures}}`, are immediately
+defined as Mustache variables, implicitly bound into the `Process`, and
+inherited by all of its child objects once they are defined.
+
+Thus, you can do the following:
+
+    >>> Process(name = "installer", cmdline = "echo {{name}} is running")
+    Process(daemon=False, name=installer, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo installer is running, final=False)
+
+WARNING: This binding only takes place in one direction. For example,
+the following does NOT work and does not set the `Process` `name`
+attribute's value.
+
+    >>> Process().bind(name = "installer")
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5, final=False)
+
+The following is also not possible and results in an infinite loop that
+attempts to resolve `Process.name`.
+
+    >>> Process(name = '{{name}}').bind(name = 'installer')
+
+Do not confuse Structural attributes with bound Mustache variables.
+Attributes are implicitly converted to Mustache variables but not vice
+versa.
+
+### Templating 2: Structurals Are Factories
+
+#### A Second Way of Templating
+
+A second templating method is as powerful as the one just described and
+often confused with it. This method relies on the automatic conversion of
+Struct attributes into Mustache variables described above.
+
+Suppose you create a Process object:
+
+    >>> p = Process(name = "process_one", cmdline = "echo hello world")
+
+    >>> p
+    Process(daemon=False, name=process_one, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+This `Process` object, `p`, can be used wherever a `Process` object is
+needed. It can also be reused by changing the value(s) of its
+attribute(s). Here we change its `name` attribute from `process_one` to
+`process_two`.
+
+    >>> p(name = "process_two")
+    Process(daemon=False, name=process_two, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+Template creation is a common use for this technique:
+
+    >>> Daemon = Process(daemon = True)
+    >>> logrotate = Daemon(name = 'logrotate', cmdline = './logrotate conf/logrotate.conf')
+    >>> mysql = Daemon(name = 'mysql', cmdline = 'bin/mysqld --safe-mode')
+
+### Advanced Binding
+
+As described above, `.bind()` binds simple strings or numbers to
+Mustache variables. In addition to Structural types formed by combining
+atomic types, Pystachio has two container types, `List` and `Map`, which
+can also be bound via `.bind()`.
+
+#### Bind Syntax
+
+The `bind()` function can take Python dictionaries or `kwargs`
+interchangeably (when `**kwargs` appears in a function definition,
+`kwargs` receives a Python dictionary containing all keyword arguments
+after the formal parameter list).
+
+    >>> String('{{foo}}').bind(foo = 'bar') == String('{{foo}}').bind({'foo': 'bar'})
+    True
+
+Bindings done "closer" to the object in question take precedence:
+
+    >>> p = Process(name = '{{context}}_process')
+    >>> t = Task().bind(context = 'global')
+    >>> t(processes = [p, p.bind(context = 'local')])
+    Task(processes=ProcessList(
+      Process(daemon=False, name=global_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5),
+      Process(daemon=False, name=local_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5)
+    ))
+
+#### Binding Complex Objects
+
+##### Lists
+
+    >>> fibonacci = List(Integer)([1, 1, 2, 3, 5, 8, 13])
+    >>> String('{{fib[4]}}').bind(fib = fibonacci)
+    String(5)
+
+##### Maps
+
+    >>> first_names = Map(String, String)({'Kent': 'Clark', 'Wayne': 'Bruce', 'Prince': 'Diana'})
+    >>> String('{{first[Kent]}}').bind(first = first_names)
+    String(Clark)
+
+##### Structurals
+
+    >>> String('{{p.cmdline}}').bind(p = Process(cmdline = "echo hello world"))
+    String(echo hello world)
+
+### Structural Binding
+
+Use structural templates when binding more than two or three individual
+values at the Job or Task level. For fewer values, standard
+key-to-string binding is sufficient.
+
+Structural binding is a very powerful pattern and is most useful in
+Aurora/Thermos for doing Structural configuration. For example, you can
+define a job profile. The following profile uses `HDFS`, the Hadoop
+Distributed File System, to designate a file's location. `HDFS` does
+not come with Aurora, so you'll need to either install it separately
+or change the way the dataset is designated.
+
+    class Profile(Struct):
+      version = Required(String)
+      environment = Required(String)
+      dataset = Default(String, 'hdfs://home/aurora/data/{{environment}}')
+
+    PRODUCTION = Profile(version = 'live', environment = 'prod')
+    DEVEL = Profile(version = 'latest',
+                    environment = 'devel',
+                    dataset = 'hdfs://home/aurora/data/test')
+    TEST = Profile(version = 'latest', environment = 'test')
+
+    JOB_TEMPLATE = Job(
+      name = 'application',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = '{{profile.environment}}',
+      task = SequentialTask(
+        name = 'task',
+        resources = Resources(cpu = 2, ram = 4*GB, disk = 8*GB),
+        processes = [
+          Process(name = 'main', cmdline = 'java -jar application.jar -hdfsPath {{profile.dataset}}')
+        ]
+      )
+    )
+
+    jobs = [
+      JOB_TEMPLATE(instances = 100).bind(profile = PRODUCTION),
+      JOB_TEMPLATE.bind(profile = DEVEL),
+      JOB_TEMPLATE.bind(profile = TEST),
+    ]
+
+In this case, a custom structural "Profile" is created to self-document
+the configuration to some degree. This also allows some schema
+"type-checking" and default self-substitution, e.g. in
+`Profile.dataset` above.
+
+So rather than a `.bind()` with a half-dozen substituted variables, you
+can bind a single object that has sensible defaults stored in a single
+place.
+
+Configuration File Writing Tips And Best Practices
+--------------------------------------------------
+
+### Use As Few .aurora Files As Possible
+
+When creating your `.aurora` configuration, try to keep all versions of
+a particular job within the same `.aurora` file. For example, if you
+have separate jobs for `cluster1`, `cluster1` staging, `cluster1`
+testing, and `cluster2`, keep them as close together as possible.
+
+Constructs shared across multiple jobs owned by your team (e.g.
+team-level defaults or structural templates) can be split into separate
+`.aurora` files and included via the `include` directive.
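+
+For example, a sketch (the file name is hypothetical):
+
+    # Pull shared templates defined in team_defaults.aurora into this file.
+    include('team_defaults.aurora')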
+
+### Avoid Boilerplate
+
+If you see repetition or find yourself copy and pasting any parts of
+your configuration, it's likely an opportunity for templating. Take the
+example below:
+
+`redundant.aurora` contains:
+
+    download = Process(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'rm -rf Python-2.7.3 && tar xzf Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    build = Process(
+      name = 'build',
+      cmdline = 'pushd Python-2.7.3 && ./configure && make && popd',
+      max_failures = 1)
+
+    email = Process(
+      name = 'email',
+      cmdline = 'echo Success | mail feynman@tmc.com',
+      max_failures = 5,
+      min_duration = 1)
+
+    build_python = Task(
+      name = 'build_python',
+      processes = [download, unpack, build, email],
+      constraints = [Constraint(order = ['download', 'unpack', 'build', 'email'])])
+
+As you'll notice, there's a lot of repetition in the `Process`
+definitions. For example, almost every process sets a `max_failures`
+limit to 5 and a `min_duration` to 1. This is an opportunity for factoring
+into a common process template.
+
+Furthermore, the Python version is repeated everywhere. This can be
+bound via structural templating as described in the [Advanced Binding](#advanced-binding)
+section.
+
+`less_redundant.aurora` contains:
+
+    class Python(Struct):
+      version = Required(String)
+      base = Default(String, 'Python-{{version}}')
+      package = Default(String, '{{base}}.tar.bz2')
+
+    ReliableProcess = Process(
+      max_failures = 5,
+      min_duration = 1)
+
+    download = ReliableProcess(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/{{python.version}}/{{python.package}}')
+
+    unpack = ReliableProcess(
+      name = 'unpack',
+      cmdline = 'rm -rf {{python.base}} && tar xzf {{python.package}}')
+
+    build = ReliableProcess(
+      name = 'build',
+      cmdline = 'pushd {{python.base}} && ./configure && make && popd',
+      max_failures = 1)
+
+    email = ReliableProcess(
+      name = 'email',
+      cmdline = 'echo Success | mail {{role}}@foocorp.com')
+
+    build_python = SequentialTask(
+      name = 'build_python',
+      processes = [download, unpack, build, email]).bind(python = Python(version = "2.7.3"))
+
+### Thermos Uses bash, But Thermos Is Not bash
+
+#### Bad
+
+Many tiny Processes make for harder-to-manage configurations.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'rcp user@my_machine:my_application .'
+    )
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+#### Good
+
+Each `cmdline` runs in a bash subshell, so you have the full power of
+bash. Chaining commands with `&&` or `||` is almost always the right
+thing to do.
+
+Also, for Tasks that are simply a list of processes that run one after
+another, consider using the `SequentialTask` helper, which applies a
+linear ordering constraint for you.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'rcp user@my_machine:my_application . && unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+### Rarely Use Functions In Your Configurations
+
+90% of the time you define a function in a `.aurora` file, you're
+probably Doing It Wrong(TM).
+
+#### Bad
+
+    def get_my_task(name, user, cpu, ram, disk):
+      return Task(
+        name = name,
+        user = user,
+        processes = [STAGE_PROCESS, RUN_PROCESS],
+        constraints = order(STAGE_PROCESS, RUN_PROCESS),
+        resources = Resources(cpu = cpu, ram = ram, disk = disk)
+      )
+
+    task_one = get_my_task('task_one', 'feynman', 1.0, 32*MB, 1*GB)
+    task_two = get_my_task('task_two', 'feynman', 2.0, 64*MB, 1*GB)
+
+#### Good
+
+This one is more idiomatic. Forced keyword arguments prevent accidents,
+e.g. constructing a task with `32*MB` of disk when you meant 32 MB of
+RAM. Less proliferation of task-construction techniques makes for an
+easier-to-read, quicker-to-understand, and more composable
+configuration.
+
+    TASK_TEMPLATE = SequentialTask(
+      user = 'wickman',
+      processes = [STAGE_PROCESS, RUN_PROCESS],
+    )
+
+    task_one = TASK_TEMPLATE(
+      name = 'task_one',
+      resources = Resources(cpu = 1.0, ram = 32*MB, disk = 1*GB) )
+
+    task_two = TASK_TEMPLATE(
+      name = 'task_two',
+      resources = Resources(cpu = 2.0, ram = 64*MB, disk = 1*GB)
+    )
diff --git a/source/documentation/0.10.0/contributing.md b/source/documentation/0.10.0/contributing.md
new file mode 100644
index 0000000..08c8ad1
--- /dev/null
+++ b/source/documentation/0.10.0/contributing.md
@@ -0,0 +1,84 @@
+## Get the Source Code
+
+First things first, you'll need the source! The Aurora source is available from Apache git:
+
+    git clone https://gitbox.apache.org/repos/asf/aurora
+
+Read the Style Guides
+---------------------
+Aurora's codebase is primarily Java and Python and conforms to the Twitter Commons styleguides for
+both languages.
+
+- [Java Style Guide](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/styleguide.md)
+- [Python Style Guide](https://github.com/twitter/commons/blob/master/src/python/twitter/common/styleguide.md)
+
+## Find Something to Do
+
+There are issues in [Jira](https://issues.apache.org/jira/browse/AURORA) with the
+["newbie" label](https://issues.apache.org/jira/issues/?jql=project%20%3D%20AURORA%20AND%20labels%20%3D%20newbie%20and%20resolution%3Dunresolved)
+that are good starting places for new Aurora contributors; pick one of these and dive in! Once
+you've got a patch, the next step is to post a review.
+
+## Getting your ReviewBoard Account
+
+Go to https://reviews.apache.org and create an account.
+
+## Setting up your ReviewBoard Environment
+
+Run `./rbt status`. The first time this runs it will bootstrap and you will be asked to log in.
+Subsequent runs will cache your login credentials.
+
+## Submitting a Patch for Review
+
+Post a review with `rbt`, fill out the fields in your browser and hit Publish.
+
+    ./rbt post -o
+
+If you're unsure about who to add as a reviewer, you can default to adding Bill Farner (wfarner) and
+Joshua Cohen (jcohen). They will take care of finding an appropriate reviewer for the patch.
+
+Once you've done this, you probably want to mark the associated Jira issue as Reviewable.
+
+## Updating an Existing Review
+
+Incorporate review feedback, make some more commits, update your existing review, fill out the
+fields in your browser and hit Publish.
+
+    ./rbt post -o -r <RB_ID>
+
+## Getting Your Review Merged
+
+If you're not an Aurora committer, one of the committers will merge your change in as described
+below. Generally, the last reviewer to give the review a 'Ship It!' will be responsible.
+
+### Merging Your Own Review (Committers)
+
+Once you have shipits from the right committers, merge your changes in a single commit and mark
+the review as submitted. The typical workflow is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>  # Verify the automatically-generated commit message looks sane,
+                            # editing if necessary.
+    git show master         # Verify everything looks sane
+    git push origin master
+    ./rbt close <RB_ID>
+
+Note that even if you're developing using feature branches you will not use `git merge` - each
+commit will be an atomic change accompanied by a ReviewBoard entry.
+
+### Merging Someone Else's Review
+
+Sometimes you'll need to merge someone else's RB. The typical workflow for this is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>
+    git show master  # Verify everything looks sane, author is correct
+    git push origin master
+
+## Cleaning Up
+
+Your patch has landed, congratulations! The last thing you'll want to do before moving on to your
+next fix is to clean up your Jira and ReviewBoard: the former should be marked as
+"Resolved" and the latter as "Submitted".
diff --git a/source/documentation/0.10.0/cron-jobs.md b/source/documentation/0.10.0/cron-jobs.md
new file mode 100644
index 0000000..a966a6f
--- /dev/null
+++ b/source/documentation/0.10.0/cron-jobs.md
@@ -0,0 +1,131 @@
+# Cron Jobs
+
+Aurora supports execution of scheduled jobs on a Mesos cluster using cron-style syntax.
+
+- [Overview](#overview)
+- [Collision Policies](#collision-policies)
+	- [KILL_EXISTING](#kill_existing)
+	- [CANCEL_NEW](#cancel_new)
+- [Failure recovery](#failure-recovery)
+- [Interacting with cron jobs via the Aurora CLI](#interacting-with-cron-jobs-via-the-aurora-cli)
+	- [cron schedule](#cron-schedule)
+	- [cron deschedule](#cron-deschedule)
+	- [cron start](#cron-start)
+	- [job killall, job restart, job kill](#job-killall-job-restart-job-kill)
+- [Technical Note About Syntax](#technical-note-about-syntax)
+- [Caveats](#caveats)
+	- [Failovers](#failovers)
+	- [Collision policy is best-effort](#collision-policy-is-best-effort)
+	- [Timezone Configuration](#timezone-configuration)
+
+## Overview
+
+A job is identified as a cron job by the presence of a
+`cron_schedule` attribute containing a cron-style schedule in the
+[`Job`](/documentation/0.10.0/configuration-reference/#job-objects) object. Examples of cron schedules
+include "every 5 minutes" (`*/5 * * * *`), "Fridays at 17:00" (`* 17 * * FRI`), and
+"the 1st and 15th day of the month at 03:00" (`0 3 1,15 *`).
+
+Example (available in the [Vagrant environment](/documentation/0.10.0/vagrant/)):
+
+    $ cat /vagrant/examples/job/cron_hello_world.aurora
+    # cron_hello_world.aurora
+    # A cron job that runs every 5 minutes.
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]
+
+## Collision Policies
+
+The `cron_collision_policy` field specifies the scheduler's behavior when a new cron job is
+triggered while an older run hasn't finished. The scheduler has two policies available,
+[KILL_EXISTING](#kill_existing) and [CANCEL_NEW](#cancel_new).
+
+### KILL_EXISTING
+
+The default policy - on a collision the old instances are killed and instances with the current
+configuration are started.
+
+### CANCEL_NEW
+
+On a collision the new run is cancelled.
+
+Note that the use of this flag is likely a code smell - interrupted cron jobs should be able
+to recover their progress on a subsequent invocation, otherwise they risk having their work queue
+grow faster than they can process it.
+
+## Failure recovery
+
+Unlike with services, which Aurora will always re-execute regardless of exit status, instances of
+cron jobs retry according to the `max_task_failures` attribute of the
+[Job](/documentation/0.10.0/configuration-reference/#job-objects) object. To get "run-until-success" semantics,
+set `max_task_failures` to `-1`.
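+
+As a sketch, a cron job with run-until-success semantics might look like
+(unrelated attributes elided):
+
+    Job(
+      ...
+      cron_schedule = '0 2 * * *',
+      max_task_failures = -1,  # retry each run until it succeeds
+      ...
+    )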
+
+## Interacting with cron jobs via the Aurora CLI
+
+Most interaction with cron jobs takes place using the `cron` subcommand. See `aurora cron -h`
+for up-to-date usage instructions.
+
+### cron schedule
+Schedules a new cron job on the Aurora cluster for later runs or replaces the existing cron template
+with a new one. Only future runs will be affected; any existing active tasks are left intact.
+
+    $ aurora cron schedule devcluster/www-data/test/cron_hello_world /vagrant/examples/jobs/cron_hello_world.aurora
+
+### cron deschedule
+Deschedules a cron job, preventing future runs but allowing current runs to complete.
+
+    $ aurora cron deschedule devcluster/www-data/test/cron_hello_world
+
+### cron start
+Starts a cron job immediately, outside of its normal cron schedule.
+
+    $ aurora cron start devcluster/www-data/test/cron_hello_world
+
+### job killall, job restart, job kill
+Cron jobs create instances running on the cluster that you can interact with like normal Aurora
+tasks, using `job killall`, `job restart`, and `job kill`.
+
+## Technical Note About Syntax
+
+`cron_schedule` uses a restricted subset of BSD crontab syntax. While the
+execution engine currently uses Quartz, the schedule parsing is a custom implementation of a
+subset of FreeBSD [crontab(5)](http://www.freebsd.org/cgi/man.cgi?crontab(5)) syntax. See
+[the source](https://github.com/apache/aurora/blob/master/src/main/java/org/apache/aurora/scheduler/cron/CrontabEntry.java#L106-L124)
+for details.
+
+## Caveats
+
+### Failovers
+No failover recovery. Aurora does not record the latest minute it fired
+triggers for across failovers. Therefore it's possible to miss triggers
+on failover. Note that this behavior may change in the future.
+
+It's necessary to sync time between schedulers with something like `ntpd`.
+Clock skew could cause double or missed triggers in the case of a failover.
+
+### Collision policy is best-effort
+Aurora aims to always have *at least one copy* of a given instance running at a time - it's
+an AP system, meaning it chooses Availability and Partition Tolerance at the expense of
+Consistency.
+
+If your collision policy was `CANCEL_NEW` and a task has terminated but
+Aurora has not yet noticed, Aurora will go ahead and create your new
+task.
+
+If your collision policy was `KILL_EXISTING` and a task was marked `LOST`
+but not yet GCed, Aurora will go ahead and create your new task without
+attempting to kill the old one (outside the GC interval).
+
+### Timezone Configuration
+Cron timezone is configured independently of JVM timezone with the `-cron_timezone` flag and
+defaults to UTC.
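+
+For example, to evaluate cron schedules in a different timezone (the value
+shown is illustrative):
+
+    -cron_timezone=America/Los_Angeles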
diff --git a/source/documentation/0.10.0/deploying-aurora-scheduler.md b/source/documentation/0.10.0/deploying-aurora-scheduler.md
new file mode 100644
index 0000000..a483c1d
--- /dev/null
+++ b/source/documentation/0.10.0/deploying-aurora-scheduler.md
@@ -0,0 +1,326 @@
+# Deploying the Aurora Scheduler
+
+When setting up your cluster, you will install the scheduler on a small number (usually 3 or 5) of
+machines.  This guide helps you get the scheduler set up and troubleshoot some common hurdles.
+
+- [Installing Aurora](#installing-aurora)
+  - [Creating the Distribution .zip File (Optional)](#creating-the-distribution-zip-file-optional)
+  - [Installing Aurora](#installing-aurora-1)
+- [Configuring Aurora](#configuring-aurora)
+  - [A Note on Configuration](#a-note-on-configuration)
+  - [Replicated Log Configuration](#replicated-log-configuration)
+  - [Initializing the Replicated Log](#initializing-the-replicated-log)
+  - [Storage Performance Considerations](#storage-performance-considerations)
+  - [Network considerations](#network-considerations)
+  - [Considerations for running jobs in docker containers](#considerations-for-running-jobs-in-docker-containers)
+  - [Security Considerations](#security-considerations)
+  - [Configuring Resource Oversubscription](#configuring-resource-oversubscription)
+- [Running Aurora](#running-aurora)
+  - [Maintaining an Aurora Installation](#maintaining-an-aurora-installation)
+  - [Monitoring](#monitoring)
+  - [Running stateful services](#running-stateful-services)
+    - [Dedicated attribute](#dedicated-attribute)
+      - [Syntax](#syntax)
+      - [Example](#example)
+- [Best practices](#best-practices)
+  - [Diversity](#diversity)
+- [Common problems](#common-problems)
+  - [Replicated log not initialized](#replicated-log-not-initialized)
+    - [Symptoms](#symptoms)
+    - [Solution](#solution)
+  - [Scheduler not registered](#scheduler-not-registered)
+    - [Symptoms](#symptoms-1)
+    - [Solution](#solution-1)
+- [Changing Scheduler Quorum Size](#changing-scheduler-quorum-size)
+    - [Preparation](#preparation)
+    - [Adding New Schedulers](#adding-new-schedulers)
+
+## Installing Aurora
+The Aurora scheduler is a standalone Java server. As part of the build process it creates a bundle
+of all its dependencies, with the notable exceptions of the JVM and libmesos. Each target server
+should have a JVM (Java 7 or higher) and libmesos (0.23.0) installed.
+
+### Creating the Distribution .zip File (Optional)
+To create a distribution for installation you will need build tools installed. On Ubuntu this can be
+done with `sudo apt-get install build-essential default-jdk`.
+
+    git clone http://gitbox.apache.org/repos/asf/aurora.git
+    cd aurora
+    ./gradlew distZip
+
+Copy the generated `dist/distributions/aurora-scheduler-*.zip` to each node that will run a scheduler.
+
+### Installing Aurora
+Extract the aurora-scheduler zip file. The example configurations assume it is extracted to
+`/usr/local/aurora-scheduler`.
+
+    sudo unzip dist/distributions/aurora-scheduler-*.zip -d /usr/local
+    sudo ln -nfs "$(ls -dt /usr/local/aurora-scheduler-* | head -1)" /usr/local/aurora-scheduler
+
+## Configuring Aurora
+
+### A Note on Configuration
+Like Mesos, Aurora uses command-line flags for runtime configuration. As such the Aurora
+"configuration file" is typically a `scheduler.sh` shell script of the following form:
+
+    #!/bin/bash
+    AURORA_HOME=/usr/local/aurora-scheduler
+
+    # Flags controlling the JVM.
+    JAVA_OPTS=(
+      -Xmx2g
+      -Xms2g
+      # GC tuning, etc.
+    )
+
+    # Flags controlling the scheduler.
+    AURORA_FLAGS=(
+      -http_port=8081
+      # Log configuration, etc.
+    )
+
+    # Environment variables controlling libmesos
+    export JAVA_HOME=...
+    export GLOG_v=1
+    export LIBPROCESS_PORT=8083
+
+    JAVA_OPTS="${JAVA_OPTS[*]}" exec "$AURORA_HOME/bin/aurora-scheduler" "${AURORA_FLAGS[@]}"
+
+That way Aurora's current flags are visible in `ps` and in the `/vars` admin endpoint.
+
+Examples are available under `examples/scheduler/`. For a list of available Aurora flags and their
+documentation run
+
+    /usr/local/aurora-scheduler/bin/aurora-scheduler -help
+
+### Replicated Log Configuration
+All Aurora state is persisted to a replicated log. This includes all jobs Aurora is running,
+where in the cluster they are being run, and the configuration for running them, as
+well as other information such as metadata needed to reconnect to the Mesos master, resource
+quotas, and any other locks in place.
+
+Aurora schedulers use ZooKeeper to discover log replicas and elect a leader. Only one scheduler is
+leader at a given time - the other schedulers follow log writes and prepare to take over as leader
+but do not communicate with the Mesos master. Either 3 or 5 schedulers are recommended in a
+production deployment depending on failure tolerance, and they must have persistent storage.
+
+In a cluster with `N` schedulers, the flag `-native_log_quorum_size` should be set to
+`floor(N/2) + 1`. So in a cluster with 1 scheduler it should be set to `1`, in a cluster with 3 it
+should be set to `2`, and in a cluster of 5 it should be set to `3`.
+
+  Number of schedulers (N) | `-native_log_quorum_size` setting (`floor(N/2) + 1`)
+  ------------------------ | -------------------------------------------------------------
+  1                        | 1
+  3                        | 2
+  5                        | 3
+  7                        | 4
+
+*Incorrectly setting this flag will cause data corruption!*
+
+See [this document](/documentation/0.10.0/storage-config/#scheduler-storage-configuration-flags) for more replicated
+log and storage configuration options.
+
+### Initializing the Replicated Log
+Before you start Aurora you will also need to initialize the log on a majority of the masters.
+
+    mesos-log initialize --path="/path/to/native/log"
+
+The `--path` flag should match the `--native_log_file_path` flag to the scheduler.
+Failing to do this will result in the following message when you try to start the scheduler.
+
+    Replica in EMPTY status received a broadcasted recover request
+
+### Storage Performance Considerations
+
+See [this document](/documentation/0.10.0/scheduler-storage/) for scheduler storage performance considerations.
+
+### Network considerations
+The Aurora scheduler listens on 2 ports - an HTTP port used for client RPCs and a web UI,
+and a libprocess (HTTP+Protobuf) port used to communicate with the Mesos master and for the log
+replication protocol. These can be left unconfigured (the scheduler publishes all selected ports
+to ZooKeeper) or explicitly set in the startup script as follows:
+
+    # ...
+    AURORA_FLAGS=(
+      # ...
+      -http_port=8081
+      # ...
+    )
+    # ...
+    export LIBPROCESS_PORT=8083
+    # ...
+
+### Considerations for running jobs in docker containers
+*Note: Docker support is currently EXPERIMENTAL.*
+
+In order for Aurora to launch jobs using docker containers, a few extra configuration options
+must be set.  The [docker containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/)
+must be enabled on the mesos slaves by launching them with the `--containerizers=docker,mesos` option.
+
+By default, Aurora will configure Mesos to copy the file specified in `-thermos_executor_path`
+into the container's sandbox.  If using a wrapper script to launch the thermos executor,
+specify the path to the wrapper in that argument. In addition, the path to the executor pex itself
+must be included in the `-thermos_executor_resources` option. Doing so will ensure that both the
+wrapper script and executor are correctly copied into the sandbox. Finally, ensure the wrapper
+script does not access resources outside of the sandbox, as when the script is run from within a
+docker container those resources will not exist.
+
+A scheduler flag, `-global_container_mounts` allows mounting paths from the host (i.e., the slave)
+into all containers on that host. The format is a comma-separated list of host_path:container_path[:mode]
+tuples. For example `-global_container_mounts=/opt/secret_keys_dir:/mnt/secret_keys_dir:ro` mounts
+`/opt/secret_keys_dir` from the slaves into all launched containers. Valid modes are `ro` and `rw`.
+
+In order to correctly execute processes inside a job, the docker container must have python 2.7
+installed.
+
+## Running Aurora
+Configure a supervisor like [Monit](http://mmonit.com/monit/) or
+[supervisord](http://supervisord.org/) to run the created `scheduler.sh` file and restart it
+whenever it fails. Aurora expects to be restarted by an external process when it fails. Aurora
+supports an active health checking protocol on its admin HTTP interface - if a `GET /health` times
+out or returns anything other than `200 OK` the scheduler process is unhealthy and should be
+restarted.
+
+For example, monit can be configured with
+
+    if failed port 8081 send "GET /health HTTP/1.0\r\n" expect "OK\n" with timeout 2 seconds for 10 cycles then restart
+
+assuming you set `-http_port=8081`.
+
+## Security Considerations
+
+See [security.md](/documentation/0.10.0/security/).
+
+## Configuring Resource Oversubscription
+
+**WARNING**: This feature is currently in alpha status. Do not use it in production clusters!
+See [this document](/documentation/0.10.0/configuration-reference/#revocable-jobs) for more feature details.
+
+Set this scheduler flag to allow receiving revocable Mesos offers:
+
+    -receive_revocable_resources=true
+
+Specify a tier configuration file path:
+
+    -tier_config=path/to/tiers/config.json
+
+Example [tier configuration file](https://github.com/apache/aurora/blob/#{git_tag}/src/test/resources/org/apache/aurora/scheduler/tiers-example.json).
+
+### Maintaining an Aurora Installation
+
+### Monitoring
+Please see our dedicated [monitoring guide](/documentation/0.10.0/monitoring/) for in-depth discussion on monitoring.
+
+### Running stateful services
+Aurora is best suited to run stateless applications, but it also accommodates stateful services
+like databases, or services that otherwise need to always run on the same machines.
+
+#### Dedicated attribute
+The Mesos slave has the `--attributes` command line argument which can be used to mark a slave with
+static attributes (not to be confused with `--resources`, which are dynamic and accounted).
+
+Aurora makes these attributes available for matching with scheduling
+[constraints](/documentation/0.10.0/configuration-reference/#specifying-scheduling-constraints).  Most of these
+constraints are arbitrary and available for custom use.  There is one exception, though: the
+`dedicated` attribute.  Aurora treats this specially: it only allows matching jobs to run on these
+machines, and only schedules matching jobs on these machines.
+
+See the [section](/documentation/0.10.0/resources/#resource-quota) about resource quotas to learn how quotas apply to
+dedicated jobs.
+
+##### Syntax
+The dedicated attribute has semantic meaning. The format is `$role(/.*)?`. When a job is created,
+the scheduler requires that the `$role` component matches the `role` field in the job
+configuration, and will reject the job creation otherwise.  The remainder of the attribute is
+free-form. We've developed the idiom of formatting this attribute as `$role/$job`, but do not
+enforce this.
+
+##### Example
+Consider the following slave command line:
+
+    mesos-slave --attributes="dedicated:db_team/redis" ...
+
+And this job configuration:
+
+    Service(
+      name = 'redis',
+      role = 'db_team',
+      constraints = {
+        'dedicated': 'db_team/redis'
+      }
+      ...
+    )
+
+The job configuration is indicating that it should only be scheduled on slaves with the attribute
+`dedicated:db_team/redis`.  Additionally, Aurora will prevent any tasks that do _not_ have that
+constraint from running on those slaves.
+
+## Best practices
+### Diversity
+Data centers are often organized with hierarchical failure domains.  Common failure domains
+include hosts, racks, rows, and PDUs.  If you have this information available, it is wise to tag
+the mesos-slave with them as
+[attributes](https://mesos.apache.org/documentation/attributes-resources/).
+
+When it comes time to schedule jobs, Aurora will automatically spread them across the failure
+domains as specified in the
+[job configuration](/documentation/0.10.0/configuration-reference/#specifying-scheduling-constraints).
+
+Note: in virtualized environments like EC2, the only attribute that usually makes sense for this
+purpose is `host`.
+
+## Common problems
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions here to help get you moving.
+
+### Replicated log not initialized
+
+#### Symptoms
+- Scheduler RPCs and web interface claim `Storage is not READY`
+- Scheduler log repeatedly prints messages like
+
+  ```
+  I1016 16:12:27.234133 26081 replica.cpp:638] Replica in EMPTY status
+  received a broadcasted recover request
+  I1016 16:12:27.234256 26084 recover.cpp:188] Received a recover response
+  from a replica in EMPTY status
+  ```
+
+#### Solution
+When you create a new cluster, you need to inform a quorum of schedulers that they are safe to
+consider their database to be empty by [initializing](#initializing-the-replicated-log) the
+replicated log. This is done to prevent the scheduler from modifying the cluster state in the event
+of multiple simultaneous disk failures or, more likely, misconfiguration of the replicated log path.
+
+### Scheduler not registered
+
+#### Symptoms
+Scheduler log contains
+
+    Framework has not been registered within the tolerated delay.
+
+#### Solution
+Double-check that the scheduler is configured correctly to reach the master. If you are registering
+the master in ZooKeeper, make sure the command line argument to the master:
+
+    --zk=zk://$ZK_HOST:2181/mesos/master
+
+is the same as the one on the scheduler:
+
+    -mesos_master_address=zk://$ZK_HOST:2181/mesos/master
+
+## Changing Scheduler Quorum Size
+Special care needs to be taken when changing the size of the Aurora scheduler quorum.
+Since Aurora uses a Mesos replicated log, similar steps need to be followed as when
+[changing the mesos quorum size](http://mesos.apache.org/documentation/latest/operational-guide).
+
+### Preparation
+Increase [-native_log_quorum_size](/documentation/0.10.0/storage-config/#-native_log_quorum_size) on each
+existing scheduler and restart them. When updating from 3 to 5 schedulers, the quorum size
+would grow from 2 to 3.
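+
+For example, a sketch of the flag change in each scheduler's `scheduler.sh`
+when growing from 3 to 5 schedulers:
+
+    AURORA_FLAGS=(
+      # ...
+      -native_log_quorum_size=3  # was 2 while running 3 schedulers
+    )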
+
+### Adding New Schedulers
+Start the new schedulers with `-native_log_quorum_size` set to the new value. Failing to
+first increase the quorum size on running schedulers can in some cases result in corruption
+or truncation of the replicated log used by Aurora. In that case, see the documentation on
+[recovering from backup](/documentation/0.10.0/storage-config/#recovering-from-a-scheduler-backup).
diff --git a/source/documentation/0.10.0/design/command-hooks.md b/source/documentation/0.10.0/design/command-hooks.md
new file mode 100644
index 0000000..3f3f70f
--- /dev/null
+++ b/source/documentation/0.10.0/design/command-hooks.md
@@ -0,0 +1,102 @@
+# Command Hooks for the Aurora Client
+
+## Introduction/Motivation
+
+We've got hooks in the client that surround API calls. These are
+pretty awkward, because they don't correlate with user actions. For
+example, suppose we wanted a policy that said users weren't allowed to
+kill all instances of a production job at once.
+
+Right now, all that we could hook would be the "killJob" api call. But
+kill (at least in newer versions of the client) normally runs in
+batches. If a user called killall, what we would see on the API level
+is a series of "killJob" calls, each of which specified a batch of
+instances. We woudn't be able to distinguish between really killing
+all instances of a job (which is forbidden under this policy), and
+carefully killing in batches (which is permitted.) In each case, the
+hook would just see a series of API calls, and couldn't find out what
+the actual command being executed was!
+
+For most policy enforcement, what we really want to be able to do is
+look at and vet the commands that a user is performing, not the API
+calls that the client uses to implement those commands.
+
+So I propose that we add a new kind of hook, which surrounds noun/verb
+commands. A hook will register itself to handle a collection of (noun,
+verb) pairs. Whenever any of those noun/verb commands are invoked, the
+hook's methods will be called around the execution of the verb. A
+pre-hook will have the ability to reject a command, preventing the
+verb from being executed.
+
+## Registering Hooks
+
+These hooks will be registered via configuration plugins. A configuration plugin
+can register hooks using an API. Hooks registered this way are, effectively,
+hardwired into the client executable.
+
+The order of execution of hooks is unspecified: they may be called in
+any order. There is no way to guarantee that one hook will execute
+before some other hook.
+
+
+### Global Hooks
+
+Hooks registered via the Python API are called _global_ hooks,
+because they will run for all configurations, whether or not they
+specify any hooks in the configuration file.
+
+In the implementation, hooks are registered in the module
+`apache.aurora.client.cli.command_hooks`, using the class
+`GlobalCommandHookRegistry`. A global hook can be registered by calling
+`GlobalCommandHookRegistry.register_command_hook` in a configuration plugin.
+
+### The API
+
+    class CommandHook(object):
+      @property
+      def name(self):
+        """Returns a name for the hook."""
+
+      def get_nouns(self):
+        """Return the nouns that have verbs that should invoke this hook."""
+
+      def get_verbs(self, noun):
+        """Return the verbs for a particular noun that should invoke this hook."""
+
+      @abstractmethod
+      def pre_command(self, noun, verb, context, commandline):
+        """Execute a hook before invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook.
+        * commandline: the original argv collection used to invoke the client.
+        Returns: True if the command should be allowed to proceed; False if the command
+        should be rejected.
+        """
+
+      def post_command(self, noun, verb, context, commandline, result):
+        """Execute a hook after invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook.
+        * commandline: the original argv collection used to invoke the client.
+        * result: the result code returned by the verb.
+        Returns: nothing
+        """
+
+    class GlobalCommandHookRegistry(object):
+      @classmethod
+      def register_command_hook(cls, hook):
+        pass
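+
+As a sketch (the class and its policy logic are illustrative, not part of
+the proposed API), a hook enforcing the motivating killall policy might
+look like:
+
+    class BlockProdKillall(CommandHook):
+      @property
+      def name(self):
+        return 'block_prod_killall'
+
+      def get_nouns(self):
+        return ['job']
+
+      def get_verbs(self, noun):
+        return ['killall'] if noun == 'job' else []
+
+      def pre_command(self, noun, verb, context, commandline):
+        # Allow the command unless it targets a prod job key.
+        return not any('/prod/' in arg for arg in commandline)
+
+      def post_command(self, noun, verb, context, commandline, result):
+        pass
+
+    GlobalCommandHookRegistry.register_command_hook(BlockProdKillall())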
+
+### Skipping Hooks
+
+To skip a hook, a user uses a command-line option, `--skip-hooks`. The option can either
+specify specific hooks to skip, or "all":
+
+* `aurora --skip-hooks=all job create east/bozo/devel/myjob` will create a job
+  without running any hooks.
+* `aurora --skip-hooks=test,iq job create east/bozo/devel/myjob` will create a job,
+  and will skip only the hooks named "test" and "iq".
diff --git a/source/documentation/0.10.0/developing-aurora-client.md b/source/documentation/0.10.0/developing-aurora-client.md
new file mode 100644
index 0000000..938e770
--- /dev/null
+++ b/source/documentation/0.10.0/developing-aurora-client.md
@@ -0,0 +1,91 @@
+Getting Started
+===============
+
+The client is written in Python, and uses the
+[Pants](http://pantsbuild.github.io/python-readme.html) build tool.
+
+Client Configuration
+====================
+
+The client uses a configuration file that specifies available clusters. More information about the
+contents of this file can be found in the
+[Client Cluster Configuration](/documentation/0.10.0/client-cluster-configuration/) documentation. Information about
+how the client locates this file can be found in the
+[Client Commands](/documentation/0.10.0/client-commands/#cluster-configuration) documentation.
+
+Building and Testing the Client
+===============================
+
+Building and testing the client code are both done using Pants. The relevant targets to know about
+are:
+
+   * Build a client executable: `./pants binary src/main/python/apache/aurora/client:aurora`
+   * Test client code: `./pants test src/test/python/apache/aurora/client/cli:all`
+
+Running/Debugging the Client
+============================
+
+For manually testing client changes against a cluster, we use [Vagrant](https://www.vagrantup.com/).
+To start a virtual cluster, you need to install Vagrant, and then run `vagrant up` from the root of
+the aurora workspace. This will create a vagrant host named "devcluster", with a mesos master, a set
+of mesos slaves, and an aurora scheduler.
+
+If you have a change you would like to test in your local cluster, you'll rebuild the client:
+
+    vagrant ssh -c 'aurorabuild client'
+
+Once this completes, the `aurora` command will reflect your changes.
+
+Running/Debugging the Client in PyCharm
+=======================================
+
+It's possible to use PyCharm to run and debug both the client and client tests in an IDE. In order
+to do this, first run:
+
+    build-support/python/make-pycharm-virtualenv
+
+This script will configure a virtualenv with all of our Python requirements. Once the script
+completes it will emit instructions for configuring PyCharm:
+
+    Your PyCharm environment is now set up.  You can open the project root
+    directory with PyCharm.
+
+    Once the project is loaded:
+      - open project settings
+      - click 'Project Interpreter'
+      - click the cog in the upper-right corner
+      - click 'Add Local'
+      - select 'build-support/python/pycharm.venv/bin/python'
+      - click 'OK'
+
+### Running/Debugging Tests
+
+After following these instructions, you should now be able to run/debug tests directly from the IDE
+by right-clicking on a test (or test class) and choosing to run or debug:
+
+[![Debug Client Test](images/debug-client-test.png)](images/debug-client-test.png)
+
+If you've set a breakpoint, you can see the run will now stop and let you debug:
+
+[![Debugging Client Test](images/debugging-client-test.png)](images/debugging-client-test.png)
+
+### Running/Debugging the Client
+
+Actually running and debugging the client is unfortunately a bit more complex. You'll need to create
+a Run configuration:
+
+* Go to Run → Edit Configurations
+* Click the + icon to add a new configuration.
+* Choose python and name the configuration 'client'.
+* Set the script path to `/your/path/to/aurora/src/main/python/apache/aurora/client/cli/client.py`
+* Set the script parameters to the command you want to run (e.g. `job status <job key>`)
+* Expand the Environment section and click the ellipsis to add a new environment variable
+* Click the + at the bottom to add a new variable named AURORA_CONFIG_ROOT whose value is the
+  path where your cluster configuration can be found. For example, to talk to the scheduler
+  running in the vagrant image, it would be set to `/your/path/to/aurora/examples/vagrant` (this
+  is the directory where our example clusters.json is found).
+* You should now be able to run and debug this configuration!
+
+Making thrift schema changes
+============================
+See [this document](/documentation/0.10.0/thrift-deprecation/) for any thrift related changes.
diff --git a/source/documentation/0.10.0/developing-aurora-scheduler.md b/source/documentation/0.10.0/developing-aurora-scheduler.md
new file mode 100644
index 0000000..d4fe655
--- /dev/null
+++ b/source/documentation/0.10.0/developing-aurora-scheduler.md
@@ -0,0 +1,131 @@
+Java code in the aurora repo is built with [Gradle](http://gradle.org).
+
+Getting Started
+===============
+You will need Java 7 installed and on your `PATH` or unzipped somewhere with `JAVA_HOME` set. Then
+
+    ./gradlew tasks
+
+will bootstrap the build system and show available tasks. This can take a while the first time you
+run it but subsequent runs will be much faster due to cached artifacts.
+
+Running the Tests
+-----------------
+Aurora has a comprehensive unit test suite. To run the tests use
+
+    ./gradlew build
+
+Gradle will only re-run tests when their dependencies have changed. To force a re-run of all
+tests use
+
+    ./gradlew clean build
+
+Running the build with code quality checks
+------------------------------------------
+To speed up development iteration, the plain gradle commands will not run static analysis tools.
+However, you should run these before posting a review diff, and **always** run this before pushing a
+commit to origin/master.
+
+    ./gradlew build -Pq
+
+Creating a bundle for deployment
+--------------------------------
+Gradle can create a zip file containing Aurora, all of its dependencies, and a launch script with
+
+    ./gradlew distZip
+
+or a tar file containing the same files with
+
+    ./gradlew distTar
+
+The output file will be written to `dist/distributions/aurora-scheduler.zip` or
+`dist/distributions/aurora-scheduler.tar`.
+
+Developing Aurora Java code
+===========================
+
+Setting up an IDE
+-----------------
+Gradle can generate project files for your IDE. To generate an IntelliJ IDEA project run
+
+    ./gradlew idea
+
+and import the generated `aurora.ipr` file.
+
+Adding or Upgrading a Dependency
+--------------------------------
+New dependencies can be added from Maven central by adding a `compile` dependency to `build.gradle`.
+For example, to add a dependency on `com.example`'s `example-lib` 1.0 add this block:
+
+    compile 'com.example:example-lib:1.0'
+
+NOTE: Anyone thinking about adding a new dependency should first familiarize themselves with the
+Apache Foundation's third-party licensing
+[policy](http://www.apache.org/legal/resolved.html#category-x).
+
+Developing Aurora UI
+======================
+
+Installing bower (optional)
+----------------------------
+Third party JS libraries used in Aurora (located at 3rdparty/javascript/bower_components) are
+managed by bower, a JS dependency manager. Bower is only required if you plan to add, remove or
+update JS libraries. Bower can be installed using the following command:
+
+    npm install -g bower
+
+Bower depends on node.js and npm. The easiest way to install node on a Mac is via Homebrew:
+
+    brew install node
+
+For more node.js installation options refer to https://github.com/joyent/node/wiki/Installation.
+
+More info on installing and using bower can be found at: http://bower.io/. Once installed, you can
+use the following commands to view and modify the bower repo at
+3rdparty/javascript/bower_components
+
+    bower list
+    bower install <library name>
+    bower remove <library name>
+    bower update <library name>
+    bower help
+
+Faster Iteration in Vagrant
+---------------------------
+The scheduler serves UI assets from the classpath. For production deployments this means the assets
+are served from within a jar. However, for faster development iteration, the vagrant image is
+configured to add `/vagrant/dist/resources/main` to the head of CLASSPATH. This path is configured
+as a shared filesystem to the path on the host system where your Aurora repository lives. This means
+that any updates to dist/resources/main in your checkout will be reflected immediately in the UI
+served from within the vagrant image.
+
+The one caveat to this is that this path is under `dist` not `src`. This is because the assets must
+be processed by gradle before they can be served. So, unfortunately, you cannot just save your local
+changes and see them reflected in the UI, you must first run `./gradlew processResources`. This is
+less than ideal, but better than having to restart the scheduler after every change. Additionally,
+Gradle makes this process somewhat easier with its `--continuous` flag. If you run
+`./gradlew processResources --continuous`, Gradle will monitor the filesystem for changes and run the
+task automatically as necessary. This doesn't quite provide hot-reload capabilities, but it does
+allow for <5s from save to changes being visible in the UI with no further action required on the
+part of the developer.
+
+Developing the Aurora Build System
+==================================
+
+Bootstrapping Gradle
+--------------------
+The following files were autogenerated by `gradle wrapper` using gradle 1.8's
+[Wrapper](http://www.gradle.org/docs/1.8/dsl/org.gradle.api.tasks.wrapper.Wrapper.html) plugin and
+should not be modified directly:
+
+    ./gradlew
+    ./gradlew.bat
+    ./gradle/wrapper/gradle-wrapper.jar
+    ./gradle/wrapper/gradle-wrapper.properties
+
+To upgrade Gradle unpack the new version somewhere, run `/path/to/new/gradle wrapper` in the
+repository root and commit the changed files.
+
+Making thrift schema changes
+============================
+See [this document](/documentation/0.10.0/thrift-deprecation/) for any thrift related changes.
diff --git a/source/documentation/0.10.0/hooks.md b/source/documentation/0.10.0/hooks.md
new file mode 100644
index 0000000..63dbdb9
--- /dev/null
+++ b/source/documentation/0.10.0/hooks.md
@@ -0,0 +1,246 @@
+# Hooks for Aurora Client API
+
+- [Introduction](#introduction)
+- [Hook Types](#hook-types)
+- [Execution Order](#execution-order)
+- [Hookable Methods](#hookable-methods)
+- [Activating and Using Hooks](#activating-and-using-hooks)
+- [.aurora Config File Settings](#aurora-config-file-settings)
+- [Command Line](#command-line)
+- [Hooks Protocol](#hooks-protocol)
+  - [pre_ Methods](#pre_-methods)
+  - [err_ Methods](#err_-methods)
+  - [post_ Methods](#post_-methods)
+- [Generic Hooks](#generic-hooks)
+- [Hooks Process Checklist](#hooks-process-checklist)
+
+## Introduction
+
+You can execute hook methods around Aurora Client API methods when they are called by the Aurora Command Line commands.
+
+Explaining how hooks work is a bit tricky because of some indirection about what they apply to. Basically, a hook is code that executes when a particular Aurora Client API method runs, letting you extend the method's actions. The hook executes on the client side, specifically on the machine executing Aurora commands.
+
+The catch is that hooks are associated with Aurora Client API methods, which users don't directly call. Instead, users call Aurora Command Line commands, which call Client API methods during their execution. Since which hooks run depends on which Client API methods get called, you will need to know which Command Line commands call which API methods. Later on, there is a table showing the various associations.
+
+**Terminology Note**: From now on, "method(s)" refer to Client API methods, and "command(s)" refer to Command Line commands.
+
+## Hook Types
+
+Hooks have three basic types, differing by when they run with respect to their associated method.
+
+`pre_<method_name>`: When its associated method is called, the `pre_` hook executes first, then the called method. If the `pre_` hook fails, the method never runs. Later code that expected the method to succeed may be affected by this, and the Aurora client may terminate as a result.
+
+Note that a `pre_` hook can error-trap internally so it does not
+return `False`. Designers/contributors of new `pre_` hooks should
+consider whether or not to error-trap them. You can error trap at the
+highest level very generally and always pass the `pre_` hook by
+returning `True`. For example:
+
+```python
+def pre_create(...):
+  do_something()  # if do_something fails with an exception, the create_job is not attempted!
+  return True
+
+# However...
+def pre_create(...):
+  try:
+    do_something()  # may cause exception
+  except Exception:  # generic error trap will catch it
+    pass  # and ignore the exception
+  return True  # create_job will run in any case!
+```
+
+`post_<method_name>`: A `post_` hook executes after its associated method successfully finishes running. If it fails, the already executed method is unaffected. A `post_` hook's error is trapped, and any later operations are unaffected.
+
+`err_<method_name>`: Executes only when its associated method returns a status other than OK or throws an exception. If an `err_` hook fails, the already executed method is unaffected. An `err_` hook's error is trapped, and any later operations are unaffected.
+
+## Execution Order
+
+A command with `pre_`, `post_`, and `err_` hooks defined and activated for its called method executes in the following order when the method successfully executes:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and successfully finishes
+6. `post_` method hook runs
+7. Command code executes
+8. Command execution ends
+
+The following is what happens when, for the same command and hooks, the method associated with the command suffers an error and does not successfully finish executing:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and fails
+6. `err_` method hook runs
+7. Command Code executes (if `err_` method does not end the command execution)
+8. Command execution ends
+
+Note that the `post_` and `err_` hooks for the same method can never both run for a single execution of that method.
+
+## Hookable Methods
+
+You can associate `pre_`, `post_`, and `err_` hooks with the following methods. Since you do not directly interact with the methods, but rather the Aurora Command Line commands that call them, for each method we also list the command(s) that can call the method. Note that a different method or methods may be called by a command depending on how the command's other code executes. Similarly, multiple commands can call the same method. We also list the methods' argument signatures, which are used by their associated hooks. <a name="Chart"></a>
+
+  Aurora Client API Method | Client API Method Argument Signature | Aurora Command Line Command
+  -------------------------| ------------------------------------- | ---------------------------
+  ```cancel_update``` | ```self```, ```job_key``` | ```job cancel-update```
+  ```create_job``` | ```self```, ```config``` | ```job create```, ```runtask```
+  ```restart``` | ```self```, ```job_key```, ```shards```, ```update_config```, ```health_check_interval_seconds``` | ```job restart```
+  ```update_job``` | ```self```, ```config```, ```health_check_interval_seconds=3```, ```shards=None``` | ```job update```
+  ```kill_job``` | ```self```, ```job_key```, ```shards=None``` |  ```job kill```
+  ```start_cronjob``` | ```self```, ```job_key``` | ```cron start```
+  ```start_job_update``` | ```self```, ```config```, ```instances=None``` | ```update start```
+
+Some specific examples:
+
+* `pre_create_job` executes when a `create_job` method is called, and before the `create_job` method itself executes.
+
+* `post_cancel_update` executes after a `cancel_update` method has successfully finished running.
+
+* `err_kill_job` executes when the `kill_job` method is called, but doesn't successfully finish running.
+
+## Activating and Using Hooks
+
+By default, hooks are inactive. If you do not want to use hooks, you do not need to make any changes to your code. If you do want to use hooks, you will need to alter your `.aurora` config file to activate them both for the configuration as a whole and for individual `Job`s. And, of course, you will need to define in your config file what happens when a particular hook executes.
+
+## .aurora Config File Settings
+
+You can define a top-level `hooks` variable in any `.aurora` config file. `hooks` is a list of all objects that define hooks used by `Job`s defined in that config file. If you do not want to define any hooks for a configuration, `hooks` is optional.
+
+    hooks = [Object_with_defined_hooks1, Object_with_defined_hooks2]
+
+Be careful when assembling a config file using `include` on multiple smaller config files. If there are multiple files that assign a value to `hooks`, only the last assignment made will stick. For example, if `x.aurora` has `hooks = [a, b, c]` and `y.aurora` has `hooks = [d, e, f]` and `z.aurora` has, in this order, `include x.aurora` and `include y.aurora`, the `hooks` value will be `[d, e, f]`.
+
+Also, for any `Job` that you want to use hooks with, its `Job` definition in the `.aurora` config file must set an `enable_hooks` flag to `True` (it defaults to `False`). By default, hooks are disabled and you must enable them for `Job`s of your choice.
+
+To summarize, to use hooks for a particular job, you must both activate hooks for your config file as a whole, and for that job. Activating hooks only for individual jobs won't work, nor will only activating hooks for your config file as a whole. You must also specify the hooks' defining object in the `hooks` variable.
+
+Recall that `.aurora` config files are written in Pystachio. So the following turns on hooks for production jobs at cluster1 and cluster2, but leaves them off for similar jobs with a defined user role. Of course, you also need to list the objects that define the hooks in your config file's `hooks` variable.
+
+```python
+jobs = [
+    Job(enable_hooks=True, cluster=c, env='prod') for c in ('cluster1', 'cluster2')
+]
+# Hooks disabled for these jobs:
+jobs.extend(
+    Job(cluster=c, env='prod', role=getpass.getuser()) for c in ('cluster1', 'cluster2'))
+```
+
+## Command Line
+
+All Aurora Command Line commands now accept an `.aurora` config file as an optional parameter (some, of course, accept it as a required parameter). Whenever a command has a `.aurora` file parameter, any hooks specified and activated in the `.aurora` file can be used. For example:
+
+    aurora job restart cluster1/role/env/app myapp.aurora
+
+The command activates any hooks specified and activated in `myapp.aurora`. For the `restart` command, that is the only thing the `myapp.aurora` parameter does. So, if the command was the following, since there is no `.aurora` config file to specify any hooks, no hooks on the `restart` command can run.
+
+    aurora job restart cluster1/role/env/app
+
+## Hooks Protocol
+
+Any object defined in the `.aurora` config file can define hook methods. You should define your hook methods within a class, and then use the class name as a value in the `hooks` list in your config file.
+
+Note that you can define other methods in the class that its hook methods can call; all the logic of a hook does not have to be in its definition.
+
+The following example defines a class containing a `pre_kill_job` hook definition that calls another method defined in the class.
+
+```python
+# Defines a method pre_kill_job
+class KillConfirmer(object):
+  def confirm(self, msg):
+    return raw_input(msg).lower() == 'yes'
+
+  def pre_kill_job(self, job_key, shards=None):
+    shards = ('shards %s' % shards) if shards is not None else 'all shards'
+    return self.confirm('Are you sure you want to kill %s (%s)? (yes/no): '
+                        % (job_key, shards))
+```
+
+### pre_ Methods
+
+`pre_` methods have the signature:
+
+    pre_<API method name>(self, <associated method's signature>)
+
+`pre_` methods have the same signature as their associated method, with the addition of `self` as the first parameter. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `pre_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+If this method returns `False`, the associated API method call aborts.
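+
+A minimal sketch (hypothetical class name) of a `pre_` hook using the `*`/`**` syntax described
+above:
+
+```python
+class AuditLogger(object):
+  def pre_create_job(self, *args, **kw):
+    # Runs before create_job; positional arguments arrive in args, named ones in kw.
+    print('About to run create_job: args=%s kw=%s' % (args, kw))
+    return True  # returning False would abort the create_job call
+```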
+
+### err_ Methods
+
+`err_` methods have the signature:
+
+    err_<API method name>(self, exc, <associated method's signature>)
+
+`err_` methods have the same signature as their associated method, with the addition of a first parameter `self` and a second parameter `exc`. `exc` is either a result with responseCode other than `ResponseCode.OK` or an `Exception`. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `err_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+`err_` method return codes are ignored.
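+
+A minimal sketch of an `err_` hook (hypothetical class name; signature per the chart above):
+
+```python
+class KillReporter(object):
+  def err_kill_job(self, exc, job_key, shards=None):
+    # exc is a non-OK result or the Exception raised by kill_job.
+    print('kill_job failed for %s (shards=%s): %s' % (job_key, shards, exc))
+```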
+
+### post_ Methods
+
+`post_` methods have the signature:
+
+    post_<API method name>(self, result, <associated method signature>)
+
+`post_` method parameters are `self`, then `result`, followed by the same parameter signature as their associated method. `result` is the result of the associated method call. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `post_` methods, you can use the `*` and `**` syntax to designate that all unspecified arguments are passed in a list to the `*`ed parameter and all unspecified named arguments with values are passed as name/value pairs to the `**`ed parameter.
+
+`post_` method return codes are ignored.
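+
+A minimal sketch of a `post_` hook (hypothetical class name; signature per the chart above):
+
+```python
+class RestartNotifier(object):
+  def post_restart(self, result, job_key, shards, update_config,
+                   health_check_interval_seconds):
+    # result is the value returned by the successful restart call.
+    print('restart of %s (shards=%s) returned %s' % (job_key, shards, result))
+```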
+
+## Generic Hooks
+
+There are seven Aurora API Methods which any of the three hook types can attach to. Thus, there are 21 possible hook/method combinations for a single `.aurora` config file. Say that you define `pre_` and `post_` hooks for the `restart` method. That leaves 19 undefined hook/method combinations: `err_restart` and the 3 `pre_`, `post_`, and `err_` hooks for each of the other 6 hookable methods. You can define what happens when any of these 19 otherwise undefined hooks execute via a generic hook, whose signature is:
+
+```python
+generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw)
+```
+
+where:
+
+* `hook_config` is a named tuple of `config` (the Pystachio `config` object) and `job_key`.
+
+* `event` is one of `pre`, `err`, or `post`, indicating which type of hook the generic hook is standing in for. For example, assume no specific hooks were defined for the `restart` API command. If `generic_hook` is defined and activated, and `restart` is called, `generic_hook` will effectively run as `pre_restart`, `post_restart`, and `err_restart`. You can use a selection statement on this value so that `generic_hook` will act differently based on whether it is standing in for a `pre_`, `post_`, or `err_` hook.
+
+* `method_name` is the Client API method name whose execution is causing this execution of the `generic_hook`.
+
+* `*args`, `**kw` are the API method arguments and keyword arguments respectively.
+
+* `result_or_err` is a tri-state parameter taking one of these three values:
+  1. `None` for `pre_` hooks
+  2. `result` for `post_` hooks
+  3. `exc` for `err_` hooks
+
+Example:
+
+```python
+# Overrides the standard do-nothing generic_hook by adding a log writing operation.
+from twitter.common import log
+
+class Logger(object):
+  '''Adds to the log every time a hookable API method is called'''
+  def generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw):
+    log.info('%s: %s_%s of %s'
+             % (self.__class__.__name__, event, method_name, hook_config.job_key))
+```
+
+## Hooks Process Checklist
+
+1. In your `.aurora` config file, add a `hooks` variable. Note that you may want to define a `.aurora` file only for hook definitions and then include this file in multiple other config files that you want to use the same hooks.
+
+```python
+hooks = []
+```
+
+2. In the `hooks` variable, list all objects that define hooks used by `Job`s defined in this config:
+
+```python
+hooks = [Object_hook_definer1, Object_hook_definer2]
+```
+
+3. For each job that uses hooks in this config file, add `enable_hooks = True` to the `Job` definition. Note that this is necessary even if you only want to use the generic hook.
+
+4. Write your `pre_`, `post_`, and `err_` hook definitions as part of an object definition in your `.aurora` config file.
+
+5. If desired, write your `generic_hook` definition as part of an object definition in your `.aurora` config file. Remember, the object must be listed as a member of `hooks`.
+
+6. If your Aurora command line command does not otherwise take an `.aurora` config file argument, add the appropriate `.aurora` file as an argument in order to define and activate the configuration's hooks.
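+
+Putting the checklist together, a minimal two-file sketch using the `include` mechanism mentioned
+above (file names, class, and `Job` fields are hypothetical; real `Job`s also need their usual
+task settings):
+
+```python
+# hooks.aurora: shared hook definitions, per step 1.
+class KillAuditor(object):
+  def post_kill_job(self, result, job_key, shards=None):
+    print('killed %s (shards=%s): %s' % (job_key, shards, result))
+
+hooks = [KillAuditor]
+```
+
+```python
+# myjob.aurora: pulls in the shared hooks and enables them per Job.
+include('hooks.aurora')
+
+jobs = [Job(enable_hooks=True, cluster='cluster1', env='prod')]  # task settings elided
+```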
diff --git a/publish/documentation/latest/images/CPUavailability.png b/source/documentation/0.10.0/images/CPUavailability.png
similarity index 100%
rename from publish/documentation/latest/images/CPUavailability.png
rename to source/documentation/0.10.0/images/CPUavailability.png
Binary files differ
diff --git a/publish/documentation/latest/images/HelloWorldJob.png b/source/documentation/0.10.0/images/HelloWorldJob.png
similarity index 100%
rename from publish/documentation/latest/images/HelloWorldJob.png
rename to source/documentation/0.10.0/images/HelloWorldJob.png
Binary files differ
diff --git a/publish/documentation/latest/images/RoleJobs.png b/source/documentation/0.10.0/images/RoleJobs.png
similarity index 100%
rename from publish/documentation/latest/images/RoleJobs.png
rename to source/documentation/0.10.0/images/RoleJobs.png
Binary files differ
diff --git a/publish/documentation/latest/images/ScheduledJobs.png b/source/documentation/0.10.0/images/ScheduledJobs.png
similarity index 100%
rename from publish/documentation/latest/images/ScheduledJobs.png
rename to source/documentation/0.10.0/images/ScheduledJobs.png
Binary files differ
diff --git a/publish/documentation/latest/images/TaskBreakdown.png b/source/documentation/0.10.0/images/TaskBreakdown.png
similarity index 100%
rename from publish/documentation/latest/images/TaskBreakdown.png
rename to source/documentation/0.10.0/images/TaskBreakdown.png
Binary files differ
diff --git a/publish/documentation/latest/images/aurora_hierarchy.png b/source/documentation/0.10.0/images/aurora_hierarchy.png
similarity index 100%
rename from publish/documentation/latest/images/aurora_hierarchy.png
rename to source/documentation/0.10.0/images/aurora_hierarchy.png
Binary files differ
diff --git a/source/documentation/0.10.0/images/debug-client-test.png b/source/documentation/0.10.0/images/debug-client-test.png
new file mode 100644
index 0000000..8b67a10
--- /dev/null
+++ b/source/documentation/0.10.0/images/debug-client-test.png
Binary files differ
diff --git a/source/documentation/0.10.0/images/debugging-client-test.png b/source/documentation/0.10.0/images/debugging-client-test.png
new file mode 100644
index 0000000..df7b7e3
--- /dev/null
+++ b/source/documentation/0.10.0/images/debugging-client-test.png
Binary files differ
diff --git a/publish/documentation/latest/images/killedtask.png b/source/documentation/0.10.0/images/killedtask.png
similarity index 100%
rename from publish/documentation/latest/images/killedtask.png
rename to source/documentation/0.10.0/images/killedtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/lifeofatask.png b/source/documentation/0.10.0/images/lifeofatask.png
similarity index 100%
rename from publish/documentation/latest/images/lifeofatask.png
rename to source/documentation/0.10.0/images/lifeofatask.png
Binary files differ
diff --git a/source/documentation/0.10.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png b/source/documentation/0.10.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
new file mode 100644
index 0000000..ea9866e
--- /dev/null
+++ b/source/documentation/0.10.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
Binary files differ
diff --git a/source/documentation/0.10.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png b/source/documentation/0.10.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
new file mode 100644
index 0000000..c9539df
--- /dev/null
+++ b/source/documentation/0.10.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
Binary files differ
diff --git a/source/documentation/0.10.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png b/source/documentation/0.10.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
new file mode 100644
index 0000000..99674c7
--- /dev/null
+++ b/source/documentation/0.10.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.10.0/images/presentations/02_28_2015_apache_aurora_thumb.png b/source/documentation/0.10.0/images/presentations/02_28_2015_apache_aurora_thumb.png
new file mode 100644
index 0000000..6e0e1fb
--- /dev/null
+++ b/source/documentation/0.10.0/images/presentations/02_28_2015_apache_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.10.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png b/source/documentation/0.10.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
new file mode 100644
index 0000000..6ebce80
--- /dev/null
+++ b/source/documentation/0.10.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.10.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png b/source/documentation/0.10.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
new file mode 100644
index 0000000..f8a3db1
--- /dev/null
+++ b/source/documentation/0.10.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
Binary files differ
diff --git a/source/documentation/0.10.0/images/presentations/08_21_2014_past_present_future_thumb.png b/source/documentation/0.10.0/images/presentations/08_21_2014_past_present_future_thumb.png
new file mode 100644
index 0000000..deaebe7
--- /dev/null
+++ b/source/documentation/0.10.0/images/presentations/08_21_2014_past_present_future_thumb.png
Binary files differ
diff --git a/publish/documentation/latest/images/runningtask.png b/source/documentation/0.10.0/images/runningtask.png
similarity index 100%
rename from publish/documentation/latest/images/runningtask.png
rename to source/documentation/0.10.0/images/runningtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/stderr.png b/source/documentation/0.10.0/images/stderr.png
similarity index 100%
rename from publish/documentation/latest/images/stderr.png
rename to source/documentation/0.10.0/images/stderr.png
Binary files differ
diff --git a/publish/documentation/latest/images/stdout.png b/source/documentation/0.10.0/images/stdout.png
similarity index 100%
rename from publish/documentation/latest/images/stdout.png
rename to source/documentation/0.10.0/images/stdout.png
Binary files differ
diff --git a/publish/documentation/latest/images/storage_hierarchy.png b/source/documentation/0.10.0/images/storage_hierarchy.png
similarity index 100%
rename from publish/documentation/latest/images/storage_hierarchy.png
rename to source/documentation/0.10.0/images/storage_hierarchy.png
Binary files differ
diff --git a/source/documentation/0.10.0/index.html.md b/source/documentation/0.10.0/index.html.md
new file mode 100644
index 0000000..2f42fe5
--- /dev/null
+++ b/source/documentation/0.10.0/index.html.md
@@ -0,0 +1,37 @@
+## Introduction
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run long-running services that take advantage of Apache Mesos' scalability, fault-tolerance, and resource isolation. This documentation has been organized into sections with three audiences in mind:
+
+ * Users: General information about the project and how to run an Aurora job.
+ * Operators: For those that wish to manage and fine-tune an Aurora cluster.
+ * Developers: All the information you need to start modifying Aurora and contributing back to the project.
+
+We encourage you to ask questions on the [Aurora developer list](http://aurora.apache.org/community/) or the `#aurora` IRC channel on `irc.freenode.net`.
+
+## Users
+ * [Install Aurora on virtual machines on your private machine](/documentation/0.10.0/vagrant/)
+ * [Hello World Tutorial](/documentation/0.10.0/tutorial/)
+ * [User Guide](/documentation/0.10.0/user-guide/)
+ * [Configuration Tutorial](/documentation/0.10.0/configuration-tutorial/)
+ * [Aurora + Thermos Reference](/documentation/0.10.0/configuration-reference/)
+ * [Command Line Client](/documentation/0.10.0/client-commands/)
+ * [Cron Jobs](/documentation/0.10.0/cron-jobs/)
+
+## Operators
+ * [Deploy Aurora](/documentation/0.10.0/deploying-aurora-scheduler/)
+ * [Monitoring](/documentation/0.10.0/monitoring/)
+ * [Hooks for Aurora Client API](/documentation/0.10.0/hooks/)
+ * [Scheduler Storage](/documentation/0.10.0/storage/)
+ * [Scheduler Storage and Maintenance](/documentation/0.10.0/storage-config/)
+ * [SLA Measurement](/documentation/0.10.0/sla/)
+ * [Resource Isolation and Sizing](/documentation/0.10.0/resources/)
+ * [Generating test resources](/documentation/0.10.0/test-resource-generation/)
+
+## Developers
+ * [Contributing to the project](/documentation/0.10.0/contributing/)
+ * [Developing the Aurora Scheduler](/documentation/0.10.0/developing-aurora-scheduler/)
+ * [Developing the Aurora Client](/documentation/0.10.0/developing-aurora-client/)
+ * [Committers Guide](/documentation/0.10.0/committers/)
+ * [Build System](/documentation/0.10.0/build-system/)
+
+## Additional Resources
+ * [Presentation videos and slides](/documentation/0.10.0/presentations/)
diff --git a/source/documentation/0.10.0/installing.md b/source/documentation/0.10.0/installing.md
new file mode 100644
index 0000000..9276ec3
--- /dev/null
+++ b/source/documentation/0.10.0/installing.md
@@ -0,0 +1,272 @@
+# Installing Aurora
+
+- [Components](#components)
+    - [Machine profiles](#machine-profiles)
+      - [Coordinator](#coordinator)
+      - [Worker](#worker)
+      - [Client](#client)
+- [Getting Aurora](#getting-aurora)
+    - [Building your own binary packages](#building-your-own-binary-packages)
+    - [RPMs](#rpms)
+- [Installing the scheduler](#installing-the-scheduler)
+    - [Ubuntu Trusty](#ubuntu-trusty)
+    - [CentOS 7](#centos-7)
+    - [Finalizing](#finalizing)
+    - [Configuration](#configuration)
+- [Installing worker components](#installing-worker-components)
+    - [Ubuntu Trusty](#ubuntu-trusty-1)
+    - [CentOS 7](#centos-7-1)
+    - [Configuration](#configuration-1)
+- [Installing the client](#installing-the-client)
+    - [Ubuntu Trusty](#ubuntu-trusty-2)
+    - [CentOS 7](#centos-7-2)
+    - [Configuration](#configuration-2)
+- [See also](#see-also)
+- [Installing Mesos](#installing-mesos)
+    - [Mesos on Ubuntu Trusty](#mesos-on-ubuntu-trusty)
+    - [Mesos on CentOS 7](#mesos-on-centos-7)
+
+## Components
+Before installing Aurora, it's important to have an understanding of the components that make up
+a functioning Aurora cluster.
+
+* **Aurora scheduler**  
+  The scheduler will be your primary interface to the work you run in your cluster.  You will
+  instruct it to run jobs, and it will manage them in Mesos for you.  You will also frequently use
+  the scheduler's web interface as a heads-up display for what's running in your cluster.
+
+* **Aurora client**  
+  The client (`aurora` command) is a command line tool that exposes primitives that you can use to
+  interact with the scheduler.
+
+  Aurora also provides an admin client (`aurora_admin` command) that contains commands built for
+  cluster administrators.  You can use this tool to do things like manage user quotas and manage
+  graceful maintenance on machines in the cluster.
+
+* **Aurora executor**  
+  The executor (a.k.a. Thermos executor) is responsible for carrying out the workloads described in
+  the Aurora DSL (`.aurora` files).  The executor is what actually executes user processes.  It will
+  also perform health checking of tasks and register tasks in ZooKeeper for the purposes of dynamic
+  service discovery.  You can find lots more detail on the executor and Thermos in the
+  [user guide](/documentation/0.10.0/user-guide/).
+
+* **Aurora observer**  
+  The observer provides browser-based access to the status of individual tasks executing on worker
+  machines.  It gives insight into the processes executing, and facilitates browsing of task sandbox
+  directories.
+
+* **ZooKeeper**  
+  [ZooKeeper](http://zookeeper.apache.org) is a distributed consensus system.  In an Aurora cluster
+  it is used for reliable election of the leading Aurora scheduler and Mesos master.
+
+* **Mesos master**  
+  The master is responsible for tracking worker machines and performing accounting of their
+  resources.  The scheduler interfaces with the master to control the cluster.
+
+* **Mesos agent**  
+  The agent receives work assigned by the scheduler and executes it.  It interfaces with Linux
+  isolation systems like cgroups, namespaces and Docker to manage the resource consumption of tasks.
+  When a user task is launched, the agent will launch the executor (in the context of a Linux cgroup
+  or Docker container depending upon the environment), which will in turn fork user processes.
+
+## Machine profiles
+Given that many of these components communicate over the network, there are numerous ways you could
+assemble them to create an Aurora cluster.  The simplest way is to think in terms of three machine
+profiles:
+
+### Coordinator
+**Components**: ZooKeeper, Aurora scheduler, Mesos master
+
+A small number of machines (typically 3 or 5) responsible for cluster orchestration.  In most cases
+it is fine to co-locate these components in anything but very large clusters (> 1000 machines).
+Beyond that point, operators will likely want to manage these services on separate machines.
+
+In practice, 5 coordinators have been shown to reliably manage clusters with tens of thousands of
+machines.
+
+
+### Worker
+**Components**: Aurora executor, Aurora observer, Mesos agent
+
+The bulk of the cluster, where services will actually run.
+
+### Client
+**Components**: Aurora client, Aurora admin client
+
+Any machines that users submit jobs from.
+
+## Getting Aurora
+Source and binary distributions can be found on our
+[downloads](https://aurora.apache.org/downloads/) page.  Installing from binary packages is
+recommended for most users.
+
+### Building your own binary packages
+Our package build toolchain makes it easy to build your own packages if you would like.  See the
+[instructions](https://github.com/apache/aurora-packaging) to learn how.
+
+### RPMs
+We currently have work in progress to provide official RPMs.  As of this writing, the suggested way
+to get RPMs is to [build them](#building-your-own-binary-packages).
+
+We do have unofficial experimental RPMs available for testing purposes.
+
+**Use these RPMs at your own risk, they are not officially released under the ASF guidelines.**
+
+    echo '[apache-aurora-wfarner]
+    name=Apache Aurora distribution maintained by wfarner
+    baseurl=http://people.apache.org/~wfarner/aurora/distributions/0.9.0/rpm/centos-7/x86_64/
+    gpgcheck = 0' | sudo tee /etc/yum.repos.d/apache-aurora-wfarner.repo > /dev/null
+
+
+## Installing the scheduler
+### Ubuntu Trusty
+
+1. Install Mesos  
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        sudo start mesos-master
+
+2. Install ZooKeeper
+
+        sudo apt-get install -y zookeeperd
+
+3. Install the Aurora scheduler
+
+        wget -c https://apache.bintray.com/aurora/aurora-scheduler_0.10.0-1_amd64.deb
+        sudo dpkg -i aurora-scheduler_0.10.0-1_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos  
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-master
+
+2. Install ZooKeeper
+
+        sudo rpm -Uvh https://archive.cloudera.com/cdh4/one-click-install/redhat/6/x86_64/cloudera-cdh-4-0.x86_64.rpm
+        sudo yum install -y java-1.8.0-openjdk-headless zookeeper-server
+
+        sudo service zookeeper-server init
+        sudo systemctl start zookeeper-server
+
+3. Install the Aurora scheduler  
+   If you haven't already, read the section on [how to get Aurora RPMs](#rpms).
+
+        # Note: for older Aurora RPM versions, this may be called 'aurora'.
+        sudo yum install -y aurora-scheduler
+
+Note: if you are using the unreleased 0.9.0 RPM, you will need to edit `/etc/sysconfig/aurora`:  
+Change  
+`-mesos_master_address='zk://127.0.0.1:2181/mesos/master'`  
+To  
+`-mesos_master_address='zk://127.0.0.1:2181/mesos'`  
+And  
+`-native_log_file_path='/var/lib/aurora/db'`  
+To  
+`-native_log_file_path='/var/lib/aurora/scheduler/db'`
+
+### Finalizing
+By default, the scheduler will start in an uninitialized mode.  This is because external
+coordination is necessary to be certain operator error does not result in a quorum of schedulers
+starting up and believing their databases are empty when in fact they should be re-joining a
+cluster.
+
+Because of this, a fresh install of the scheduler will need intervention to start up.  First,
+stop the scheduler service.  
+Ubuntu: `sudo stop aurora-scheduler`  
+CentOS: `sudo systemctl stop aurora`
+
+Now initialize the database:
+
+    sudo -u aurora mkdir -p /var/lib/aurora/scheduler/db
+    sudo -u aurora mesos-log initialize --path=/var/lib/aurora/scheduler/db
+
+Now you can start the scheduler back up.  
+Ubuntu: `sudo start aurora-scheduler`  
+CentOS: `sudo systemctl start aurora`
+
+### Configuration
+For more detail on this topic, see the dedicated page on
+[deploying the scheduler](/documentation/0.10.0/deploying-aurora-scheduler/)
+
+
+## Installing worker components
+### Ubuntu Trusty
+
+1. Install Mesos  
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        sudo start mesos-slave
+
+2. Install Aurora executor and observer
+
+        wget -c https://apache.bintray.com/aurora/aurora-executor_0.10.0-1_amd64.deb
+        sudo dpkg -i aurora-executor_0.10.0-1_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos  
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-slave
+
+2. Install Aurora executor and observer  
+   If you haven't already, read the section on [how to get Aurora RPMs](#rpms).
+
+        # Note: for older Aurora RPM versions, this may be called 'aurora-thermos'.
+        sudo yum install -y aurora-executor
+
+### Configuration
+The executor and observer typically do not require much configuration.  Command line arguments for
+the executor can be passed via a command line argument on the scheduler.
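+
+For example (assuming the scheduler's `-thermos_executor_flags` option, which forwards its value
+to the executor command line), the scheduler might be started with:
+
+    -thermos_executor_flags="--announcer-enable --announcer-ensemble localhost:2181"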
+
+## Installing the client
+### Ubuntu Trusty
+
+    sudo apt-get install -y python2.7 wget
+
+    wget https://apache.bintray.com/aurora/aurora-tools_0.10.0-1_amd64.deb
+    sudo dpkg -i aurora-tools_0.10.0-1_amd64.deb
+
+### CentOS 7
+If you haven't already, read the section on [how to get Aurora RPMs](#rpms).
+
+    # Note: for older Aurora RPM versions, this may be called 'aurora-client'.
+    sudo yum install -y aurora-tools
+
+### Configuration
+Client configuration lives in a JSON file that describes the clusters available and how to reach
+them.  By default this file is at `/etc/aurora/clusters.json`.
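+
+A minimal sketch of one cluster entry (values are illustrative and the exact set of keys depends
+on your deployment; see the example clusters.json shipped with Aurora):
+
+    [
+      {
+        "name": "devcluster",
+        "zk": "192.168.33.7",
+        "scheduler_zk_path": "/aurora/scheduler",
+        "auth_mechanism": "UNAUTHENTICATED"
+      }
+    ]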
+
+Jobs may be submitted to the scheduler using the client, and are described with
+[job configurations](/documentation/0.10.0/configuration-reference/) expressed in `.aurora` files.  Typically you will
+maintain a single job configuration file to describe one or more deployment environments (e.g.
+dev, test, prod) for a production job.
+
+## See also
+We have other docs that you will find useful once you have your cluster up and running:
+
+- [Monitor](/documentation/0.10.0/monitoring/) your cluster
+- Enable scheduler [security](/documentation/0.10.0/security/)
+- View job SLA [statistics](/documentation/0.10.0/sla/)
+- Understand the internals of the scheduler's [storage](/documentation/0.10.0/storage/)
+
+## Installing Mesos
+Mesos uses a single package for the Mesos master and slave.  As a result, the package dependencies
+are identical for both.
+
+### Mesos on Ubuntu Trusty
+
+    sudo add-apt-repository ppa:openjdk-r/ppa -y
+    sudo apt-get update
+
+    sudo apt-get install -y software-properties-common wget libsvn1 libcurl3 openjdk-8-jre-headless
+
+    wget -c http://downloads.mesosphere.io/master/ubuntu/14.04/mesos_0.23.0-1.0.ubuntu1404_amd64.deb
+    sudo dpkg -i mesos_0.23.0-1.0.ubuntu1404_amd64.deb
+
+### Mesos on CentOS 7
+
+    sudo rpm -Uvh http://repos.mesosphere.io/el/7/noarch/RPMS/mesosphere-el-repo-7-1.noarch.rpm
+    sudo yum install -y mesos-0.22.0
diff --git a/source/documentation/0.10.0/monitoring.md b/source/documentation/0.10.0/monitoring.md
new file mode 100644
index 0000000..3cb2a79
--- /dev/null
+++ b/source/documentation/0.10.0/monitoring.md
@@ -0,0 +1,181 @@
+# Monitoring your Aurora cluster
+
+Before you start running important services in your Aurora cluster, it's important to set up
+monitoring and alerting of Aurora itself.  Most of your monitoring can be against the scheduler,
+since it will give you a global view of what's going on.
+
+## Reading stats
+The scheduler exposes a *lot* of instrumentation data via its HTTP interface. You can get a quick
+peek at the first few of these in our vagrant image:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars | head'
+    async_tasks_completed 1004
+    attribute_store_fetch_all_events 15
+    attribute_store_fetch_all_events_per_sec 0.0
+    attribute_store_fetch_all_nanos_per_event 0.0
+    attribute_store_fetch_all_nanos_total 3048285
+    attribute_store_fetch_all_nanos_total_per_sec 0.0
+    attribute_store_fetch_one_events 3391
+    attribute_store_fetch_one_events_per_sec 0.0
+    attribute_store_fetch_one_nanos_per_event 0.0
+    attribute_store_fetch_one_nanos_total 454690753
+
+These values are served as `Content-Type: text/plain`, with each line containing a space-separated metric
+name and value. Values may be integers, doubles, or strings (note: strings are static, others
+may be dynamic).
+
+If your monitoring infrastructure prefers JSON, the scheduler exports that as well:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars.json | python -mjson.tool | head'
+    {
+        "async_tasks_completed": 1009,
+        "attribute_store_fetch_all_events": 15,
+        "attribute_store_fetch_all_events_per_sec": 0.0,
+        "attribute_store_fetch_all_nanos_per_event": 0.0,
+        "attribute_store_fetch_all_nanos_total": 3048285,
+        "attribute_store_fetch_all_nanos_total_per_sec": 0.0,
+        "attribute_store_fetch_one_events": 3409,
+        "attribute_store_fetch_one_events_per_sec": 0.0,
+        "attribute_store_fetch_one_nanos_per_event": 0.0,
+
+This will be the same data as above, served with `Content-Type: application/json`.
+
+## Viewing live stat samples on the scheduler
+The scheduler uses the Twitter commons stats library, which keeps an internal time-series database
+of exported variables - nearly everything in `/vars` is available for instant graphing.  This is
+useful for debugging, but is not a replacement for an external monitoring system.
+
+You can view these graphs on a scheduler at `/graphview`.  It supports some composition and
+aggregation of values, which can be invaluable when triaging a problem.  For example, if you have
+the scheduler running in vagrant, check out these links:
+[simple graph](http://192.168.33.7:8081/graphview?query=jvm_uptime_secs)
+[complex composition](http://192.168.33.7:8081/graphview?query=rate\(scheduler_log_native_append_nanos_total\)%2Frate\(scheduler_log_native_append_events\)%2F1e6)
+
+### Counters and gauges
+Among numeric stats, there are two fundamental types of stats exported: _counters_ and _gauges_.
+Counters are guaranteed to be monotonically-increasing for the lifetime of a process, while gauges
+may decrease in value.  Aurora uses counters to represent things like the number of times an event
+has occurred, and gauges to capture things like the current length of a queue.  Counters are a
+natural fit for accurate composition into [rate ratios](http://en.wikipedia.org/wiki/Rate_ratio)
+(useful for sample-resistant latency calculation), while gauges are not.
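+
+As a concrete illustration, the following sketch (assuming a scheduler reachable at
+`localhost:8081`) polls two counters from `/vars` and computes a windowed rate ratio for
+replicated log append latency:
+
+    import time
+    import urllib2
+
+    def fetch_vars(host='localhost:8081'):
+        # Parse the plain-text /vars output into a name -> value dict.
+        stats = {}
+        for line in urllib2.urlopen('http://%s/vars' % host):
+            name, _, value = line.strip().partition(' ')
+            stats[name] = value
+        return stats
+
+    before = fetch_vars()
+    time.sleep(60)
+    after = fetch_vars()
+
+    nanos = (int(after['scheduler_log_native_append_nanos_total'])
+             - int(before['scheduler_log_native_append_nanos_total']))
+    events = (int(after['scheduler_log_native_append_events'])
+              - int(before['scheduler_log_native_append_events']))
+    if events:
+        print('average append latency: %.2f ms' % (nanos / float(events) / 1e6))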
+
+# Alerting
+
+## Quickstart
+If you are looking for just bare-minimum alerting to get something in place quickly, set up alerting
+on `framework_registered` and `task_store_LOST`. These will give you a decent picture of overall
+health.
+
+## A note on thresholds
+One of the most difficult things in monitoring is choosing alert thresholds. With many of these
+stats, there is no value we can offer as a threshold that will be guaranteed to work for you. It
+will depend on the size of your cluster, number of jobs, churn of tasks in the cluster, etc. We
+recommend you start with a strict value after viewing a small amount of collected data, and then
+adjust thresholds as you see fit. Feel free to ask us if you would like to validate that your alerts
+and thresholds make sense.
+
+## Important stats
+
+### `jvm_uptime_secs`
+Type: integer counter
+
+The number of seconds the JVM process has been running. Comes from
+[RuntimeMXBean#getUptime()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/RuntimeMXBean.html#getUptime\(\))
+
+Detecting resets (decreasing values) on this stat will tell you that the scheduler is failing to
+stay alive.
+
+Look at the scheduler logs to identify the reason the scheduler is exiting.
+
+### `system_load_avg`
+Type: double gauge
+
+The current load average of the system for the last minute. Comes from
+[OperatingSystemMXBean#getSystemLoadAverage()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/OperatingSystemMXBean.html?is-external=true#getSystemLoadAverage\(\)).
+
+A high sustained value suggests that the scheduler machine may be over-utilized.
+
+Use standard unix tools like `top` and `ps` to track down the offending process(es).
+
+### `process_cpu_cores_utilized`
+Type: double gauge
+
+The current number of CPU cores in use by the JVM process. This should not exceed the number of
+logical CPU cores on the machine. Derived from
+[OperatingSystemMXBean#getProcessCpuTime()](http://docs.oracle.com/javase/7/docs/jre/api/management/extension/com/sun/management/OperatingSystemMXBean.html)
+
+A high sustained value indicates that the scheduler is overworked. Due to current internal design
+limitations, if this value is sustained at `1`, there is a good chance the scheduler is under water.
+
+There are two main inputs that tend to drive this figure: task scheduling attempts and status
+updates from Mesos.  You may see activity in the scheduler logs to give an indication of where
+time is being spent.  Beyond that, it really takes good familiarity with the code to effectively
+triage this.  We suggest engaging with an Aurora developer.
+
+### `task_store_LOST`
+Type: integer gauge
+
+The number of tasks stored in the scheduler that are in the `LOST` state, and have been rescheduled.
+
+If this value is increasing at a high rate, it is a sign of trouble.
+
+There are many sources of `LOST` tasks in Mesos: the scheduler, master, slave, and executor can all
+trigger this.  The first step is to look in the scheduler logs for `LOST` to identify where the
+state changes are originating.
+
+### `scheduler_resource_offers`
+Type: integer counter
+
+The number of resource offers that the scheduler has received.
+
+For a healthy scheduler, this value must be increasing over time.
+
+Assuming the scheduler is up and otherwise healthy, you will want to check if the master thinks it
+is sending offers. You should also look at the master's web interface to see if it has a large
+number of outstanding offers that it is waiting to have returned.
+
+### `framework_registered`
+Type: binary integer counter
+
+Will be `1` for the leading scheduler that is registered with the Mesos master, `0` for passive
+schedulers.
+
+A sustained period without a `1` (or where `sum() != 1`) warrants investigation.
+
+If there is no leading scheduler, look in the scheduler and master logs for why.  If there are
+multiple schedulers claiming leadership, this suggests a split brain and warrants filing a critical
+bug.
+
+### `rate(scheduler_log_native_append_nanos_total)/rate(scheduler_log_native_append_events)`
+Type: rate ratio of integer counters
+
+This composes two counters to compute a windowed figure for the latency of replicated log writes.
+
+A hike in this value suggests disk bandwidth contention.
+
+Look in scheduler logs for any reported oddness with saving to the replicated log. Also use
+standard tools like `vmstat` and `iotop` to identify whether the disk has become slow or
+over-utilized. We suggest using a dedicated disk for the replicated log to mitigate this.
+
+### `timed_out_tasks`
+Type: integer counter
+
+Tracks the number of times the scheduler has given up while waiting
+(for `-transient_task_state_timeout`) to hear back about a task that is in a transient state
+(e.g. `ASSIGNED`, `KILLING`), and has moved to `LOST` before rescheduling.
+
+This value is currently known to increase occasionally when the scheduler fails over
+([AURORA-740](https://issues.apache.org/jira/browse/AURORA-740)). However, any large spike in this
+value warrants investigation.
+
+The scheduler will log when it times out a task. You should trace the task ID of the timed out
+task into the master, slave, and/or executors to determine where the message was dropped.
+
+### `http_500_responses_events`
+Type: integer counter
+
+The total number of HTTP 500 status responses sent by the scheduler. Includes API and asset serving.
+
+An increase warrants investigation.
+
+Look in scheduler logs to identify why the scheduler returned a 500, there should be a stack trace.
diff --git a/source/documentation/0.10.0/presentations.md b/source/documentation/0.10.0/presentations.md
new file mode 100644
index 0000000..9809588
--- /dev/null
+++ b/source/documentation/0.10.0/presentations.md
@@ -0,0 +1,50 @@
+# Apache Aurora Presentations
+Video and slides from presentations and panel discussions about Apache Aurora.
+
+_(Listed in date descending order)_
+
+<table>
+	<tr>
+		<td><img src="/documentation/0.10.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png" alt="From Monolith to Microservices with Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=yXkOgnyK4Hw">From Monolith to Microservices w/ Aurora (Video)</a></strong>
+		<p>Presented by Thanos Baskous, Tony Dong, Dobromir Montauk</p>
+		<p>April 30, 2015 at <a href="http://www.meetup.com/Bay-Area-Apache-Aurora-Users-Group/events/221219480/">Bay Area Apache Aurora Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.10.0/images/presentations/02_28_2015_apache_aurora_thumb.png" alt="Apache Auroraの始めかた Slideshow Thumbnail" /></td>
+		<td><strong><a href="http://www.slideshare.net/zembutsu/apache-aurora-introduction-and-tutorial-osc15tk">Apache Auroraの始めかた (Slides)</a></strong>
+		<p>Presented by Masahito Zembutsu</p>
+		<p>February 28, 2015 at <a href="http://www.ospn.jp/osc2015-spring/">Open Source Conference 2015 Tokyo Spring</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.10.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png" alt="Apache Aurora Adopters Panel Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=2Jsj0zFdRlg">Apache Aurora Adopters Panel (Video)</a></strong>
+		<p>Panelists Ben Staffin, Josh Adams, Bill Farner, Berk Demir</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.10.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png" alt="Operating Apache Aurora and Mesos at Twitter Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=E4lxX6epM_U">Operating Apache Aurora and Mesos at Twitter (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.10.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png" alt="Apache Aurora and Mesos at TellApart" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=ZZXtXLvTXAE">Apache Aurora and Mesos at TellApart (Video)</a></strong>
+		<p>Presented by Steve Niemitz</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.10.0/images/presentations/08_21_2014_past_present_future_thumb.png" alt="Past, Present, and Future of the Aurora Scheduler Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=Dsc5CPhKs4o">Past, Present, and Future of the Aurora Scheduler (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 21, 2014 at <a href="http://events.linuxfoundation.org/events/archive/2014/mesoscon">#MesosCon 2014</a></p>
+</td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.10.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png" alt="Introduction to Apache Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=asd_h6VzaJc">Introduction to Apache Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 25, 2014 at <a href="https://www.eventbrite.com/e/aurora-and-mesosframeworksmeetup-tickets-10850994617">Aurora and Mesos Frameworks Meetup</a></p></td>
+	</tr>
+</table>
\ No newline at end of file
diff --git a/source/documentation/0.10.0/resources.md b/source/documentation/0.10.0/resources.md
new file mode 100644
index 0000000..b8be4dc
--- /dev/null
+++ b/source/documentation/0.10.0/resources.md
@@ -0,0 +1,164 @@
+Resources and Sizing
+=============================
+
+- [Introduction](#introduction)
+- [CPU Isolation](#cpu-isolation)
+- [CPU Sizing](#cpu-sizing)
+- [Memory Isolation](#memory-isolation)
+- [Memory Sizing](#memory-sizing)
+- [Disk Space](#disk-space)
+- [Disk Space Sizing](#disk-space-sizing)
+- [Other Resources](#other-resources)
+- [Resource Quota](#resource-quota)
+- [Task Preemption](#task-preemption)
+
+## Introduction
+
+Aurora is a multi-tenant system; a single software instance runs on a
+server, serving multiple clients/tenants. To share resources among
+tenants, it implements isolation of:
+
+* CPU
+* memory
+* disk space
+
+CPU is a soft limit, and handled differently from memory and disk space.
+Too low a CPU value results in throttling your application and
+slowing it down. Memory and disk space are both hard limits; when your
+application goes over these values, it's killed.
+
+Let's look at each resource type in more detail:
+
+## CPU Isolation
+
+Mesos uses a quota-based CPU scheduler (the *Completely Fair Scheduler*)
+to provide consistent and predictable performance.  This is effectively
+a guarantee of resources -- you receive at least what you requested, but
+also no more than you've requested.
+
+The scheduler gives applications a CPU quota for every 100 ms interval.
+When an application uses its quota for an interval, it is throttled for
+the rest of the 100 ms. Usage resets for each interval and unused
+quota does not carry over.
+
+For example, an application specifying 4.0 CPU has access to 400 ms of
+CPU time every 100 ms. This CPU quota can be used in different ways,
+depending on the application and available resources. Consider the
+scenarios shown in this diagram.
+
+![CPU Availability](images/CPUavailability.png)
+
+* *Scenario A*: the application can use up to 4 cores continuously for
+every 100 ms interval. It is never throttled and starts processing
+new requests immediately.
+
+* *Scenario B* : the application uses up to 8 cores (depending on
+availability) but is throttled after 50 ms. The CPU quota resets at the
+start of each new 100 ms interval.
+
+* *Scenario C* : is like Scenario A, but there is a garbage collection
+event in the second interval that consumes all CPU quota. The
+application throttles for the remaining 75 ms of that interval and
+cannot service requests until the next interval. In this example, the
+garbage collection finished in one interval but, depending on how much
+garbage needs collecting, it may take more than one interval and further
+delay service of requests.
+
+*Technical Note*: Mesos considers logical cores, also known as
+hyperthreading or SMT cores, as the unit of CPU.
+
+## CPU Sizing
+
+To correctly size Aurora-run Mesos tasks, specify a per-shard CPU value
+that lets the task run at its desired performance when at peak load
+distributed across all shards. Include reserve capacity of at least 50%,
+possibly more, depending on how critical your service is (or how
+confident you are about your original estimate :-)), ideally by
+increasing the number of shards to also improve resiliency. When running
+your application, observe its CPU stats over time. If consistently at or
+near your quota during peak load, you should consider increasing either
+per-shard CPU or the number of shards.
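+
+For example (a purely hypothetical illustration): if a shard's measured peak usage is 2.0 cores,
+a 50% reserve suggests provisioning 3.0 CPUs per shard.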
+
+## Memory Isolation
+
+Mesos uses dedicated memory allocation. Your application always has
+access to the amount of memory specified in your configuration. The
+application's memory use is defined as the sum of the resident set size
+(RSS) of all processes in a shard. Each shard is considered
+independently.
+
+In other words, say you specified a memory size of 10GB. Each shard
+would receive 10GB of memory. If an individual shard's memory demands
+exceed 10GB, that shard is killed, but the other shards continue
+working.
+
+*Technical note*: Total memory size is not enforced at allocation time,
+so your application can request more than its allocation without getting
+an ENOMEM. However, it will be killed shortly after.
+
+## Memory Sizing
+
+Size for your application's peak requirement. Observe the per-instance
+memory statistics over time, as memory requirements can vary over
+different periods. Remember that if your application exceeds its memory
+value, it will be killed, so you should also add a safety margin of
+around 10-20%. If you have the ability to do so, you may also want to
+put alerts on the per-instance memory.
+
+## Disk Space
+
+Disk space used by your application is defined as the sum of the files'
+disk space in your application's directory, including the `stdout` and
+`stderr` logged from your application. Each shard is considered
+independently. You should use off-node storage for your application's
+data whenever possible.
+
+In other words, say you specified disk space size of 100MB. Each shard
+would receive 100MB of disk space. If an individual shard's disk space
+demands exceed 100MB, that shard is killed, but the other shards
+continue working.
+
+After your application finishes running, its allocated disk space is
+reclaimed. Thus, your job's final action should move any disk content
+that you want to keep, such as logs, to your home file system or other
+less transitory storage. Disk reclamation takes place an undefined
+period after the application finish time; until then, the disk contents
+are still available but you shouldn't count on them being so.
+
+*Technical note*: Disk space is not enforced at write time, so your
+application can write above its quota without getting an ENOSPC, but it
+will be killed shortly after. This is subject to change.
+
+## Disk Space Sizing
+
+Size for your application's peak requirement. Rotate and discard log
+files as needed to stay within your quota. When running a Java process,
+add the maximum size of the Java heap to your disk space requirement, in
+order to account for an out of memory error dumping the heap
+into the application's sandbox space.
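+
+For example, a sketch for a Java service (names and sizes are illustrative):
+budget for rotated logs plus the maximum heap, so a heap dump on an out of
+memory error still fits in the sandbox:
+
+```python
+# Illustrative: 1 GB for rotated logs + 4 GB max Java heap, so an
+# OutOfMemoryError heap dump still fits in the sandbox.
+LOGS_BUDGET = 1*GB
+MAX_HEAP = 4*GB  # keep in sync with the JVM's -Xmx flag
+resources = Resources(cpu = 2, ram = 6*GB, disk = LOGS_BUDGET + MAX_HEAP)
+```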
+
+## Other Resources
+
+Other resources, such as network bandwidth, do not have any performance
+guarantees. For some resources, such as memory bandwidth, there are no
+practical sharing methods so some application combinations collocated on
+the same host may cause contention.
+
+## Resource Quota
+
+Aurora requires resource quotas for
+[production non-dedicated jobs](/documentation/0.10.0/configuration-reference/#job-objects). Quota is enforced at
+the job role level and when set, defines a non-preemptible pool of compute resources within
+that role.
+
+To grant quota to a particular role in production, use the `aurora_admin set_quota` command.
+
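+For example, a hypothetical invocation granting the `www-data` role 100 CPUs,
+200 GB of RAM, and 1000 GB of disk (see `aurora_admin help set_quota` for the
+exact argument format in your version):
+
+```
+aurora_admin set_quota devcluster www-data 100.0 200GB 1000GB
+```
+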
+NOTE: All job types (service, ad hoc, or cron) require a role resource quota unless the job has a
+[dedicated constraint set](/documentation/0.10.0/deploying-aurora-scheduler/#dedicated-attribute).
+
+## Task preemption
+
+Under resource shortage pressure, tasks from
+[production](/documentation/0.10.0/configuration-reference/#job-objects) jobs may preempt tasks from any non-production
+job. A production task may only be preempted by tasks from production jobs in the same role with
+higher [priority](/documentation/0.10.0/configuration-reference/#job-objects).
\ No newline at end of file
diff --git a/source/documentation/latest/scheduler-storage.md b/source/documentation/0.10.0/scheduler-storage.md
similarity index 100%
rename from source/documentation/latest/scheduler-storage.md
rename to source/documentation/0.10.0/scheduler-storage.md
diff --git a/source/documentation/0.10.0/security.md b/source/documentation/0.10.0/security.md
new file mode 100644
index 0000000..f9b60e9
--- /dev/null
+++ b/source/documentation/0.10.0/security.md
@@ -0,0 +1,271 @@
+Aurora integrates with [Apache Shiro](http://shiro.apache.org/) to provide security
+controls for its API. In addition to providing some useful features out of the box, Shiro
+also allows Aurora cluster administrators to adapt the security system to their organization’s
+existing infrastructure.
+
+- [Enabling Security](#enabling-security)
+- [Authentication](#authentication)
+	- [HTTP Basic Authentication](#http-basic-authentication)
+		- [Server Configuration](#server-configuration)
+		- [Client Configuration](#client-configuration)
+	- [HTTP SPNEGO Authentication (Kerberos)](#http-spnego-authentication-kerberos)
+		- [Server Configuration](#server-configuration-1)
+		- [Client Configuration](#client-configuration-1)
+- [Authorization](#authorization)
+	- [Using an INI file to define security controls](#using-an-ini-file-to-define-security-controls)
+		- [Caveats](#caveats)
+- [Implementing a Custom Realm](#implementing-a-custom-realm)
+	- [Packaging a realm module](#packaging-a-realm-module)
+- [Known Issues](#known-issues)
+
+# Enabling Security
+
+There are two major components of security:
+[authentication and authorization](http://en.wikipedia.org/wiki/Authentication#Authorization).  A
+cluster administrator may choose the approach used for each, and may also implement custom
+mechanisms for either.  Later sections describe the options available.
+
+# Authentication
+
+At a minimum, the scheduler must be configured with instructions for how to process
+authentication credentials.  There are currently two built-in authentication schemes -
+[HTTP Basic Authentication](http://en.wikipedia.org/wiki/Basic_access_authentication), and
+[SPNEGO](http://en.wikipedia.org/wiki/SPNEGO) (Kerberos).
+
+## HTTP Basic Authentication
+
+Basic Authentication is a very quick way to add *some* security.  It is supported
+by all major browsers and HTTP client libraries with minimal work.  However,
+before relying on Basic Authentication you should be aware of the [security
+considerations](http://tools.ietf.org/html/rfc2617#section-4).
+
+### Server Configuration
+
+At a minimum you need to set 3 command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_realm_modules=INI_AUTHNZ
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+[users]
+sally = apple, admin
+
+[roles]
+admin = *
+```
+
+The details of the security.ini file are explained below. Note that this file contains plaintext,
+unhashed passwords.
+
+### Client Configuration
+
+To configure the client for HTTP Basic authentication, add an entry to ~/.netrc with your credentials:
+
+```
+% cat ~/.netrc
+# ...
+
+machine aurora.example.com
+login sally
+password apple
+
+# ...
+```
+
+No changes are required to `clusters.json`.
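+
+To verify the setup end-to-end, you can hit any scheduler endpoint with the
+same credentials; a sketch, assuming the scheduler serves HTTP on port 8081 as
+in the examples elsewhere in this document:
+
+```
+% curl -u sally:apple http://aurora.example.com:8081/vars
+```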
+
+## HTTP SPNEGO Authentication (Kerberos)
+
+### Server Configuration
+At a minimum you need to set 5 command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=NEGOTIATE
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+% cat path/to/security.ini
+[users]
+sally = _, admin
+
+[roles]
+admin = *
+```
+
+What's going on here? First, Aurora must be configured to request Kerberos credentials when presented with an
+unauthenticated request. This is achieved by setting
+
+```
+-http_authentication_mechanism=NEGOTIATE
+```
+
+Next, a Realm module must be configured to **authenticate** the current request using the Kerberos
+credentials that were requested. Aurora ships with a realm module that can do this:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN[,...]
+```
+
+The Kerberos5Realm requires a keytab file and a server principal name. The principal name will usually
+be in the form `HTTP/aurora.example.com@EXAMPLE.COM`.
+
+```
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+```
+
+The Kerberos5 realm module is authentication-only. For scheduler security to work you must also
+enable a realm module that provides an Authorizer implementation. For example, to do this using the
+IniShiroRealmModule:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+```
+
+You can then configure authorization using a security.ini file as described below
+(the password field is ignored). You must configure the realm module with the path to this file:
+
+```
+-shiro_ini_path=path/to/security.ini
+```
+
+### Client Configuration
+To use Kerberos on the client side you must build Kerberos-enabled client binaries. Do this with:
+
+```
+./pants binary src/main/python/apache/aurora/kerberos:kaurora
+./pants binary src/main/python/apache/aurora/kerberos:kaurora_admin
+```
+
+You must also configure each cluster where you've enabled Kerberos on the scheduler
+to use Kerberos authentication. Do this by setting `auth_mechanism` to `KERBEROS`
+in `clusters.json`.
+
+```
+% cat ~/.aurora/clusters.json
+{
+    "devcluser": {
+        "auth_mechanism": "KERBEROS",
+        ...
+    },
+    ...
+}
+```
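+
+Once built and configured, the Kerberos-enabled binaries are used like the
+regular client after obtaining a ticket; a sketch, assuming pants placed the
+binaries under `dist/`:
+
+```
+% kinit sally@EXAMPLE.COM
+% dist/kaurora.pex job create devcluster/www-data/devel/hello_world hello_world.aurora
+```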
+
+# Authorization
+Given a means to authenticate the entity a client claims to be, we need to define what privileges it has.
+
+## Using an INI file to define security controls
+
+The simplest security configuration for Aurora is an INI file on the scheduler.  For small
+clusters, or clusters where the users and access controls change relatively infrequently, this is
+likely the preferred approach.  However you may want to avoid this approach if access permissions
+are rapidly changing, or if your access control information already exists in another system.
+
+You can enable INI-based configuration with the following scheduler command line arguments:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_ini_path=path/to/security.ini
+```
+
+*Note*: As the argument name reveals, this is using Shiro’s
+[IniRealm](http://shiro.apache.org/configuration.html#Configuration-INIConfiguration) behind
+the scenes.
+
+The INI file will contain two sections - users and roles.  Here’s an example of what might
+be in security.ini:
+
+```
+[users]
+sally = apple, admin
+jim = 123456, accounting
+becky = letmein, webapp
+larry = 654321,accounting
+steve = password
+
+[roles]
+admin = *
+accounting = thrift.AuroraAdmin:setQuota
+webapp = thrift.AuroraSchedulerManager:*:webapp
+```
+
+The users section defines user credentials and the role(s) they are members of.  These lines
+are of the format `<user> = <password>[, <role>...]`.  As you probably noticed, the passwords are
+in plaintext and as a result read access to this file should be restricted.
+
+In this configuration, each user has different privileges for actions in the cluster because
+of the roles they are a part of:
+
+* admin is granted all privileges
+* accounting may adjust the amount of resource quota for any role
+* webapp represents a collection of jobs that make up a service, and its members may create and modify any jobs owned by that role
+
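+Permissions are colon-separated wildcard strings, as in the examples above. A
+hypothetical role that may only kill tasks for jobs under the `webapp` role
+might look like:
+
+```
+[roles]
+task_killer = thrift.AuroraSchedulerManager:killTasks:webapp
+```
+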
+### Caveats
+You might find documentation on the Internet suggesting there are additional sections in `shiro.ini`,
+like `[main]` and `[urls]`. These are not supported by Aurora as it uses a different mechanism to configure
+those parts of Shiro. Think of Aurora's `security.ini` as a subset with only `[users]` and `[roles]` sections.
+
+# Implementing a Custom Realm
+
+Since Aurora’s security is backed by [Apache Shiro](https://shiro.apache.org), you can implement a
+custom [Realm](http://shiro.apache.org/realm.html) to define organization-specific security behavior.
+
+In addition to using Shiro's standard APIs to implement a Realm you can link against Aurora to
+access the type-safe Permissions Aurora uses. See the Javadoc for `org.apache.aurora.scheduler.spi`
+for more information.
+
+## Packaging a realm module
+Package your custom Realm(s) with a Guice module that exposes a `Set<Realm>` multibinding.
+
+```java
+package com.example;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.multibindings.Multibinder;
+import org.apache.shiro.authc.AuthenticationException;
+import org.apache.shiro.authc.AuthenticationInfo;
+import org.apache.shiro.authc.AuthenticationToken;
+import org.apache.shiro.realm.Realm;
+
+public class MyRealmModule extends AbstractModule {
+  @Override
+  public void configure() {
+    Realm myRealm = new MyRealm();
+
+    Multibinder.newSetBinder(binder(), Realm.class).addBinding().toInstance(myRealm);
+  }
+
+  static class MyRealm implements Realm {
+    // A Realm must identify itself, declare which token types it supports,
+    // and perform authentication. Organization-specific logic goes in
+    // getAuthenticationInfo; the stubs below are placeholders.
+    @Override
+    public String getName() {
+      return "my-realm";
+    }
+
+    @Override
+    public boolean supports(AuthenticationToken token) {
+      return false;  // Replace with a check for your token type.
+    }
+
+    @Override
+    public AuthenticationInfo getAuthenticationInfo(AuthenticationToken token)
+        throws AuthenticationException {
+      throw new AuthenticationException("Not implemented.");
+    }
+  }
+}
+```
+
+To use your module in the scheduler, include it as a realm module based on its fully-qualified
+class name:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ,com.example.MyRealmModule
+```
+
+# Known Issues
+
+While the APIs and SPIs we ship with are stable as of 0.8.0, we are aware of several possible
+incremental improvements. Please follow, vote on, or send patches.
+
+Relevant tickets:
+* [AURORA-343](https://issues.apache.org/jira/browse/AURORA-343): HTTPS support
+* [AURORA-1248](https://issues.apache.org/jira/browse/AURORA-1248): Client retries 4xx errors
+* [AURORA-1279](https://issues.apache.org/jira/browse/AURORA-1279): Remove kerberos-specific build targets
+* [AURORA-1291](https://issues.apache.org/jira/browse/AURORA-1291): Consider defining a JSON format in place of INI
+* [AURORA-1179](https://issues.apache.org/jira/browse/AURORA-1179): Support hashed passwords in security.ini
+* [AURORA-1295](https://issues.apache.org/jira/browse/AURORA-1295): Support security for the ReadOnlyScheduler service
diff --git a/source/documentation/0.10.0/sla.md b/source/documentation/0.10.0/sla.md
new file mode 100644
index 0000000..ac3af09
--- /dev/null
+++ b/source/documentation/0.10.0/sla.md
@@ -0,0 +1,177 @@
+Aurora SLA Measurement
+--------------
+
+- [Overview](#overview)
+- [Metric Details](#metric-details)
+  - [Platform Uptime](#platform-uptime)
+  - [Job Uptime](#job-uptime)
+  - [Median Time To Assigned (MTTA)](#median-time-to-assigned-\(mtta\))
+  - [Median Time To Running (MTTR)](#median-time-to-running-\(mttr\))
+- [Limitations](#limitations)
+
+## Overview
+
+The primary goal of the feature is collection and monitoring of Aurora job SLA (Service Level
+Agreement) metrics that define a contractual relationship between the Aurora/Mesos platform
+and hosted services.
+
+The Aurora SLA feature is by default only enabled for service (non-cron)
+production jobs (`"production = True"` in your `.aurora` config). It can be enabled for
+non-production services via the scheduler command line flag `-sla_non_prod_metrics`.
+
+Counters that track SLA measurements are computed periodically within the scheduler.
+The individual instance metrics are refreshed every minute (configurable via
+`sla_stat_refresh_interval`). The instance counters are subsequently aggregated by
+relevant grouping types before being exported to the scheduler `/vars` endpoint (when using
+`vagrant`, that would be `http://192.168.33.7:8081/vars`).
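+
+For example, on the vagrant scheduler you can list the exported SLA counters
+with standard tools:
+
+```
+% curl -s http://192.168.33.7:8081/vars | grep sla_
+```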
+
+## Metric Details
+
+### Platform Uptime
+
+*Aggregate amount of time a job spends in a non-runnable state due to platform unavailability
+or scheduling delays. This metric tracks Aurora/Mesos uptime performance and reflects on any
+system-caused downtime events (tasks LOST or DRAINED). Any user-initiated task kills/restarts
+will not degrade this metric.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_platform_uptime_percent`
+* Per cluster - `sla_cluster_platform_uptime_percent`
+
+**Units:** percent
+
+A fault in the task environment may cause Aurora/Mesos to have different views of the task state
+or to lose track of the task's existence. In such cases, the service task is marked as LOST and
+rescheduled by Aurora. For example, this may happen when the task stays in ASSIGNED or STARTING
+for too long or the Mesos slave becomes unhealthy (or disappears completely). The time between
+task entering LOST and its replacement reaching RUNNING state is counted towards platform downtime.
+
+Another example of a platform downtime event is administrator-requested task rescheduling. This
+happens during planned Mesos slave maintenance when all slave tasks are marked as DRAINED and
+rescheduled elsewhere.
+
+To accurately calculate Platform Uptime, we must separate platform incurred downtime from user
+actions that put a service instance in a non-operational state. It is simpler to isolate
+user-incurred downtime and treat all other downtime as platform incurred.
+
+Currently, a user can cause a healthy service (task) downtime in only two ways: via `killTasks`
+or `restartShards` RPCs. For both, their affected tasks leave an audit state transition trail
+relevant to uptime calculations. By applying a special "SLA meaning" to exposed task state
+transition records, we can build a deterministic downtime trace for every given service instance.
+
+A task going through a state transition carries one of three possible SLA meanings
+(see [SlaAlgorithm.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/sla/SlaAlgorithm.java) for
+the SLA-to-task-state mapping):
+
+* Task is UP: starts a period where the task is considered to be up and running from the Aurora
+  platform standpoint.
+
+* Task is DOWN: starts a period where the task cannot reach the UP state for some
+  non-user-related reason. Counts towards instance downtime.
+
+* Task is REMOVED from SLA: starts a period where the task is not expected to be UP due to
+  user initiated action or failure. We ignore this period for the uptime calculation purposes.
+
+This metric is recalculated over the last sampling period (last minute) to account for
+any UP/DOWN/REMOVED events. It ignores any UP/DOWN events not immediately adjacent to the
+sampling interval as well as adjacent REMOVED events.
+
+### Job Uptime
+
+*Percentage of the job instances considered to be in RUNNING state for the specified duration
+relative to request time. This is a purely application-side metric that considers the aggregate
+uptime of all RUNNING instances. Any user- or platform-initiated restarts directly affect
+this metric.*
+
+**Collection scope:** We currently expose job uptime values at 5 pre-defined
+percentiles (50th, 75th, 90th, 95th, and 99th):
+
+* `sla_<job_key>_job_uptime_50_00_sec`
+* `sla_<job_key>_job_uptime_75_00_sec`
+* `sla_<job_key>_job_uptime_90_00_sec`
+* `sla_<job_key>_job_uptime_95_00_sec`
+* `sla_<job_key>_job_uptime_99_00_sec`
+
+**Units:** seconds
+
+You can also get customized real-time stats from the Aurora client. See `aurora sla -h` for
+more details.
+
+### Median Time To Assigned (MTTA)
+
+*Median time a job spends waiting for its tasks to be assigned to a host. This is a combined
+metric that helps track the dependency of scheduling performance on the requested resources
+(user scope) as well as the internal scheduler bin-packing algorithm efficiency (platform scope).*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtta_ms`
+* Per cluster - `sla_cluster_mtta_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in
+[ResourceAggregates.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java):
+  * By CPU:
+    * `sla_cpu_small_mtta_ms`
+    * `sla_cpu_medium_mtta_ms`
+    * `sla_cpu_large_mtta_ms`
+    * `sla_cpu_xlarge_mtta_ms`
+    * `sla_cpu_xxlarge_mtta_ms`
+  * By RAM:
+    * `sla_ram_small_mtta_ms`
+    * `sla_ram_medium_mtta_ms`
+    * `sla_ram_large_mtta_ms`
+    * `sla_ram_xlarge_mtta_ms`
+    * `sla_ram_xxlarge_mtta_ms`
+  * By DISK:
+    * `sla_disk_small_mtta_ms`
+    * `sla_disk_medium_mtta_ms`
+    * `sla_disk_large_mtta_ms`
+    * `sla_disk_xlarge_mtta_ms`
+    * `sla_disk_xxlarge_mtta_ms`
+
+**Units:** milliseconds
+
+MTTA only considers instances that have already reached ASSIGNED state and ignores those
+that are still PENDING. This ensures straggler instances (e.g. with unreasonable resource
+constraints) do not affect metric curves.
+
+### Median Time To Running (MTTR)
+
+*Median time a job waits for its tasks to reach RUNNING state. This is a comprehensive metric
+reflecting the overall time it takes Aurora/Mesos to start executing user content.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mttr_ms`
+* Per cluster - `sla_cluster_mttr_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in
+[ResourceAggregates.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java):
+  * By CPU:
+    * `sla_cpu_small_mttr_ms`
+    * `sla_cpu_medium_mttr_ms`
+    * `sla_cpu_large_mttr_ms`
+    * `sla_cpu_xlarge_mttr_ms`
+    * `sla_cpu_xxlarge_mttr_ms`
+  * By RAM:
+    * `sla_ram_small_mttr_ms`
+    * `sla_ram_medium_mttr_ms`
+    * `sla_ram_large_mttr_ms`
+    * `sla_ram_xlarge_mttr_ms`
+    * `sla_ram_xxlarge_mttr_ms`
+  * By DISK:
+    * `sla_disk_small_mttr_ms`
+    * `sla_disk_medium_mttr_ms`
+    * `sla_disk_large_mttr_ms`
+    * `sla_disk_xlarge_mttr_ms`
+    * `sla_disk_xxlarge_mttr_ms`
+
+**Units:** milliseconds
+
+MTTR only considers instances in RUNNING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+## Limitations
+
+* The availability of Aurora SLA metrics is bound by the scheduler availability.
+
+* All metrics are calculated at a pre-defined interval (currently set at 1 minute).
+  Scheduler restarts may result in missed collections.
diff --git a/source/documentation/0.10.0/storage-config.md b/source/documentation/0.10.0/storage-config.md
new file mode 100644
index 0000000..e8176a8
--- /dev/null
+++ b/source/documentation/0.10.0/storage-config.md
@@ -0,0 +1,142 @@
+# Storage Configuration And Maintenance
+
+- [Overview](#overview)
+- [Scheduler storage configuration flags](#scheduler-storage-configuration-flags)
+  - [Mesos replicated log configuration flags](#mesos-replicated-log-configuration-flags)
+    - [-native_log_quorum_size](#-native_log_quorum_size)
+    - [-native_log_file_path](#-native_log_file_path)
+    - [-native_log_zk_group_path](#-native_log_zk_group_path)
+  - [Backup configuration flags](#backup-configuration-flags)
+    - [-backup_interval](#-backup_interval)
+    - [-backup_dir](#-backup_dir)
+    - [-max_saved_backups](#-max_saved_backups)
+- [Recovering from a scheduler backup](#recovering-from-a-scheduler-backup)
+  - [Summary](#summary)
+  - [Preparation](#preparation)
+  - [Cleanup and re-initialize Mesos replicated log](#cleanup-and-re-initialize-mesos-replicated-log)
+  - [Restore from backup](#restore-from-backup)
+  - [Cleanup](#cleanup)
+
+## Overview
+
+This document summarizes Aurora storage configuration and maintenance details and is
+intended for use by anyone deploying and/or maintaining Aurora.
+
+For a high level overview of the Aurora storage architecture refer to [this document](/documentation/0.10.0/storage/).
+
+## Scheduler storage configuration flags
+
+Below is a summary of scheduler storage configuration flags that either don't have default values
+or require attention before deploying in a production environment.
+
+### Mesos replicated log configuration flags
+
+#### -native_log_quorum_size
+Defines the Mesos replicated log quorum size. See
+[the replicated log configuration document](/documentation/0.10.0/deploying-aurora-scheduler/#replicated-log-configuration)
+on how to choose the right value.
+
+#### -native_log_file_path
+Location of the Mesos replicated log files. Consider allocating a dedicated disk (preferably SSD)
+for Mesos replicated log files to ensure optimal storage performance.
+
+#### -native_log_zk_group_path
+ZooKeeper path used for Mesos replicated log quorum discovery.
+
+See [code](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/log/mesos/MesosLogStreamModule.java) for
+other available Mesos replicated log configuration options and default values.
+
+### Backup configuration flags
+
+Configuration options for the Aurora scheduler backup manager.
+
+#### -backup_interval
+The interval on which the scheduler writes local storage backups.  The default is every hour.
+
+#### -backup_dir
+Directory to write backups to.
+
+#### -max_saved_backups
+Maximum number of backups to retain before deleting the oldest backup(s).
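+
+Putting these together, a hypothetical flag combination (the path and values
+are illustrative; the interval syntax follows the `360mins`/`365days` style
+used elsewhere in this document):
+
+```
+-backup_interval=1hrs
+-backup_dir=/var/lib/aurora/backups
+-max_saved_backups=48
+```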
+
+## Recovering from a scheduler backup
+
+- [Summary](#summary)
+- [Preparation](#preparation)
+- [Cleanup and re-initialize Mesos replicated log](#cleanup-and-re-initialize-mesos-replicated-log)
+- [Restore from backup](#restore-from-backup)
+- [Cleanup](#cleanup)
+
+**Be sure to read the entire page before attempting to restore from a backup, as it may have
+unintended consequences.**
+
+### Summary
+
+The restoration procedure replaces the existing (possibly corrupted) Mesos replicated log with an
+earlier, backed-up version and requires all schedulers to be taken down temporarily while
+restoring. Once completed, the scheduler state resets to what it was when the backup was created.
+This means any jobs/tasks created or updated after the backup are unknown to the scheduler and will
+be killed shortly after the cluster restarts. All other tasks continue operating as normal.
+
+Usually, it is a bad idea to restore a backup that is not extremely recent (i.e. older than a few
+hours). This is because the scheduler will expect the cluster to look exactly as the backup does,
+so any tasks that have been rescheduled since the backup was taken will be killed.
+
+### Preparation
+
+Follow these steps to prepare the cluster for restoring from a backup:
+
+* Stop all scheduler instances
+
+* Consider blocking external traffic on a port defined in `-http_port` for all schedulers to
+prevent users from interacting with the scheduler during the restoration process. This will help
+troubleshooting by reducing the scheduler log noise and prevent users from making changes that will
+be erased after the backup snapshot is restored
+
+* The next steps put the scheduler into a partially disabled state where it is still able
+to accept storage recovery requests but unable to schedule or change task states. This may be
+accomplished by updating the following scheduler configuration options:
+  * Set `-mesos_master_address` to a non-existent zk address. This will prevent the scheduler from
+    registering with Mesos. E.g.: `-mesos_master_address=zk://localhost:2181`
+  * Set `-max_registration_delay` to a sufficiently long interval to prevent a registration timeout
+    and, as a result, scheduler suicide. E.g.: `-max_registration_delay=360mins`
+  * Make sure `-reconciliation_initial_delay` option is set high enough (e.g.: `365days`) to
+    prevent accidental task GC. This is important as the scheduler will attempt to reconcile the cluster
+    state and will kill all tasks when restarted with an empty Mesos replicated log.
+
+* Restart all schedulers
+
+### Cleanup and re-initialize Mesos replicated log
+
+Get rid of the corrupted files and re-initialize the Mesos replicated log:
+
+* Stop schedulers
+* Delete all files under `-native_log_file_path` on all schedulers
+* Initialize Mesos replica's log file: `mesos-log initialize --path=<-native_log_file_path>`
+* Restart schedulers
+
+### Restore from backup
+
+At this point the scheduler is ready to rehydrate from the backup:
+
+* Identify the leading scheduler by:
+  * running `aurora_admin get_scheduler <cluster>` - if the scheduler is responsive
+  * examining scheduler logs
+  * or examining Zookeeper registration under the path defined by `-zk_endpoints`
+    and `-serverset_path`
+
+* Locate the desired backup file, copy it to the leading scheduler, and stage recovery by running
+the following command on the leader:
+`aurora_admin scheduler_stage_recovery <cluster> scheduler-backup-<yyyy-MM-dd-HH-mm>`
+
+* At this point, the recovery snapshot is staged and available for manual verification/modification
+via `aurora_admin scheduler_print_recovery_tasks` and `scheduler_delete_recovery_tasks` commands.
+See `aurora_admin help <command>` for usage details.
+
+* Commit recovery. This instructs the scheduler to overwrite the existing Mesos replicated log with
+the provided backup snapshot and initiate a mandatory failover:
+`aurora_admin scheduler_commit_recovery <cluster>`
+
+### Cleanup
+Undo any modifications made during the [Preparation](#preparation) sequence.
+
diff --git a/source/documentation/0.10.0/storage.md b/source/documentation/0.10.0/storage.md
new file mode 100644
index 0000000..038dec2
--- /dev/null
+++ b/source/documentation/0.10.0/storage.md
@@ -0,0 +1,88 @@
+# Aurora Scheduler Storage
+
+- [Overview](#overview)
+- [Reads, writes, modifications](#reads-writes-modifications)
+  - [Read lifecycle](#read-lifecycle)
+  - [Write lifecycle](#write-lifecycle)
+- [Atomicity, consistency and isolation](#atomicity-consistency-and-isolation)
+- [Population on restart](#population-on-restart)
+
+## Overview
+
+The Aurora scheduler maintains data that needs to be persisted to survive failovers and restarts.
+For example:
+
+* Task configurations and scheduled task instances
+* Job update configurations and update progress
+* Production resource quotas
+* Mesos resource offer host attributes
+
+Aurora solves its persistence needs by leveraging the Mesos implementation of a Paxos replicated
+log [[1]](https://ramcloud.stanford.edu/~ongaro/userstudy/paxos.pdf)
+[[2]](http://en.wikipedia.org/wiki/State_machine_replication) with a key-value
+[LevelDB](https://github.com/google/leveldb) storage as the persistence medium.
+
+Conceptually, it can be represented by the following major components:
+
+* Volatile storage: in-memory cache of all available data. Implemented via in-memory
+[H2 Database](http://www.h2database.com/html/main.html) and accessed via
+[MyBatis](http://mybatis.github.io/mybatis-3/).
+* Log manager: interface between Aurora storage and Mesos replicated log. The default schema format
+is [thrift](https://github.com/apache/thrift). Data is stored in serialized binary form.
+* Snapshot manager: all data is periodically persisted to the Mesos replicated log in a single snapshot.
+This helps establish periodic recovery checkpoints and speeds up volatile storage recovery on
+restart.
+* Backup manager: as a precaution, snapshots are periodically written out into backup files.
+This solves a [disaster recovery problem](/documentation/0.10.0/storage-config/#recovering-from-a-scheduler-backup)
+in case of a complete loss or corruption of Mesos log files.
+
+![Storage hierarchy](images/storage_hierarchy.png)
+
+## Reads, writes, modifications
+
+All services in Aurora access data via a set of predefined store interfaces (aka stores) logically
+grouped by the type of data they serve. Every interface defines a specific set of operations allowed
+on the data, thus abstracting out the storage access and the actual persistence implementation. The
+latter is especially important in view of a general immutability of persisted data. With the Mesos
+replicated log as the underlying persistence solution, data can be read and written easily but not
+modified. All modifications are simulated by saving new versions of modified objects. This feature
+and general performance considerations justify the existence of the volatile in-memory store.
+
+### Read lifecycle
+
+There are two types of reads available in Aurora: consistent and weakly-consistent. The difference
+is explained [below](#atomicity-consistency-and-isolation).
+
+All reads are served from the volatile storage, making reads generally cheap operations
+from the performance standpoint. The majority of the volatile stores are represented by the
+in-memory H2 database. This allows for rich schema definitions, queries and relationships that
+key-value storage is unable to match.
+
+### Write lifecycle
+
+Writes are more involved operations since, in addition to updating the volatile store, data has to
+be appended to the replicated log. Data is not available for reads until fully acknowledged by both
+the replicated log and volatile storage.
+
+## Atomicity, consistency and isolation
+
+Aurora uses [write-ahead logging](http://en.wikipedia.org/wiki/Write-ahead_logging) to ensure
+consistency between replicated and volatile storage. In Aurora, data is first written into the
+replicated log and only then updated in the volatile store.
+
+Aurora storage uses read-write locks to serialize data mutations and provide consistent view of the
+available data. The available `Storage` interface exposes 3 major types of operations:
+* `consistentRead` - access is locked using the reader's lock and provides a consistent view on read
+* `weaklyConsistentRead` - access is lock-less. Delivers the best performance under contention but
+may result in stale reads
+* `write` - access is fully serialized by using the writer's lock. Operation success requires both
+volatile and replicated writes to succeed.
+
+The consistency of the volatile store is enforced via H2 transactional isolation.
+
+## Population on restart
+
+Any time a scheduler restarts, it restores its volatile state from the most recent position recorded
+in the replicated log by restoring the snapshot and replaying individual log entries on top to fully
+recover the state up to the last write.
+
diff --git a/source/documentation/0.10.0/test-resource-generation.md b/source/documentation/0.10.0/test-resource-generation.md
new file mode 100644
index 0000000..b339d5d
--- /dev/null
+++ b/source/documentation/0.10.0/test-resource-generation.md
@@ -0,0 +1,24 @@
+# Generating test resources
+
+## Background
+The Aurora source repository and distributions contain several
+[binary files](https://github.com/apache/aurora/blob/#{git_tag}/src/test/resources/org/apache/thermos/root/checkpoints) to
+qualify the backwards-compatibility of thermos with checkpoint data. Since
+thermos persists state to disk (to be read by the thermos observer), it is important that we have
+tests that prevent regressions affecting the ability to parse previously-written data.
+
+## Generating test files
+The files included represent persisted checkpoints that exercise different
+features of thermos. The existing files should not be modified unless
+we are accepting backwards incompatibility, such as with a major release.
+
+It is not practical to write source code to generate these files on the fly,
+as source would be vulnerable to drift (e.g. due to refactoring) in ways
+that would undermine the goal of ensuring backwards compatibility.
+
+The most common reason to add a new checkpoint file would be to provide
+coverage for new thermos features that alter the data format. This is
+accomplished by writing and running a
+[job configuration](/documentation/0.10.0/configuration-reference/) that exercises the feature, and
+copying the checkpoint file from the sandbox directory; by default this is
+`/var/run/thermos/checkpoints/<aurora task id>`.
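+
+A sketch of that copy step, using the test resources directory referenced
+above (fill in the task id):
+
+```
+cp /var/run/thermos/checkpoints/<aurora task id> \
+  src/test/resources/org/apache/thermos/root/checkpoints/
+```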
diff --git a/source/documentation/0.10.0/thrift-deprecation.md b/source/documentation/0.10.0/thrift-deprecation.md
new file mode 100644
index 0000000..39679ab
--- /dev/null
+++ b/source/documentation/0.10.0/thrift-deprecation.md
@@ -0,0 +1,50 @@
+# Thrift API Changes
+
+## Overview
+Aurora uses [Apache Thrift](https://thrift.apache.org/) for representing structured data in
+client/server RPC protocol as well as for internal data storage. While Thrift is capable of
+correctly handling additions and renames of the existing members, field removals must be done
+carefully to ensure backwards compatibility and provide predictable deprecation cycle. This
+document describes general guidelines for making Thrift schema changes to the existing fields in
+[api.thrift](https://github.com/apache/aurora/blob/#{git_tag}/api/src/main/thrift/org/apache/aurora/gen/api.thrift)).
+
+It is highly recommended to go through the
+[Thrift: The Missing Guide](http://diwakergupta.github.io/thrift-missing-guide/) first to refresh on
+basic Thrift schema concepts.
+
+## Checklist
+Every existing Thrift schema modification is unique in its requirements and must be analyzed
+carefully to identify its scope and expected consequences. The following checklist may help in that
+analysis:
+* Is this a new field/struct? If yes, go ahead
+* Is this a pure field/struct rename without any type/structure change? If yes, go ahead and rename
+* Anything else, read further to make sure your change is properly planned
+
+## Deprecation cycle
+Any time a breaking change (e.g.: field replacement or removal) is required, the following cycle
+must be followed:
+
+### vCurrent
+The change is applied in a way that does not break the ability of a scheduler/client at this
+version to communicate with a scheduler/client at vCurrent-1.
+* Do not remove or rename the old field
+* Add a new field as an eventual replacement of the old one and implement a dual read/write
+anywhere the old field is used
+* Check [storage.thrift](https://github.com/apache/aurora/blob/#{git_tag}/api/src/main/thrift/org/apache/aurora/gen/storage.thrift) to see if the
+affected struct is stored in Aurora scheduler storage. If so, you most likely need to backfill
+existing data to ensure both fields are populated eagerly on startup.
+See [StorageBackfill.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/storage/StorageBackfill.java)
+* File a deprecation JIRA ticket against the vCurrent+1 release candidate
+* Add a TODO for the deprecated field mentioning the JIRA ticket
+
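+For illustration only, a hypothetical field replacement following this cycle
+(the struct, field names, and ids below are made up):
+
+```
+struct ExampleConfig {
+  // Deprecated: replaced by hostname (field 11). Dual read/write until
+  // removal in vCurrent+1; see the deprecation ticket. (hypothetical)
+  10: optional string hostName
+  // Replacement for hostName (hypothetical).
+  11: optional string hostname
+}
+```
+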
+### vCurrent+1
+Finalize the change by removing the deprecated fields from the Thrift schema.
+* Drop any dual read/write routines added in the previous version
+* Remove the deprecated Thrift field
+
+## Testing
+It's always advisable to test your changes in the local vagrant environment to build more
+confidence that your change is backwards compatible. It's easy to simulate different
+client/scheduler versions by playing with `aurorabuild` command. See [this document](/documentation/0.10.0/vagrant/)
+for more.
+
diff --git a/source/documentation/0.10.0/tutorial.md b/source/documentation/0.10.0/tutorial.md
new file mode 100644
index 0000000..6626575
--- /dev/null
+++ b/source/documentation/0.10.0/tutorial.md
@@ -0,0 +1,265 @@
+Aurora Tutorial
+---------------
+
+- [Introduction](#introduction)
+- [Setup: Install Aurora](#setup-install-aurora)
+- [The Script](#the-script)
+- [Aurora Configuration](#aurora-configuration)
+- [What's Going On In That Configuration File?](#whats-going-on-in-that-configuration-file)
+- [Creating the Job](#creating-the-job)
+- [Watching the Job Run](#watching-the-job-run)
+- [Cleanup](#cleanup)
+- [Next Steps](#next-steps)
+
+## Introduction
+
+This tutorial shows how to use the Aurora scheduler to run (and
+"`printf-debug`") a hello world program on Mesos. The operational
+hierarchy is:
+
+- Aurora manages and schedules jobs for Mesos to run.
+- Mesos manages the individual tasks that make up a job.
+- Thermos manages the individual processes that make up a task.
+
+This is the recommended first document for new Aurora users to read to
+start getting up to speed on the system.
+
+To get help, email questions to the Aurora Developer List,
+[dev@aurora.apache.org](mailto:dev@aurora.apache.org).
+
+## Setup: Install Aurora
+
+You use the Aurora client and web UI to interact with Aurora jobs. To
+install it locally, see [vagrant.md](/documentation/0.10.0/vagrant/). The remainder of this
+Tutorial assumes you are running Aurora using Vagrant.  Unless otherwise stated,
+all commands are to be run from the root of the aurora repository clone.
+
+## The Script
+
+Our "hello world" application is a simple Python script that loops
+forever, displaying the time every few seconds. Copy the code below and
+put it in a file named `hello_world.py` in the root of your Aurora repository clone (Note:
+this directory is the same as `/vagrant` inside the Vagrant VMs).
+
+The script has an intentional bug, which we will explain later on.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+import sys
+import time
+
+def main(argv):
+  SLEEP_DELAY = 10
+  # Python ninjas - ignore this blatant bug.
+  for i in xrang(100):
+    print("Hello world! The time is now: %s. Sleeping for %d secs" % (
+      time.asctime(), SLEEP_DELAY))
+    sys.stdout.flush()
+    time.sleep(SLEEP_DELAY)
+
+if __name__ == "__main__":
+  main(sys.argv)
+```
+
+## Aurora Configuration
+
+Once we have our script/program, we need to create a *configuration
+file* that tells Aurora how to manage and launch our Job. Save the below
+code in the file `hello_world.aurora`.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+pkg_path = '/vagrant/hello_world.py'
+
+# we use a trick here to make the configuration change with
+# the contents of the file, for simplicity.  in a normal setting, packages would be
+# versioned, and the version number would be changed in the configuration.
+import hashlib
+with open(pkg_path, 'rb') as f:
+  pkg_checksum = hashlib.md5(f.read()).hexdigest()
+
+# copy hello_world.py into the local sandbox
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s . && echo %s && chmod +x hello_world.py' % (pkg_path, pkg_checksum))
+
+# run the script
+hello_world = Process(
+  name = 'hello_world',
+  cmdline = 'python hello_world.py')
+
+# describe the task
+hello_world_task = SequentialTask(
+  processes = [install, hello_world],
+  resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB))
+
+jobs = [
+  Service(cluster = 'devcluster',
+          environment = 'devel',
+          role = 'www-data',
+          name = 'hello_world',
+          task = hello_world_task)
+]
+```
+
+For more about Aurora configuration files, see the [Configuration
+Tutorial](/documentation/0.10.0/configuration-tutorial/) and the [Aurora + Thermos
+Reference](/documentation/0.10.0/configuration-reference/) (preferably after finishing this
+tutorial).
+
+## What's Going On In That Configuration File?
+
+More than you might think.
+
+1. From a "big picture" viewpoint, it first defines two
+Processes. Then it defines a Task that runs the two Processes in the
+order specified in the Task definition, as well as specifying what
+computational and memory resources are available for them.  Finally,
+it defines a Job that will schedule the Task on available and suitable
+machines. This Job is the sole member of a list of Jobs; you can
+specify more than one Job in a config file.
+
+2. At the Process level, it specifies how to get your code into the
+local sandbox in which it will run. It then specifies how the code is
+actually run once the second Process starts.
+
+## Creating the Job
+
+We're ready to launch our job! To do so, we use the Aurora Client to
+issue a Job creation request to the Aurora scheduler.
+
+Many Aurora Client commands take a *job key* argument, which uniquely
+identifies a Job. A job key consists of four parts, each separated by a
+"/". The four parts are  `<cluster>/<role>/<environment>/<jobname>`
+in that order. When comparing two job keys, if any of the
+four parts is different from its counterpart in the other key, then the
+two job keys identify two separate jobs. If all four values are
+identical, the job keys identify the same job.
+
+`/etc/aurora/clusters.json` within the Aurora scheduler has the available
+cluster names. For Vagrant, from the top-level of your Aurora repository clone,
+do:
+
+    $ vagrant ssh
+
+Followed by:
+
+    vagrant@precise64:~$ cat /etc/aurora/clusters.json
+
+You'll see something like:
+
+```javascript
+[{
+  "name": "devcluster",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler",
+  "auth_mechanism": "UNAUTHENTICATED"
+}]
+```
+
+Use a `name` value for your job key's cluster value.
+
+Role names are user accounts existing on the slave machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
+The Aurora Client command that actually runs our Job is `aurora job create`. It creates a Job as
+specified by its job key and configuration file arguments and runs it.
+
+    aurora job create <cluster>/<role>/<environment>/<jobname> <config_file>
+
+Or for our example:
+
+    aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+This returns:
+
+    $ vagrant ssh
+    Welcome to Ubuntu 12.04 LTS (GNU/Linux 3.2.0-23-generic x86_64)
+
+     * Documentation:  https://help.ubuntu.com/
+    Welcome to your Vagrant-built virtual machine.
+    Last login: Fri Jan  3 02:18:55 2014 from 10.0.2.2
+    vagrant@precise64:~$ aurora job create devcluster/www-data/devel/hello_world \
+        /vagrant/hello_world.aurora
+     INFO] Creating job hello_world
+     INFO] Response from scheduler: OK (message: 1 new tasks pending for job
+      www-data/devel/hello_world)
+     INFO] Job url: http://precise64:8081/scheduler/www-data/devel/hello_world
+
+## Watching the Job Run
+
+Now that our job is running, let's see what it's doing. Access the
+scheduler web interface at `http://$scheduler_hostname:$scheduler_port/scheduler`,
+or when using `vagrant`, `http://192.168.33.7:8081/scheduler`.
+First we see what Jobs are scheduled:
+
+![Scheduled Jobs](images/ScheduledJobs.png)
+
+Click on your user name, which in this case was `www-data`, and we see the Jobs associated
+with that role:
+
+![Role Jobs](images/RoleJobs.png)
+
+If you click on your `hello_world` Job, you'll see:
+
+![hello_world Job](images/HelloWorldJob.png)
+
+Oops, looks like our first job didn't quite work! The task failed, so we have
+to figure out what went wrong.
+
+Access the page for our Task by clicking on its host.
+
+![Task page](images/TaskBreakdown.png)
+
+Once there, we see that the
+`hello_world` process failed. The Task page captures the standard error and
+standard output streams and makes them available. Clicking through
+to `stderr` on the failed `hello_world` process, we see what happened.
+
+![stderr page](images/stderr.png)
+
+It looks like we made a typo in our Python script. We wanted `xrange`,
+not `xrang`. Edit the `hello_world.py` script to use the correct function and
+we will try again.
+
+    aurora job update devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+This time, the task comes up, we inspect the page, and see that the
+`hello_world` process is running.
+
+![Running Task page](images/runningtask.png)
+
+We then inspect the output by clicking on `stdout` and see our process'
+output:
+
+![stdout page](images/stdout.png)
+
+## Cleanup
+
+Now that we're done, we kill the job using the Aurora client:
+
+    vagrant@precise64:~$ aurora job killall devcluster/www-data/devel/hello_world
+     INFO] Killing tasks for job: devcluster/www-data/devel/hello_world
+     INFO] Response from scheduler: OK (message: Tasks killed.)
+     INFO] Job url: http://precise64:8081/scheduler/www-data/devel/hello_world
+    vagrant@precise64:~$
+
+The job page now shows the `hello_world` tasks as completed.
+
+![Killed Task page](images/killedtask.png)
+
+## Next Steps
+
+Now that you've finished this Tutorial, you should read or do the following:
+
+- [The Aurora Configuration Tutorial](/documentation/0.10.0/configuration-tutorial/), which provides more examples
+  and best practices for writing Aurora configurations. You should also look at
+  the [Aurora + Thermos Configuration Reference](/documentation/0.10.0/configuration-reference/).
+- The [Aurora User Guide](/documentation/0.10.0/user-guide/) provides an overview of how Aurora, Mesos, and
+  Thermos work "under the hood".
+- Explore the Aurora Client - use `aurora -h`, and read the
+  [Aurora Client Commands](/documentation/0.10.0/client-commands/) document.
diff --git a/source/documentation/0.10.0/user-guide.md b/source/documentation/0.10.0/user-guide.md
new file mode 100644
index 0000000..ac474a9
--- /dev/null
+++ b/source/documentation/0.10.0/user-guide.md
@@ -0,0 +1,355 @@
+Aurora User Guide
+-----------------
+
+- [Overview](#overview)
+- [Job Lifecycle](#job-lifecycle)
+	- [Life Of A Task](#life-of-a-task)
+	- [PENDING to RUNNING states](#pending-to-running-states)
+	- [Task Updates](#task-updates)
+	- [HTTP Health Checking and Graceful Shutdown](#http-health-checking-and-graceful-shutdown)
+		- [Tearing a task down](#tearing-a-task-down)
+	- [Giving Priority to Production Tasks: PREEMPTING](#giving-priority-to-production-tasks-preempting)
+	- [Natural Termination: FINISHED, FAILED](#natural-termination-finished-failed)
+	- [Forceful Termination: KILLING, RESTARTING](#forceful-termination-killing-restarting)
+- [Service Discovery](#service-discovery)
+- [Configuration](#configuration)
+- [Creating Jobs](#creating-jobs)
+- [Interacting With Jobs](#interacting-with-jobs)
+
+Overview
+--------
+
+This document gives an overview of how Aurora works under the hood.
+It assumes you've already worked through the "hello world" example
+job in the [Aurora Tutorial](/documentation/0.10.0/tutorial/). Specifics of how to use Aurora are **not**
+given here, but pointers to documentation about how to use Aurora are
+provided.
+
+Aurora is a Mesos framework used to schedule *jobs* onto Mesos. Mesos
+cares about individual *tasks*, but typical jobs consist of dozens or
+hundreds of task replicas. Aurora provides a layer on top of Mesos with
+its `Job` abstraction. An Aurora `Job` consists of a task template and
+instructions for creating near-identical replicas of that task (modulo
+things like "instance id" or specific port numbers which may differ from
+machine to machine).
+
+How many tasks make up a Job is complicated. On a basic level, a Job consists of
+one task template and instructions for creating near-identical replicas of that task
+(otherwise referred to as "instances" or "shards").
+
+However, since Jobs can be updated on the fly, a single Job identifier or *job key*
+can have multiple job configurations associated with it.
+
+For example, say I have a Job with 4 instances that each
+request 1 core of cpu, 1 GB of RAM, and 1 GB of disk space as specified
+in the configuration file `hello_world.aurora`. I want to
+update it so it requests 2 GB of RAM instead of 1. I create a new
+configuration file to do that called `new_hello_world.aurora` and
+issue an `aurora job update <job_key_value>/0-1 new_hello_world.aurora`
+command.
+
+This results in instances 0 and 1 having 1 cpu, 2 GB of RAM, and 1 GB of disk space,
+while instances 2 and 3 have 1 cpu, 1 GB of RAM, and 1 GB of disk space. If instance 3
+dies and restarts, it restarts with 1 cpu, 1 GB RAM, and 1 GB disk space.
+
+So that means there are two task configurations in effect for the same Job
+at the same time, just valid for different ranges of instances.
+
+This isn't a recommended pattern, but it is valid and supported by the
+Aurora scheduler. This most often manifests in the "canary pattern" where
+instance 0 runs with a different configuration than instances 1-N to test
+different code versions alongside the actual production job.
+
+A task can merely be a single *process* corresponding to a single
+command line, such as `python2.6 my_script.py`. However, a task can also
+consist of many separate processes, which all run within a single
+sandbox. For example, running multiple cooperating agents together,
+such as `logrotate`, `installer`, master, or slave processes. This is
+where Thermos comes in. While Aurora provides a `Job` abstraction on
+top of Mesos `Tasks`, Thermos provides a `Process` abstraction
+underneath Mesos `Task`s and serves as part of the Aurora framework's
+executor.
+
+You define `Job`s, `Task`s, and `Process`es in a configuration file.
+Configuration files are written in Python, and make use of the Pystachio
+templating language. They end in a `.aurora` extension.
+
+Pystachio is a type-checked dictionary templating library.
+
+> TL;DR
+>
+> -   Aurora manages jobs made of tasks.
+> -   Mesos manages tasks made of processes.
+> -   Thermos manages processes.
+> -   All are defined in a `.aurora` configuration file.
+
+![Aurora hierarchy](images/aurora_hierarchy.png)
+
+Each `Task` has a *sandbox* created when the `Task` starts and garbage
+collected when it finishes. All of a `Task`'s processes run in its
+sandbox, so processes can share state by using a shared current working
+directory.
+
+The sandbox garbage collection policy considers many factors, most
+importantly age and size. It makes a best-effort attempt to keep
+sandboxes around as long as possible post-task in order for service
+owners to inspect data and logs, should the `Task` have completed
+abnormally. But you can't design your applications assuming sandboxes
+will be around forever; preserve anything important, e.g. by building log saving or other
+checkpointing mechanisms directly into your application or into your
+`Job` description.
+
+Job Lifecycle
+-------------
+
+When Aurora reads a configuration file and finds a `Job` definition, it:
+
+1.  Evaluates the `Job` definition.
+2.  Splits the `Job` into its constituent `Task`s.
+3.  Sends those `Task`s to the scheduler.
+4.  The scheduler puts the `Task`s into `PENDING` state, starting each
+    `Task`'s life cycle.
+
+### Life Of A Task
+
+![Life of a task](images/lifeofatask.png)
+
+### PENDING to RUNNING states
+
+When a `Task` is in the `PENDING` state, the scheduler constantly
+searches for machines satisfying that `Task`'s resource request
+requirements (RAM, disk space, CPU time) while maintaining configuration
+constraints such as "a `Task` must run on machines dedicated to a
+particular role" or attribute limit constraints such as "at most 2
+`Task`s from the same `Job` may run on each rack". When the scheduler
+finds a suitable match, it assigns the `Task` to a machine and puts the
+`Task` into the `ASSIGNED` state.
+
+From the `ASSIGNED` state, the scheduler sends an RPC to the slave
+machine containing `Task` configuration, which the slave uses to spawn
+an executor responsible for the `Task`'s lifecycle. When the scheduler
+receives an acknowledgement that the machine has accepted the `Task`,
+the `Task` goes into `STARTING` state.
+
+`STARTING` state initializes a `Task` sandbox. When the sandbox is fully
+initialized, Thermos begins to invoke `Process`es. Also, the slave
+machine sends an update to the scheduler that the `Task` is
+in `RUNNING` state.
+
+If a `Task` stays in `ASSIGNED` or `STARTING` for too long, the
+scheduler forces it into `LOST` state, creating a new `Task` in its
+place that's sent into `PENDING` state. This is technically true of any
+active state: if the Mesos core tells the scheduler that a slave has
+become unhealthy (or outright disappeared), the `Task`s assigned to that
+slave go into `LOST` state and new `Task`s are created in their place.
+From `PENDING` state, there is no guarantee a `Task` will be reassigned
+to the same machine unless job constraints explicitly force it there.
+
+If there is a state mismatch (e.g. a machine returns from a `netsplit`
+and the scheduler has marked all its `Task`s `LOST` and rescheduled
+them), a state reconciliation process kills the errant `RUNNING` tasks,
+which may take up to an hour. But to emphasize this point: there is no
+uniqueness guarantee for a single instance of a job in the presence of
+network partitions. If the Task requires that, it should be baked in at
+the application level using a distributed coordination service such as
+Zookeeper.
+
+### Task Updates
+
+`Job` configurations can be updated at any point in their lifecycle.
+Usually updates are done incrementally using a process called a *rolling
+upgrade*, in which Tasks are upgraded in small groups, one group at a
+time.  Updates are done using various Aurora Client commands.
+
+For a configuration update, the Aurora Client calculates required changes
+by examining the current job config state and the new desired job config.
+It then starts a rolling batched update process by going through every batch
+and performing these operations:
+
+- If an instance is present in the scheduler but isn't in the new config,
+  then that instance is killed.
+- If an instance is not present in the scheduler but is present in
+  the new config, then the instance is created.
+- If an instance is present in both the scheduler and the new config, then
+  the client diffs both task configs. If it detects any changes, it
+  performs an instance update by killing the old config instance and adding
+  the new config instance.
+
+The Aurora client continues through the instance list until all tasks are
+updated, in `RUNNING`, and healthy for a configurable amount of time.
+If the client determines the update is not going well (a percentage of health
+checks have failed), it cancels the update.
+
+Update cancellation runs a procedure similar to the update sequence described
+above, but in reverse order. New instance configs are swapped
+with old instance configs and batch updates proceed backwards
+from the point where the update failed. E.g., update batches (0,1,2), (3,4,5), (6,7,8-FAIL)
+result in a rollback in the order (8,7,6), (5,4,3), (2,1,0).
+
+### HTTP Health Checking and Graceful Shutdown
+
+The Executor implements a protocol for rudimentary control of a task via HTTP.  Tasks subscribe to
+this protocol by declaring a port named `health`.  Take for example this configuration snippet:
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[http]}}')
+
+When this Process is included in a job, the job will be allocated a port, and the command line
+will be replaced with something like:
+
+    ./run_nginx.sh -port 42816
+
+Where 42816 happens to be the allocated port.  Typically, the Executor monitors Processes within
+a task only by liveness of the forked process.  However, when a `health` port is allocated, it will
+also send periodic HTTP health checks.  A task requesting a `health` port must handle the following
+requests:
+
+| HTTP request            | Description                             |
+| ------------            | -----------                             |
+| `GET /health`           | Inquires whether the task is healthy.   |
+| `POST /quitquitquit`    | Task should initiate graceful shutdown. |
+| `POST /abortabortabort` | Final warning task is being killed.     |
+
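+For illustration, here is a minimal sketch in Python of a service implementing these
+endpoints with the standard-library `http.server` module. This is not Aurora-provided
+code: the handler structure, the hardcoded port, and the response bodies are assumptions,
+and a real service would typically fold these routes into its existing HTTP stack and
+bind the port via the allocated `health` port.
+
+    # Hypothetical sketch of a task serving the Executor's control endpoints.
+    from http.server import BaseHTTPRequestHandler, HTTPServer
+
+    class ControlHandler(BaseHTTPRequestHandler):
+        def _reply(self, body):
+            self.send_response(200)
+            self.end_headers()
+            self.wfile.write(body.encode())
+
+        def do_GET(self):
+            if self.path == '/health':
+                # Answer only while the application considers itself healthy.
+                self._reply('ok')
+            else:
+                self.send_error(404)
+
+        def do_POST(self):
+            if self.path == '/quitquitquit':
+                self._reply('ok')
+                # Initiate graceful shutdown: stop accepting work, drain, exit.
+            elif self.path == '/abortabortabort':
+                self._reply('ok')
+                # Final warning: terminate immediately.
+            else:
+                self.send_error(404)
+
+    # In a real task the port number would come from {{thermos.ports[health]}}.
+    HTTPServer(('0.0.0.0', 8080), ControlHandler).serve_forever()
+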
+Please see the
+[configuration reference](/documentation/0.10.0/configuration-reference/#healthcheckconfig-objects) for
+configuration options for this feature.
+
+#### Snoozing Health Checks
+
+If you need to pause your health check, you can do so by touching a file inside of your sandbox
+named `.healthchecksnooze`.
+
+As long as that file is present, health checks will be disabled, enabling users to gather core dumps
+or other performance measurements without worrying about Aurora's health check killing their
+process.
+
+WARNING: Remember to remove this file when you are done; otherwise your instance will have
+permanently disabled health checks.
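+
+For example, from a shell inside the task's sandbox:
+
+    touch .healthchecksnooze    # pause health checks
+    # ... gather core dumps, profiles, etc. ...
+    rm .healthchecksnooze       # re-enable health checks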
+
+#### Tearing a task down
+
+The Executor follows an escalation sequence when killing a running task:
+
+  1. If the `health` port is not present, skip to (5)
+  2. POST /quitquitquit
+  3. wait 5 seconds
+  4. POST /abortabortabort
+  5. Send SIGTERM (`kill`)
+  6. Send SIGKILL (`kill -9`)
+
+If the Executor notices that all Processes in a Task have aborted during this sequence, it will
+not proceed with subsequent steps.  Note that graceful shutdown is best-effort, and due to the many
+inevitable realities of distributed systems, it may not be performed.
+
+### Giving Priority to Production Tasks: PREEMPTING
+
+Sometimes a Task needs to be interrupted, such as when a non-production
+Task's resources are needed by a higher priority production Task. This
+type of interruption is called a *preemption*. When this happens in
+Aurora, the non-production Task is killed and moved into
+the `PREEMPTING` state when both of the following are true:
+
+- The task being killed is a non-production task.
+- The other task is a `PENDING` production task that hasn't been
+  scheduled due to a lack of resources.
+
+Since production tasks are much more important, Aurora kills off the
+non-production task to free up resources for the production task. The
+scheduler UI shows that the non-production task was preempted in favor of
+the production task. At some point, tasks in `PREEMPTING` move to `KILLED`.
+
+Note that non-production tasks consuming many resources are likely to be
+preempted in favor of production tasks.
+
+### Natural Termination: FINISHED, FAILED
+
+A `RUNNING` `Task` can terminate without direct user interaction. For
+example, it may be a finite computation that finishes, even something as
+simple as `echo hello world`. Or it could be an exceptional condition in
+a long-lived service. If the `Task` is successful (its underlying
+processes have succeeded with exit status `0` or finished without
+reaching failure limits) it moves into `FINISHED` state. If it finished
+after reaching a set of failure limits, it goes into `FAILED` state.
+
+### Forceful Termination: KILLING, RESTARTING
+
+You can terminate a `Task` by issuing an `aurora job kill` command, which
+moves it into `KILLING` state. The scheduler then sends the slave a
+request to terminate the `Task`. If the scheduler receives a successful
+response, it moves the Task into `KILLED` state and never restarts it.
+
+The scheduler has access to a non-public `RESTARTING` state. If a `Task`
+is forced into the `RESTARTING` state, the scheduler kills the
+underlying task but in parallel schedules an identical replacement for
+it.
+
+Configuration
+-------------
+
+You define and configure your Jobs (and their Tasks and Processes) in
+Aurora configuration files. Their filenames end with the `.aurora`
+suffix, and you write them in Python, making use of the Pystachio
+templating language, along
+with specific Aurora, Mesos, and Thermos commands and methods. See the
+[Configuration Guide and Reference](/documentation/0.10.0/configuration-reference/) and
+[Configuration Tutorial](/documentation/0.10.0/configuration-tutorial/).
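+
+For a taste of the format, here is a minimal sketch of an `.aurora` file, modeled loosely
+on the `hello_world` example referenced elsewhere in this guide (all names and resource
+amounts are illustrative):
+
+    hello = Process(
+      name = 'hello',
+      cmdline = 'echo hello world; sleep 10')
+
+    task = Task(
+      name = 'hello',
+      processes = [hello],
+      resources = Resources(cpu = 0.1, ram = 16*MB, disk = 16*MB))
+
+    jobs = [Service(
+      cluster = 'devcluster',
+      role = 'www-data',
+      environment = 'prod',
+      name = 'hello',
+      task = task)]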
+
+Service Discovery
+-----------------
+
+It is possible for the Aurora executor to announce tasks into ServerSets for
+the purpose of service discovery.  ServerSets use the ZooKeeper [group membership pattern](http://zookeeper.apache.org/doc/trunk/recipes.html#sc_outOfTheBox),
+of which there are several reference implementations:
+
+  - [C++](https://github.com/apache/mesos/blob/master/src/zookeeper/group.cpp)
+  - [Java](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/zookeeper/ServerSetImpl.java#L221)
+  - [Python](https://github.com/twitter/commons/blob/master/src/python/twitter/common/zookeeper/serverset/serverset.py#L51)
+
+These can also be used natively in Finagle using the [ZookeeperServerSetCluster](https://github.com/twitter/finagle/blob/master/finagle-serversets/src/main/scala/com/twitter/finagle/zookeeper/ZookeeperServerSetCluster.scala).
+
+For more information about how to configure announcing, see the [Configuration Reference](/documentation/0.10.0/configuration-reference/).
+
+Creating Jobs
+-------------
+
+You create and manipulate Aurora Jobs with the Aurora client, whose
+command-line commands all start with
+`aurora`. See [Aurora Client Commands](/documentation/0.10.0/client-commands/) for details
+about the Aurora Client.
+
+Interacting With Jobs
+---------------------
+
+You interact with Aurora jobs either via:
+
+- Read-only Web UIs
+
+  Part of the output from creating a new Job is a URL for the Job's scheduler UI page.
+
+  For example:
+
+      vagrant@precise64:~$ aurora job create devcluster/www-data/prod/hello \
+      /vagrant/examples/jobs/hello_world.aurora
+      INFO] Creating job hello
+      INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+      INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+  The "Job url" goes to the Job's scheduler UI page. To go to the overall scheduler UI page,
+  stop at the "scheduler" part of the URL, in this case, `http://precise64:8081/scheduler`
+
+  You can also reach the scheduler UI page via the Client command `aurora job open`:
+
+      aurora job open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+  If only the cluster is specified, it goes directly to that cluster's scheduler main page.
+  If the role is specified, it goes to the top-level role page. If the full job key is specified,
+  it goes directly to the job page where you can inspect individual tasks.
+
+  Once you click through to a role page, you see Jobs arranged separately by pending jobs, active
+  jobs, and finished jobs. Jobs are arranged by role, typically a service account for production
+  jobs and user accounts for test or development jobs.
+
+- The Aurora client
+
+  See [client commands](/documentation/0.10.0/client-commands/).
diff --git a/source/documentation/0.10.0/vagrant.md b/source/documentation/0.10.0/vagrant.md
new file mode 100644
index 0000000..e25d2d2
--- /dev/null
+++ b/source/documentation/0.10.0/vagrant.md
@@ -0,0 +1,137 @@
+Getting Started
+===============
+
+This document shows you how to configure a complete cluster using a virtual machine. This setup
+replicates a real cluster on your development machine as closely as possible. After you complete
+the steps outlined here, you will be ready to create and run your first Aurora job.
+
+The following sections describe these steps in detail:
+
+1. [Overview](#overview)
+1. [Install VirtualBox and Vagrant](#install-virtualbox-and-vagrant)
+1. [Clone the Aurora repository](#clone-the-aurora-repository)
+1. [Start the local cluster](#start-the-local-cluster)
+1. [Log onto the VM](#log-onto-the-vm)
+1. [Run your first job](#run-your-first-job)
+1. [Rebuild components](#rebuild-components)
+1. [Shut down or delete your local cluster](#shut-down-or-delete-your-local-cluster)
+1. [Troubleshooting](#troubleshooting)
+
+
+Overview
+--------
+
+The Aurora distribution includes a set of scripts that enable you to create a local cluster on
+your development machine. These scripts use [Vagrant](https://www.vagrantup.com/) and
+[VirtualBox](https://www.virtualbox.org/) to run and configure a virtual machine. Once the
+virtual machine is running, the scripts install and initialize Aurora and any required components
+to create the local cluster.
+
+
+Install VirtualBox and Vagrant
+------------------------------
+
+First, download and install [VirtualBox](https://www.virtualbox.org/) on your development machine.
+
+Then download and install [Vagrant](https://www.vagrantup.com/). To verify that the installation
+was successful, open a terminal window and type the `vagrant` command. You should see a list of
+common commands for this tool.
+
+
+Clone the Aurora repository
+---------------------------
+
+To obtain the Aurora source distribution, clone its Git repository using the following command:
+
+     git clone git://git.apache.org/aurora.git
+
+
+Start the local cluster
+-----------------------
+
+Now change into the `aurora/` directory, which contains the Aurora source code and
+other scripts and tools:
+
+     cd aurora/
+
+To start the local cluster, type the following command:
+
+     vagrant up
+
+This command uses the configuration scripts in the Aurora distribution to:
+
+* Download a Linux system image.
+* Start a virtual machine (VM) and configure it.
+* Install the required build tools on the VM.
+* Install Aurora's requirements (like [Mesos](http://mesos.apache.org/) and
+[Zookeeper](http://zookeeper.apache.org/)) on the VM.
+* Build and install Aurora from source on the VM.
+* Start Aurora's services on the VM.
+
+This process takes several minutes to complete.
+
+To verify that Aurora is running on the cluster, visit the following URLs:
+
+* Scheduler - http://192.168.33.7:8081
+* Observer - http://192.168.33.7:1338
+* Mesos Master - http://192.168.33.7:5050
+* Mesos Slave - http://192.168.33.7:5051
+
+
+Log onto the VM
+---------------
+
+To SSH into the VM, run the following command on your development machine:
+
+     vagrant ssh
+
+To verify that Aurora is installed in the VM, type the `aurora` command. You should see a list
+of arguments and possible commands.
+
+The `/vagrant` directory on the VM is mapped to the `aurora/` local directory
+from which you started the cluster. You can edit files inside this directory on your development
+machine and access them from the VM under `/vagrant`.
+
+A pre-installed `clusters.json` file refers to your local cluster as `devcluster`, which you
+will use in client commands.
+
+
+Run your first job
+------------------
+
+Now that your cluster is up and running, you are ready to define and run your first job in Aurora.
+For more information, see the [Aurora Tutorial](/documentation/0.10.0/tutorial/).
+
+
+Rebuild components
+------------------
+
+If you are changing Aurora code and would like to rebuild a component, you can use the `aurorabuild`
+command on the VM to build and restart a component.  This is considerably faster than destroying
+and rebuilding your VM.
+
+`aurorabuild` accepts a list of components to build and update. To get a list of supported
+components, invoke the `aurorabuild` command with no arguments. For example, to rebuild and
+restart the client:
+
+     vagrant ssh -c 'aurorabuild client'
+
+
+Shut down or delete your local cluster
+--------------------------------------
+
+To shut down your local cluster, run the `vagrant halt` command on your development machine. To
+start it again, run the `vagrant up` command.
+
+Once you are finished with your local cluster, or if you would otherwise like to start from scratch,
+you can use the command `vagrant destroy` to turn off and delete the virtual machine.
+
+
+Troubleshooting
+---------------
+
+Most Vagrant-related problems can be fixed with the following steps:
+
+* Destroying the vagrant environment with `vagrant destroy`
+* Killing any orphaned VMs (see AURORA-499) with the VirtualBox UI or the `VBoxManage` command-line tool
+* Cleaning the repository of build artifacts and other intermediate output with `git clean -fdx`
+* Bringing up the vagrant environment with `vagrant up`
diff --git a/source/documentation/0.11.0/build-system.md b/source/documentation/0.11.0/build-system.md
new file mode 100644
index 0000000..39c231d
--- /dev/null
+++ b/source/documentation/0.11.0/build-system.md
@@ -0,0 +1,100 @@
+The Python components of Aurora are built using [Pants](https://pantsbuild.github.io).
+
+Python Build Conventions
+========================
+The Python code is laid out according to the following conventions: 
+
+1. 1 `BUILD` per 3rd level directory. For a list of current top-level packages run:
+
+        % find src/main/python -maxdepth 3 -mindepth 3 -type d |\
+        while read dname; do echo $dname |\
+            sed 's@src/main/python/\(.*\)/\(.*\)/\(.*\).*@\1.\2.\3@'; done
+
+2.  Each `BUILD` file exports 1 
+    [`python_library`](https://pantsbuild.github.io/build_dictionary.html#bdict_python_library)
+    that provides a
+    [`setup_py`](https://pantsbuild.github.io/build_dictionary.html#setup_py)
+    containing each
+    [`python_binary`](https://pantsbuild.github.io/build_dictionary.html#python_binary)
+    in the `BUILD` file, named the same as the directory it's in so that it can be referenced
+    without a ':' character. The `sources` field in the `python_library` will almost always be
+    `rglobs('*.py')`.
+
+3.  Other BUILD files may only depend on this single public `python_library`
+    target. Any other target is considered a private implementation detail and
+    should be prefixed with an `_`.
+
+4.  `python_binary` targets are always named the same as the exported console script.
+
+5.  `python_binary` targets must have identical `dependencies` to the `python_library` exported
+    by the package and must use `entry_point`.
+
+    This means a PEX file generated by pants will contain exactly the same files that will be
+    available on the `PYTHONPATH` in the case of `pip install` of the corresponding library
+    target. This will help our migration off of Pants in the future.
+
+Annotated example - apache.thermos.runner
+-----------------------------------------
+```
+% find src/main/python/apache/thermos/runner
+src/main/python/apache/thermos/runner
+src/main/python/apache/thermos/runner/__init__.py
+src/main/python/apache/thermos/runner/thermos_runner.py
+src/main/python/apache/thermos/runner/BUILD
+% cat src/main/python/apache/thermos/runner/BUILD
+# License boilerplate omitted
+import os
+
+
+# Private target so that a setup_py can exist without a circular dependency. Only targets within
+# this file should depend on this.
+python_library(
+  name = '_runner',
+  # The target covers every python file under this directory and subdirectories.
+  sources = rglobs('*.py'),
+  dependencies = [
+    '3rdparty/python:twitter.common.app',
+    '3rdparty/python:twitter.common.log',
+    # Source dependencies are always referenced without a ':'.
+    'src/main/python/apache/thermos/common',
+    'src/main/python/apache/thermos/config',
+    'src/main/python/apache/thermos/core',
+  ],
+)
+
+# Binary target for thermos_runner.pex. Nothing should depend on this - it's only used as an
+# argument to ./pants binary.
+python_binary(
+  name = 'thermos_runner',
+  # Use entry_point, not source so the files used here are the same ones tests see.
+  entry_point = 'apache.thermos.runner.thermos_runner',
+  dependencies = [
+    # Notice that we depend only on the single private target from this BUILD file here.
+    ':_runner',
+  ],
+)
+
+# The public library that everyone importing the runner symbols uses.
+# The test targets and any other dependent source code should depend on this.
+python_library(
+  name = 'runner',
+  dependencies = [
+    # Again, notice that we depend only on the single private target from this BUILD file here.
+    ':_runner',
+  ],
+  # We always provide a setup_py. This will cause any dependee libraries to automatically
+  # reference this library in their requirements.txt rather than copy the source files into their
+  # sdist.
+  provides = setup_py(
+    # Conventionally named and versioned.
+    name = 'apache.thermos.runner',
+    version = open(os.path.join(get_buildroot(), '.auroraversion')).read().strip().upper(),
+  ).with_binaries({
+    # Every binary in this file should also be repeated here.
+    # Always use the dict-form of .with_binaries so that commands with dashes in their names are
+    # supported.
+    # The console script name is always the same as the PEX with .pex stripped.
+    'thermos_runner': ':thermos_runner',
+  }),
+)
+```
diff --git a/source/documentation/0.11.0/client-cluster-configuration.md b/source/documentation/0.11.0/client-cluster-configuration.md
new file mode 100644
index 0000000..540952b
--- /dev/null
+++ b/source/documentation/0.11.0/client-cluster-configuration.md
@@ -0,0 +1,70 @@
+# Client Cluster Configuration
+
+A cluster configuration file is used by the Aurora client to describe the Aurora clusters with
+which it can communicate. Ultimately this allows client users to reference clusters with short names
+like `us-east` and `eu`. The following properties may be set:
+
+  **Property**             | **Type** | **Description**
+  :------------------------| :------- | :--------------
+   **name**                | String   | Cluster name (Required)
+   **slave_root**          | String   | Path to mesos slave work dir (Required)
+   **slave_run_directory** | String   | Name of mesos slave run dir (Required)
+   **zk**                  | String   | Hostname of ZooKeeper instance used to resolve Aurora schedulers.
+   **zk_port**             | Integer  | Port of ZooKeeper instance used to locate Aurora schedulers (Default: 2181)
+   **scheduler_zk_path**   | String   | ZooKeeper path under which scheduler instances are registered.
+   **scheduler_uri**       | String   | URI of Aurora scheduler instance.
+   **proxy_url**           | String   | Used by the client to format URLs for display.
+   **auth_mechanism**      | String   | The authentication mechanism to use when communicating with the scheduler. (Default: UNAUTHENTICATED)
+
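+Putting these together, a single-cluster configuration file might look like the following
+sketch (all values are illustrative):
+
+```javascript
+[{
+  "name": "devcluster",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler",
+  "slave_root": "/var/lib/mesos",
+  "slave_run_directory": "latest",
+  "auth_mechanism": "UNAUTHENTICATED"
+}]
+```
+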
+#### name
+
+The name of the Aurora cluster represented by this entry. This name will be the `cluster` portion of
+any job keys identifying jobs running within the cluster.
+
+#### slave_root
+
+The path on the mesos slaves where executing tasks can be found. It is used in combination with the
+`slave_run_directory` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This value should match the value passed to `mesos-slave`
+as `-work_dir`.
+
+#### slave_run_directory
+
+The name of the directory where the task run can be found. This is used in combination with the
+`slave_root` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This should almost always be set to `latest`.
+
+#### zk
+
+The hostname of the ZooKeeper instance used to resolve the Aurora scheduler. Aurora uses ZooKeeper
+to elect a leader. The client will connect to this ZooKeeper instance to determine the current
+leader. This host should match the host passed to the scheduler as `-zk_endpoints`.
+
+#### zk_port
+
+The port on which the ZooKeeper instance is running. If not set this will default to the standard
+ZooKeeper port of 2181. This port should match the port in the host passed to the scheduler as
+`-zk_endpoints`.
+
+#### scheduler_zk_path
+
+The path on the ZooKeeper instance under which the Aurora serverset is registered. This value should
+match the value passed to the scheduler as `-serverset_path`.
+
+#### scheduler_uri
+
+The URI of the scheduler. This would be used in place of the ZooKeeper related configuration above
+in circumstances where direct communication with a single scheduler is needed (e.g. testing
+environments). It is strongly advised to **never** use this property for production deploys.
+
+#### proxy_url
+
+Instead of using the hostname of the leading scheduler as the base URL, if `proxy_url` is set, its
+value will be used instead. In that scenario the value for `proxy_url` would be, for example, the
+URL of your VIP in a load balancer or a round-robin DNS name.
+
+#### auth_mechanism
+
+The identifier of an authentication mechanism that the client should use when communicating with the
+scheduler. Support for values other than `UNAUTHENTICATED` requires a matching scheduler-side
+[security configuration](/documentation/0.11.0/security/).
diff --git a/source/documentation/0.11.0/client-commands.md b/source/documentation/0.11.0/client-commands.md
new file mode 100644
index 0000000..2780041
--- /dev/null
+++ b/source/documentation/0.11.0/client-commands.md
@@ -0,0 +1,379 @@
+Aurora Client Commands
+======================
+
+- [Introduction](#introduction)
+- [Cluster Configuration](#cluster-configuration)
+- [Job Keys](#job-keys)
+- [Modifying Aurora Client Commands](#modifying-aurora-client-commands)
+- [Regular Jobs](#regular-jobs)
+    - [Creating and Running a Job](#creating-and-running-a-job)
+    - [Running a Command On a Running Job](#running-a-command-on-a-running-job)
+    - [Killing a Job](#killing-a-job)
+    - [Updating a Job](#updating-a-job)
+        - [Coordinated job updates](#coordinated-job-updates)
+    - [Renaming a Job](#renaming-a-job)
+    - [Restarting Jobs](#restarting-jobs)
+- [Cron Jobs](#cron-jobs)
+- [Comparing Jobs](#comparing-jobs)
+- [Viewing/Examining Jobs](#viewingexamining-jobs)
+    - [Listing Jobs](#listing-jobs)
+    - [Inspecting a Job](#inspecting-a-job)
+    - [Checking Your Quota](#checking-your-quota)
+    - [Finding a Job on Web UI](#finding-a-job-on-web-ui)
+    - [Getting Job Status](#getting-job-status)
+    - [Opening the Web UI](#opening-the-web-ui)
+    - [SSHing to a Specific Task Machine](#sshing-to-a-specific-task-machine)
+    - [Templating Command Arguments](#templating-command-arguments)
+
+Introduction
+------------
+
+Once you have written an `.aurora` configuration file that describes
+your Job and its parameters and functionality, you interact with Aurora
+using Aurora Client commands. This document describes all of these commands
+and how and when to use them. All Aurora Client commands start with
+`aurora`, followed by the name of the specific command and its
+arguments.
+
+*Job keys* are a very common argument to Aurora commands, as well as the
+gateway to useful information about a Job. Before using Aurora, you
+should read the next section which describes them in detail. The section
+after that briefly describes how you can modify the behavior of certain
+Aurora Client commands, linking to a detailed document about how to do
+that.
+
+This is followed by the Regular Jobs section, which describes the basic
+Client commands for creating, running, and manipulating Aurora Jobs.
+After that are sections on Comparing Jobs and Viewing/Examining Jobs. In
+other words, various commands for getting information and metadata about
+Aurora Jobs.
+
+Cluster Configuration
+---------------------
+
+The client must be able to find a configuration file that specifies available clusters. This file
+declares shorthand names for clusters, which are in turn referenced by job configuration files
+and client commands.
+
+The client will load at most two configuration files, making both of their defined clusters
+available. The first is intended to be a system-installed cluster, using the path specified in
+the environment variable `AURORA_CONFIG_ROOT`, defaulting to `/etc/aurora/clusters.json` if the
+environment variable is not set. The second is a user-installed file, located at
+`~/.aurora/clusters.json`.
+
+A cluster configuration is formatted as JSON.  The simplest cluster configuration is one that
+communicates with a single (non-leader-elected) scheduler.  For example:
+
+```javascript
+[{
+  "name": "example",
+  "scheduler_uri": "localhost:55555",
+}]
+```
+
+A configuration for a leader-elected scheduler would contain something like:
+
+```javascript
+[{
+  "name": "example",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler"
+}]
+```
+
+For more details on cluster configuration see the
+[Client Cluster Configuration](/documentation/0.11.0/client-cluster-configuration/) documentation.
+
+Job Keys
+--------
+
+A job key is a unique system-wide identifier for an Aurora-managed
+Job, for example `cluster1/web-team/test/experiment204`. It is a 4-tuple
+consisting of, in order, *cluster*, *role*, *environment*, and
+*jobname*, separated by slashes. Cluster is the name of an Aurora
+cluster. Role is the Unix service account under which the Job
+runs. Environment is a namespace component like `devel`, `test`,
+`prod`, or `stagingN`. Jobname is the Job's name.
+
+The combination of all four values uniquely specifies the Job. If any
+one value is different from that of another job key, the two job keys
+refer to different Jobs. For example, job key
+`cluster1/tyg/prod/workhorse` is different from
+`cluster1/tyg/prod/workcamel` is different from
+`cluster2/tyg/prod/workhorse` is different from
+`cluster2/foo/prod/workhorse` is different from
+`cluster1/tyg/test/workhorse`.
+
+Role names are user accounts existing on the slave machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
+Modifying Aurora Client Commands
+--------------------------------
+
+For certain Aurora Client commands, you can define hook methods that run
+either before or after an action that takes place during the command's
+execution, as well as based on whether the action finished successfully or failed
+during execution. Basically, a hook is code that lets you extend the
+command's actions. The hook executes on the client side, specifically on
+the machine executing Aurora commands.
+
+Hooks can be associated with these Aurora Client commands:
+
+  - `job create`
+  - `job kill`
+  - `job restart`
+
+The process for writing and activating them is complex enough
+that we explain it in a devoted document, [Hooks for Aurora Client API](/documentation/0.11.0/hooks/).
+
+Regular Jobs
+------------
+
+This section covers Aurora commands related to running, killing,
+renaming, updating, and restarting a basic Aurora Job.
+
+### Creating and Running a Job
+
+    aurora job create <job key> <configuration file>
+
+Creates and then runs a Job with the specified job key based on a `.aurora` configuration file.
+The configuration file may also contain and activate hook definitions.
+
+### Running a Command On a Running Job
+
+    aurora task run CLUSTER/ROLE/ENV/NAME[/INSTANCES] <cmd>
+
+Runs a shell command on all machines currently hosting shards of a
+single Job.
+
+`run` supports the same command line wildcards used to populate a Job's
+commands; i.e. anything in the `{{mesos.*}}` and `{{thermos.*}}`
+namespaces.
+
+### Killing a Job
+
+    aurora job killall CLUSTER/ROLE/ENV/NAME
+
+Kills all Tasks associated with the specified Job, blocking until all
+are terminated. Defaults to killing all instances in the Job.
+
+The `<configuration file>` argument for `kill` is optional. Use it only
+if it contains hook definitions and activations that affect the
+kill command.
+
+### Updating a Job
+
+There are several sub-commands to manage job updates:
+
+    aurora update start <job key> <configuration file>
+    aurora update info <job key>
+    aurora update pause <job key>
+    aurora update resume <job key>
+    aurora update abort <job key>
+    aurora update list <cluster>
+
+When you `start` a job update, the command will return once it has sent the
+instructions to the scheduler.  At that point, you may view detailed
+progress for the update with the `info` subcommand, in addition to viewing
+graphical progress in the web browser.  You may also get a full listing of
+in-progress updates in a cluster with `list`.
+
+Once an update has been started, you can `pause` to keep the update but halt
+progress.  This can be useful for doing things like debugging a partially-updated
+job to determine whether you would like to proceed.  You can `resume` to
+proceed.
+
+You may `abort` a job update regardless of the state it is in. This will
+instruct the scheduler to completely abandon the job update and leave the job
+in the current (possibly partially-updated) state.
+
+#### Coordinated job updates
+
+Some Aurora services may benefit from having more control over updates by explicitly
+acknowledging ("heartbeating") job update progress. This may be helpful for mission-critical
+service updates where explicit job health monitoring is vital during the entire job update
+lifecycle. Such job updates would rely on an external service (or a custom client) periodically
+pulsing an active coordinated job update via a
+[pulseJobUpdate RPC](https://github.com/apache/aurora/blob/#{git_tag}/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+A coordinated update is defined by setting a positive
+[pulse_interval_secs](/documentation/0.11.0/configuration-reference/#updateconfig-objects) value in the job
+configuration file. If no pulses are received within the specified interval, the update will be blocked. A blocked
+update is unable to continue rolling forward (or rolling back) but retains its active status.
+It may only be unblocked by a fresh `pulseJobUpdate` call.
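+
+For instance, a job opting into coordinated updates might declare an `UpdateConfig` along
+these lines (the `batch_size` value is illustrative):
+
+    update_config = UpdateConfig(
+      batch_size = 2,
+      pulse_interval_secs = 60)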
+
+NOTE: A coordinated update starts in `ROLL_FORWARD_AWAITING_PULSE` state and will not make any
+progress until the first pulse arrives. However, a paused update (`ROLL_FORWARD_PAUSED` or
+`ROLL_BACK_PAUSED`) is still considered active and upon resuming will immediately make progress
+provided the pulse interval has not expired.
+
+### Renaming a Job
+
+Renaming is a tricky operation as downstream clients must be informed of
+the new name. A conservative approach
+to renaming suitable for production services is:
+
+1.  Modify the Aurora configuration file to change the role,
+    environment, and/or name as appropriate to the standardized naming
+    scheme.
+2.  Check that only these naming components have changed
+    with `aurora job diff`.
+
+        aurora job diff CLUSTER/ROLE/ENV/NAME <job_configuration>
+
+3.  Create the (identical) job at the new key. You may need to request a
+    temporary quota increase.
+
+        aurora job create CLUSTER/ROLE/ENV/NEW_NAME <job_configuration>
+
+4.  Migrate all clients over to the new job key. Update all links and
+    dashboards. Ensure that both job keys run identical versions of the
+    code while in this state.
+5.  After verifying that all clients have successfully moved over, kill
+    the old job.
+
+        aurora job killall CLUSTER/ROLE/ENV/NAME
+
+6.  If you received a temporary quota increase, be sure to let the
+    powers that be know you no longer need the additional capacity.
+
+### Restarting Jobs
+
+`restart` restarts all shards of the Job identified by the given job key:
+
+    aurora job restart CLUSTER/ROLE/ENV/NAME[/INSTANCES]
+
+Restarts are controlled on the client side, so aborting
+the `job restart` command halts the restart operation.
+
+**Note**: `job restart` only applies its command line arguments and neither
+uses nor is affected by `update.config`. Restarting
+does ***not*** involve a configuration change. To update the
+configuration, use `update.config`.
+
+The `--config` argument for restart is optional. Use it only
+if it contains hook definitions and activations that affect the
+`job restart` command.
+
+Cron Jobs
+---------
+
+You can manage cron jobs using the `aurora cron` command.  Please see
+[cron-jobs.md](/documentation/0.11.0/cron-jobs/) for more details.
+
+You will see various commands and options relating to cron jobs in
+`aurora -h` and similar. Ignore them, as they're not yet implemented.
+
+Comparing Jobs
+--------------
+
+    aurora job diff CLUSTER/ROLE/ENV/NAME <job configuration>
+
+Compares a job configuration against a running job. By default the diff
+is determined using `diff`, though you may choose an alternate
+diff program by specifying the `DIFF_VIEWER` environment variable.
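+
+For example, assuming `vimdiff` is installed on your machine:
+
+    DIFF_VIEWER=vimdiff aurora job diff devcluster/www-data/prod/hello hello_world.aurora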
+
+Viewing/Examining Jobs
+----------------------
+
+Above we discussed creating, killing, and updating Jobs. Here we discuss
+how to view and examine Jobs.
+
+### Listing Jobs
+
+    aurora config list <job configuration>
+
+Lists all Jobs registered with the Aurora scheduler in the named cluster for the named role.
+
+### Inspecting a Job
+
+    aurora job inspect CLUSTER/ROLE/ENV/NAME <job configuration>
+
+`inspect` verifies that its specified job can be parsed from a
+configuration file, and displays the parsed configuration.
+
+### Checking Your Quota
+
+    aurora quota get CLUSTER/ROLE
+
+Prints the production quota allocated to the role in the given
+cluster. Only non-[dedicated](/documentation/0.11.0/deploying-aurora-scheduler/#dedicated-attribute)
+[production](/documentation/0.11.0/configuration-reference/#job-objects) jobs consume quota.
+
+### Finding a Job on Web UI
+
+When you create a job, part of the output response contains a URL that goes
+to the job's scheduler UI page. For example:
+
+    vagrant@precise64:~$ aurora job create devcluster/www-data/prod/hello /vagrant/examples/jobs/hello_world.aurora
+    INFO] Creating job hello
+    INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+    INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+You can go to the scheduler UI page for this job via `http://precise64:8081/scheduler/www-data/prod/hello`.
+You can go to the overall scheduler UI page by trimming that URL back to the part ending at `scheduler`: `http://precise64:8081/scheduler`.
+
+Once you click through to a role page, you see Jobs arranged
+separately by pending jobs, active jobs and finished jobs.
+Jobs are arranged by role, typically a service account for
+production jobs and user accounts for test or development jobs.
+
+### Getting Job Status
+
+    aurora job status <job_key>
+
+Returns the status of recent tasks associated with the Job specified by
+`job_key` in its supplied cluster. Typically this includes
+a mix of active tasks (running or assigned) and inactive tasks
+(successful, failed, and lost).
+
+### Opening the Web UI
+
+Use the Job's web UI scheduler URL or the `aurora status` command to find out on which
+machines individual tasks are scheduled. You can open the web UI via the
+`aurora job open` command if invoked from your machine:
+
+    aurora job open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+If only the cluster is specified, it goes directly to that cluster's
+scheduler main page. If the role is specified, it goes to the top-level
+role page. If the full job key is specified, it goes directly to the job
+page where you can inspect individual tasks.
+
+### SSHing to a Specific Task Machine
+
+    aurora task ssh <job_key> <shard number>
+
+You can have the Aurora client ssh directly to the machine that has been
+assigned a particular Job/shard number. This may be useful for quickly
+diagnosing issues such as performance issues or abnormal behavior on a
+particular machine.
+
+### Templating Command Arguments
+
+    aurora task run [-e] [-t THREADS] <job_key> -- <<command-line>>
+
+Given a job specification, run the supplied command on all hosts and
+return the output. You may use the standard Mustache templating rules:
+
+- `{{thermos.ports[name]}}` substitutes the specific named port of the
+  task assigned to this machine
+- `{{mesos.instance}}` substitutes the shard id of the job's task
+  assigned to this machine
+- `{{thermos.task_id}}` substitutes the task id of the job's task
+  assigned to this machine
+
+For example, the following type of pattern can be a powerful diagnostic
+tool:
+
+    aurora task run -t5 cluster1/tyg/devel/seizure -- \
+      'curl -s -m1 localhost:{{thermos.ports[http]}}/vars | grep uptime'
+
+By default, the command runs in the Task's sandbox. The `-e` option can
+run the command in the executor's sandbox. This is mostly useful for
+Aurora administrators.
+
+You can parallelize the runs by using the `-t` option.
diff --git a/source/documentation/0.11.0/committers.md b/source/documentation/0.11.0/committers.md
new file mode 100644
index 0000000..e6b1646
--- /dev/null
+++ b/source/documentation/0.11.0/committers.md
@@ -0,0 +1,79 @@
+Setting up your email account
+-----------------------------
+Once your Apache ID has been set up, you can configure your account, add ssh keys, and set up an
+email forwarding address at
+
+  http://id.apache.org
+
+Additional instructions for setting up your new committer email can be found at
+
+  http://www.apache.org/dev/user-email.html
+
+The recommended setup is to configure all services (mailing lists, JIRA, ReviewBoard) to send
+emails to your @apache.org email address.
+
+
+Creating a gpg key for releases
+-------------------------------
+In order to create a release candidate you will need a gpg key published to an external key server,
+and that key will need to be added to our KEYS file as well.
+
+1. Create a key:
+
+               gpg --gen-key
+
+2. Add your gpg key to the Apache Aurora KEYS file:
+
+               git clone https://gitbox.apache.org/repos/asf/aurora.git
+               (gpg --list-sigs <KEY ID> && gpg --armor --export <KEY ID>) >> KEYS
+               git add KEYS && git commit -m "Adding gpg key for <APACHE ID>"
+               ./rbt post -o -g
+
+3. Publish the key to an external key server:
+
+               gpg --keyserver pgp.mit.edu --send-keys <KEY ID>
+
+4. Upload the updated KEYS file to the Apache Aurora svn dist locations listed below:
+
+               https://dist.apache.org/repos/dist/dev/aurora/KEYS
+               https://dist.apache.org/repos/dist/release/aurora/KEYS
+
+5. Add your key to git config for use with the release scripts:
+
+               git config --global user.signingkey <KEY ID>
+
+
+Creating a release
+------------------
+The following will guide you through the steps to create a release candidate, vote, and finally an
+official Apache Aurora release. Before starting, your gpg key should be in the KEYS file and you
+must have access to commit to the dist.a.o repositories.
+
+1. Ensure that all issues resolved for this release candidate are tagged with the correct Fix
+Version in Jira; the changelog script will use this to generate the CHANGELOG in step #2.
+
+2. Create a release candidate. This will automatically update the CHANGELOG and commit it, create a
+branch, and update the current version within the trunk. To create a minor version update and
+publish it, run:
+
+               ./build-support/release/release-candidate -l m -p
+
+3. Update, if necessary, the draft email created from the `release-candidate` script in step #2 and
+send the [VOTE] email to the dev@ and private@ mailing lists. You can verify the release signature
+and checksums by running:
+
+               ./build-support/release/verify-release-candidate
+
+4. Wait for the vote to complete. If the vote fails, address any issues, go back to step #1, and
+run again; this time use the `-r` flag to increment the release candidate version. This will
+automatically clean up the release candidate rc0 branch and source distribution.
+
+               ./build-support/release/release-candidate -l m -r 1 -p
+
+5. Once the vote has successfully passed, create the release:
+
+               ./build-support/release/release
+
+6. Update the draft email created from the `release` script in step #5 to include the Apache IDs of
+all binding votes and send the [RESULT][VOTE] email to the dev@ and private@ mailing lists.
+
diff --git a/source/documentation/0.11.0/configuration-reference.md b/source/documentation/0.11.0/configuration-reference.md
new file mode 100644
index 0000000..7fbdc38
--- /dev/null
+++ b/source/documentation/0.11.0/configuration-reference.md
@@ -0,0 +1,690 @@
+Aurora + Thermos Configuration Reference
+========================================
+
+- [Aurora + Thermos Configuration Reference](#aurora--thermos-configuration-reference)
+- [Introduction](#introduction)
+- [Process Schema](#process-schema)
+    - [Process Objects](#process-objects)
+      - [name](#name)
+      - [cmdline](#cmdline)
+      - [max_failures](#max_failures)
+      - [daemon](#daemon)
+      - [ephemeral](#ephemeral)
+      - [min_duration](#min_duration)
+      - [final](#final)
+      - [logger](#logger)
+- [Task Schema](#task-schema)
+    - [Task Object](#task-object)
+      - [name](#name-1)
+      - [processes](#processes)
+        - [constraints](#constraints)
+      - [resources](#resources)
+      - [max_failures](#max_failures-1)
+      - [max_concurrency](#max_concurrency)
+      - [finalization_wait](#finalization_wait)
+    - [Constraint Object](#constraint-object)
+    - [Resource Object](#resource-object)
+- [Job Schema](#job-schema)
+    - [Job Objects](#job-objects)
+    - [Services](#services)
+    - [Revocable Jobs](#revocable-jobs)
+    - [UpdateConfig Objects](#updateconfig-objects)
+    - [HealthCheckConfig Objects](#healthcheckconfig-objects)
+    - [Announcer Objects](#announcer-objects)
+    - [Container Objects](#container)
+    - [LifecycleConfig Objects](#lifecycleconfig-objects)
+- [Specifying Scheduling Constraints](#specifying-scheduling-constraints)
+- [Template Namespaces](#template-namespaces)
+    - [mesos Namespace](#mesos-namespace)
+    - [thermos Namespace](#thermos-namespace)
+- [Basic Examples](#basic-examples)
+    - [hello_world.aurora](#hello_worldaurora)
+    - [Environment Tailoring](#environment-tailoring)
+      - [hello_world_productionized.aurora](#hello_world_productionizedaurora)
+
+Introduction
+============
+
+Don't know where to start? The Aurora configuration schema is very
+powerful, and configurations can become quite complex for advanced use
+cases.
+
+For examples of simple configurations to get something up and running
+quickly, check out the [Tutorial](/documentation/0.11.0/tutorial/). When you feel comfortable with the basics, move
+on to the [Configuration Tutorial](/documentation/0.11.0/configuration-tutorial/) for more in-depth coverage of
+configuration design.
+
+For additional basic configuration examples, see [the end of this document](#basic-examples).
+
+Process Schema
+==============
+
+Process objects consist of required `name` and `cmdline` attributes. You can customize Process
+behavior with its optional attributes. Remember, Processes are handled by Thermos.
+
+### Process Objects
+
+  **Attribute Name**  | **Type**    | **Description**
+  ------------------- | :---------: | ---------------------------------
+   **name**           | String      | Process name (Required)
+   **cmdline**        | String      | Command line (Required)
+   **max_failures**   | Integer     | Maximum process failures (Default: 1)
+   **daemon**         | Boolean     | When True, this is a daemon process. (Default: False)
+   **ephemeral**      | Boolean     | When True, this is an ephemeral process. (Default: False)
+   **min_duration**   | Integer     | Minimum duration between process restarts in seconds. (Default: 15)
+   **final**          | Boolean     | When True, this process is a finalizing one that should run last. (Default: False)
+   **logger**         | Logger      | Struct defining the log behavior for the process. (Default: Empty)
+
+#### name
+
+The name is any valid UNIX filename string (specifically no
+slashes, NULLs or leading periods). Within a Task object, each Process name
+must be unique.
+
+#### cmdline
+
+The command line run by the process. The command line is invoked in a bash
+subshell, so it can involve full-blown bash scripts. However, nothing is
+supplied for command-line arguments so `$*` is unspecified.
+
+#### max_failures
+
+The maximum number of failures (non-zero exit statuses) this process can
+have before being marked permanently failed and not retried. If a
+process permanently fails, Thermos looks at the failure limit of the task
+containing the process (usually 1) to determine if the task has
+failed as well.
+
+Setting `max_failures` to 0 makes the process retry
+indefinitely until it achieves a successful (zero) exit status.
+It retries at most once every `min_duration` seconds to prevent
+an effective denial of service attack on the coordinating Thermos scheduler.
+
+#### daemon
+
+By default, Thermos processes are non-daemon. If `daemon` is set to True, a
+successful (zero) exit status does not prevent future process runs.
+Instead, the process reinvokes after `min_duration` seconds.
+However, the maximum failure limit still applies. A combination of
+`daemon=True` and `max_failures=0` causes a process to retry
+indefinitely regardless of exit status. This should be avoided
+for very short-lived processes because of the accumulation of
+checkpointed state for each process run. When running in Mesos
+specifically, `max_failures` is capped at 100.
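+
+As a sketch, a supervisor-style process that is re-run after every exit, successful or
+not, could be declared like this (the name and command line are illustrative):
+
+        monitor = Process(
+          name = 'monitor',
+          cmdline = './monitor.sh',
+          daemon = True,
+          max_failures = 0)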
+
+#### ephemeral
+
+By default, Thermos processes are non-ephemeral. If `ephemeral` is set to
+True, the process' status is not used to determine if its containing task
+has completed. For example, consider a task with a non-ephemeral
+webserver process and an ephemeral logsaver process
+that periodically checkpoints its log files to a centralized data store.
+The task is considered finished once the webserver process has
+completed, regardless of the logsaver's current status.
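+
+A sketch of the webserver/logsaver arrangement just described (process names, command
+lines, and resource amounts are illustrative):
+
+        webserver = Process(
+          name = 'webserver',
+          cmdline = './run_server.sh')
+
+        logsaver = Process(
+          name = 'logsaver',
+          cmdline = './checkpoint_logs.sh',
+          daemon = True,
+          ephemeral = True)
+
+        task = Task(
+          processes = [webserver, logsaver],
+          resources = Resources(cpu = 1.0, ram = 128*MB, disk = 128*MB))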
+
+#### min_duration
+
+Processes may succeed or fail multiple times during a single task's
+duration. Each of these is called a *process run*. `min_duration` is
+the minimum number of seconds the scheduler waits before running the
+same process.
+
+#### final
+
+Processes can be grouped into two classes: ordinary processes and
+finalizing processes. By default, Thermos processes are ordinary. They
+run as long as the task is considered healthy (i.e., no failure
+limits have been reached.) But once all regular Thermos processes
+finish or the task reaches a certain failure threshold, it
+moves into a "finalization" stage and runs all finalizing
+processes. These are typically processes necessary for cleaning up the
+task, such as log checkpointers, or perhaps e-mail notifications that
+the task completed.
+
+Finalizing processes may not depend upon ordinary processes or
+vice-versa; however, finalizing processes may depend upon other
+finalizing processes and otherwise run as a typical process
+schedule.
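+
+For example, a finalizing log checkpointer might be declared like this sketch (name and
+command line are illustrative):
+
+        checkpoint_logs = Process(
+          name = 'checkpoint_logs',
+          cmdline = './copy_logs_to_hdfs.sh',
+          final = True)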
+
+#### logger
+
+The default behavior of Thermos is to allow stderr/stdout logs to grow unbounded. In the event
+that you have large log volume, you may want to configure Thermos to automatically rotate logs
+after they grow to a certain size, which can prevent your job from using more than its allocated
+disk space.
+
+A Logger union consists of a mode enum and a rotation policy. Rotation policies only apply to
+loggers whose mode is `rotate`. The acceptable values for the LoggerMode enum are `standard`
+and `rotate`. The rotation policy applies to both stderr and stdout.
+
+By default, all processes use the `standard` LoggerMode.
+
+  **Attribute Name**  | **Type**     | **Description**
+  ------------------- | :----------: | ---------------------------------
+   **mode**           | LoggerMode   | Mode of the logger. (Required)
+   **rotate**         | RotatePolicy | An optional rotation policy.
+
+A RotatePolicy describes log rotation behavior for when `mode` is set to `rotate`. It is ignored
+otherwise.
+
+  **Attribute Name**  | **Type**     | **Description**
+  ------------------- | :----------: | ---------------------------------
+   **log_size**       | Integer      | Maximum size (in bytes) of an individual log file. (Default: 100 MiB)
+   **backups**        | Integer      | The maximum number of backups to retain. (Default: 5)
+
+An example process configuration is as follows:
+
+        process = Process(
+          name='process',
+          logger=Logger(
+            mode=LoggerMode('rotate'),
+            rotate=RotatePolicy(log_size=5*MB, backups=5)
+          )
+        )
+
+Task Schema
+===========
+
+Tasks fundamentally consist of a `name` and a list of Process objects stored as the
+value of the `processes` attribute. Processes can be further constrained with
+`constraints`. By default, `name`'s value inherits from the first Process in the
+`processes` list, so for simple `Task` objects with one Process, `name`
+can be omitted. In Mesos, `resources` is also required.
+
+### Task Object
+
+   **param**               | **type**                         | **description**
+   ---------               | :---------:                      | ---------------
+   ```name```              | String                           | Task name (Required) (Default: ```processes[0].name```)
+   ```processes```         | List of ```Process``` objects    | List of ```Process``` objects bound to this task. (Required)
+   ```constraints```       | List of ```Constraint``` objects | List of ```Constraint``` objects constraining processes.
+   ```resources```         | ```Resource``` object            | Resource footprint. (Required)
+   ```max_failures```      | Integer                          | Maximum process failures before being considered failed (Default: 1)
+   ```max_concurrency```   | Integer                          | Maximum number of concurrent processes (Default: 0, unlimited concurrency.)
+   ```finalization_wait``` | Integer                          | Amount of time allocated for finalizing processes, in seconds. (Default: 30)
+
+#### name
+`name` is a string denoting the name of this task. It defaults to the name of the first Process in
+the list of Processes associated with the `processes` attribute.
+
+#### processes
+
+`processes` is an unordered list of `Process` objects. To constrain the order
+in which they run, use `constraints`.
+
+##### constraints
+
+A list of `Constraint` objects. Currently it supports only one type,
+the `order` constraint. `order` is a list of process names
+that should run in the order given. For example,
+
+        process = Process(cmdline = "echo hello {{name}}")
+        task = Task(name = "echoes",
+                    processes = [process(name = "jim"), process(name = "bob")],
+                    constraints = [Constraint(order = ["jim", "bob"])])
+
+Constraints can be supplied ad-hoc and in duplicate. Not all
+Processes need be constrained; however, Tasks with cycles are
+rejected by the Thermos scheduler.
+
+Use the `order` function as shorthand to generate `Constraint` lists.
+The following:
+
+        order(process1, process2)
+
+is shorthand for
+
+        [Constraint(order = [process1.name(), process2.name()])]
+
+The `order` function accepts Process name strings `('foo', 'bar')` or the processes
+themselves, e.g. `foo=Process(name='foo', ...)`, `bar=Process(name='bar', ...)`,
+`constraints=order(foo, bar)`.
+
+
+#### resources
+
+Takes a `Resource` object, which specifies the amounts of CPU, memory, and disk space resources
+to allocate to the Task.
+
+#### max_failures
+
+`max_failures` is the number of failed processes needed for the `Task` to be
+marked as failed.
+
+For example, assume a Task has two Processes and a `max_failures` value of `2`:
+
+        template = Process(max_failures=10)
+        task = Task(
+          name = "fail",
+          processes = [
+             template(name = "failing", cmdline = "exit 1"),
+             template(name = "succeeding", cmdline = "exit 0")
+          ],
+          max_failures=2)
+
+The `failing` Process could fail 10 times before being marked as permanently
+failed, and the `succeeding` Process could succeed on the first run. However,
+the task would succeed despite only allowing for two failed processes. To be more
+specific, there would be 10 failed process runs yet 1 failed process. Both processes
+would have to fail for the Task to fail.
+
+
+
+#### max_concurrency
+
+For Tasks with a number of expensive but otherwise independent
+processes, you may want to limit the amount of concurrency
+the Thermos scheduler provides rather than artificially constraining
+it via `order` constraints. For example, a test framework may
+generate a task with 100 test run processes, but wants to run it on
+a machine with only 4 cores. You can limit the amount of parallelism to
+4 by setting `max_concurrency=4` in your task configuration.
+
+For example, the following task spawns 180 Processes ("mappers")
+to compute individual elements of a 180 degree sine table, all feeding
+one final Process ("reducer") that tabulates the results:
+
+    def make_mapper(id):
+      return Process(
+        name = "mapper%03d" % id,
+        cmdline = "echo 'scale=50;s(%d*4*a(1)/180)' | bc -l > temp.sine_table.%03d"
+                  % (id, id))
+
+    def make_reducer():
+      return Process(name = "reducer",
+                     cmdline = "cat temp.* | nl > sine_table.txt && rm -f temp.*")
+
+    processes = map(make_mapper, range(180))
+
+    task = Task(
+      name = "mapreduce",
+      processes = processes + [make_reducer()],
+      constraints = [Constraint(order = [mapper.name(), 'reducer']) for mapper
+                     in processes],
+      max_concurrency = 8)
+
+#### finalization_wait
+
+Tasks have three active stages: `ACTIVE`, `CLEANING`, and `FINALIZING`. The
+`ACTIVE` stage is when ordinary processes run. This stage lasts as
+long as Processes are running and the Task is healthy. The moment either
+all Processes have finished successfully or the Task has reached a
+maximum Process failure limit, it goes into `CLEANING` stage and sends
+SIGTERMs to all currently running Processes and their process trees.
+Once all Processes have terminated, the Task goes into `FINALIZING` stage
+and invokes the schedule of all Processes with the "final" attribute set to True.
+
+This whole process from the end of `ACTIVE` stage to the end of `FINALIZING`
+must happen within `finalization_wait` seconds. If it does not
+finish during that time, all remaining Processes are sent SIGKILLs
+(or if they depend upon uncompleted Processes, are
+never invoked.)
+
+Client applications with higher priority may force a shorter
+finalization wait (e.g. through parameters to `thermos kill`), so this
+is mostly a best-effort signal.
+
+
+### Constraint Object
+
+Current constraint objects only support a single ordering constraint, `order`,
+which specifies that its processes run sequentially in the order given. By
+default, all processes run in parallel when bound to a `Task` without
+ordering constraints.
+
+   param | type           | description
+   ----- | :----:         | -----------
+   order | List of String | List of processes by name (String) that should be run serially.
+
+### Resource Object
+
+Specifies the amount of CPU, RAM, and disk resources the task needs. See the
+[Resource Isolation document](/documentation/0.11.0/resources/) for suggested values and to understand how
+resources are allocated.
+
+  param      | type    | description
+  -----      | :----:  | -----------
+  ```cpu```  | Float   | Fractional number of cores required by the task.
+  ```ram```  | Integer | Bytes of RAM required by the task.
+  ```disk``` | Integer | Bytes of disk required by the task.
+
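+In an `.aurora` file this struct is instantiated as `Resources`; a typical declaration
+looks like this sketch (amounts are illustrative):
+
+        resources = Resources(cpu = 1.0, ram = 128*MB, disk = 256*MB)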
+
+Job Schema
+==========
+
+### Job Objects
+
+   name | type | description
+   ------ | :-------: | -------
+  ```task``` | Task | The Task object to bind to this job. Required.
+  ```name``` | String | Job name. (Default: inherited from the task attribute's name)
+  ```role``` | String | Job role account. Required.
+  ```cluster``` | String | Cluster in which this job is scheduled. Required.
+   ```environment``` | String | Job environment, default ```devel```. Must be one of ```prod```, ```devel```, ```test``` or ```staging<number>```.
+  ```contact``` | String | Best email address to reach the owner of the job. For production jobs, this is usually a team mailing list.
+  ```instances```| Integer | Number of instances (sometimes referred to as replicas or shards) of the task to create. (Default: 1)
+   ```cron_schedule``` | String | Cron schedule in cron format. May only be used with non-service jobs. See [Cron Jobs](/documentation/0.11.0/cron-jobs/) for more information. Default: None (not a cron job.)
+  ```cron_collision_policy``` | String | Policy to use when a cron job is triggered while a previous run is still active. `KILL_EXISTING`: kill the previous run and schedule the new run. `CANCEL_NEW`: let the previous run continue and cancel the new run. (Default: KILL_EXISTING)
+  ```update_config``` | ```UpdateConfig``` object | Parameters for controlling the rate and policy of rolling updates.
+  ```constraints``` | dict | Scheduling constraints for the tasks. See the section on the [constraint specification language](#Specifying-Scheduling-Constraints)
+  ```service``` | Boolean | If True, restart tasks regardless of success or failure. (Default: False)
+  ```max_task_failures``` | Integer | Maximum number of failures after which the task is considered to have failed (Default: 1) Set to -1 to allow for infinite failures
+  ```priority``` | Integer | Preemption priority to give the task (Default 0). Tasks with higher priorities may preempt tasks at lower priorities.
+  ```production``` | Boolean |  Whether or not this is a production task that may [preempt](/documentation/0.11.0/resources/#task-preemption) other tasks (Default: False). Production job role must have the appropriate [quota](/documentation/0.11.0/resources/#resource-quota).
+  ```health_check_config``` | ```HealthCheckConfig``` object | Parameters for controlling a task's health checks. HTTP health check is only used if a health port was assigned with a command line wildcard.
+  ```container``` | ```Container``` object | An optional container to run all processes inside of.
+  ```lifecycle``` | ```LifecycleConfig``` object | An optional task lifecycle configuration that dictates commands to be executed on startup/teardown.  HTTP lifecycle is enabled by default if the "health" port is requested.  See [LifecycleConfig Objects](#lifecycleconfig-objects) for more information.
+  ```tier``` | String | Task tier type. When set to `revocable`, it requires the task to run with Mesos revocable resources. This is work [in progress](https://issues.apache.org/jira/browse/AURORA-1343) and is currently only supported for revocable tasks. The ultimate goal is to simplify task configuration by hiding various configuration knobs behind a task tier definition. See AURORA-1343 and AURORA-1443 for more details.
+
+### Services
+
+Jobs with the `service` flag set to True are called Services. The `Service`
+alias can be used as shorthand for `Job` with `service=True`.
+Services are differentiated from non-service Jobs in that tasks
+always restart on completion, whether successful or unsuccessful.
+Jobs without the service bit set only restart up to
+`max_task_failures` times and only if they terminated unsuccessfully
+either due to human error or machine failure.
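+
+For example, the following two definitions are equivalent sketches (reusing
+the `hello_world_task` defined in the Basic Examples section below):
+
+    hello_world_service = Service(
+      name = 'hello_world',
+      role = os.getenv('USER'),
+      cluster = 'cluster1',
+      task = hello_world_task)
+
+    hello_world_service = Job(
+      name = 'hello_world',
+      role = os.getenv('USER'),
+      cluster = 'cluster1',
+      service = True,
+      task = hello_world_task)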
+
+### Revocable Jobs
+
+**WARNING**: This feature is currently in alpha status. Do not use it in production clusters!
+
+Mesos [supports a concept of revocable tasks](http://mesos.apache.org/documentation/latest/oversubscription/)
+by oversubscribing machine resources by the amount deemed safe to not affect the existing
+non-revocable tasks. Aurora now supports revocable jobs via the `tier` setting, when set to the
+`revocable` value.
+
+More implementation details are available in this [ticket](https://issues.apache.org/jira/browse/AURORA-1343).
+
+The scheduler must be [configured](/documentation/0.11.0/deploying-aurora-scheduler/#configuring-resource-oversubscription)
+to receive revocable offers from Mesos and accept revocable jobs. If not configured properly,
+revocable tasks will never get assigned to hosts and will stay in `PENDING`.
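+
+For example, a job opts into revocable resources as follows (a sketch):
+
+    revocable_job = Job(
+      ...
+      tier = 'revocable')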
+
+### UpdateConfig Objects
+
+Parameters for controlling the rate and policy of rolling updates.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+| ```restart_threshold```      | Integer  | Maximum number of seconds a shard may take to move into the ```RUNNING``` state before being considered a failure (Default: 60)
+| ```watch_secs```             | Integer  | Minimum number of seconds a shard must remain in the ```RUNNING``` state before being considered a success (Default: 45)
+| ```max_per_shard_failures``` | Integer  | Maximum number of restarts per shard during update. Increments total failure count when this limit is exceeded. (Default: 0)
+| ```max_total_failures```     | Integer  | Maximum number of shard failures to be tolerated in total during an update. Cannot be greater than or equal to the total number of tasks in a job. (Default: 0)
+| ```rollback_on_failure```    | boolean  | When False, prevents auto rollback of a failed update (Default: True)
+| ```wait_for_batch_completion```| boolean | When True, all threads from a given batch will be blocked from picking up new instances until the entire batch is updated. This essentially simulates the legacy sequential updater algorithm. (Default: False)
+| ```pulse_interval_secs```    | Integer  |  Indicates a [coordinated update](/documentation/0.11.0/client-commands/#coordinated-job-updates). If no pulses are received within the provided interval the update will be blocked. Beta-updater only. Will fail on submission when used with client updater. (Default: None)
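+
+For example, an update that moves two shards at a time and tolerates one
+restart per shard might use (values are illustrative):
+
+    update_config = UpdateConfig(
+      batch_size = 2,
+      watch_secs = 60,
+      max_per_shard_failures = 1)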
+
+### HealthCheckConfig Objects
+
+*Note: ```endpoint```, ```expected_response``` and ```expected_response_code``` are deprecated from ```HealthCheckConfig``` and must be defined in ```HttpHealthChecker```.*
+
+Parameters for controlling a task's health checks via HTTP or a shell command.
+
+| param                          | type      | description
+| -------                        | :-------: | --------
+| *```endpoint```*               | String    | HTTP endpoint to check (Default: /health) **Deprecated.**
+| *```expected_response```*      | String    | If not empty, fail the HTTP health check if the response differs. Case insensitive. (Default: ok) **Deprecated.**
+| *```expected_response_code```* | Integer   | If not zero, fail the HTTP health check if the response code differs. (Default: 0) **Deprecated.**
+| ```health_checker```           | HealthCheckerConfig | Configure what kind of health check to use.
+| ```initial_interval_secs```    | Integer   | Initial delay for performing a health check. (Default: 15)
+| ```interval_secs```            | Integer   | Interval on which to check the task's health. (Default: 10)
+| ```max_consecutive_failures``` | Integer   | Maximum number of consecutive failures that will be tolerated before considering a task unhealthy (Default: 0)
+| ```timeout_secs```             | Integer   | Health check timeout. (Default: 1)
+
+### HealthCheckerConfig Objects
+| param                          | type                | description
+| -------                        | :-------:           | --------
+| ```http```                     | HttpHealthChecker  | Configure health check to use HTTP. (Default)
+| ```shell```                    | ShellHealthChecker | Configure health check via a shell command.
+
+
+### HttpHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```endpoint```                 | String    | HTTP endpoint to check (Default: /health)
+| ```expected_response```        | String    | If not empty, fail the HTTP health check if the response differs. Case insensitive. (Default: ok)
+| ```expected_response_code```   | Integer   | If not zero, fail the HTTP health check if the response code differs. (Default: 0)
+
+### ShellHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```shell_command```            | String    | An alternative to HTTP health checking. Specifies a shell command that will be executed. Any non-zero exit status will be interpreted as a health check failure.
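+
+For example, a job can opt into shell-based health checking as follows
+(a sketch; the command is illustrative):
+
+    health_check_config = HealthCheckConfig(
+      health_checker = HealthCheckerConfig(
+        shell = ShellHealthChecker(
+          shell_command = 'nc -z localhost {{thermos.ports[http]}}')),
+      interval_secs = 30,
+      max_consecutive_failures = 3)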
+
+
+### Announcer Objects
+
+If the `announce` field in the Job configuration is set, each task will be
+registered in the ServerSet `/aurora/role/environment/jobname` in the
+zookeeper ensemble configured by the executor.  If no Announcer object is specified,
+no announcement will take place.  For more information about ServerSets, see the [User Guide](/documentation/0.11.0/user-guide/).
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```primary_port```             | String    | Which named port to register as the primary endpoint in the ServerSet (Default: `http`)
+| ```portmap```                  | dict      | A mapping of additional endpoints to be announced in the ServerSet (Default: `{ 'aurora': '{{primary_port}}' }`)
+
+### Port aliasing with the Announcer `portmap`
+
+The primary endpoint registered in the ServerSet is the one allocated to the port
+specified by the `primary_port` in the `Announcer` object, by default
+the `http` port.  This port can be referenced from anywhere within a configuration
+as `{{thermos.ports[http]}}`.
+
+Without the port map, each named port would be allocated a unique port number.
+The `portmap` allows two different named ports to be aliased together.  The default
+`portmap` aliases the `aurora` port (i.e. `{{thermos.ports[aurora]}}`) to
+the `http` port.  Even though the two ports can be referenced independently,
+only one port is allocated by Mesos.  Any port referenced in a `Process` object
+that is not in the portmap will be allocated dynamically by Mesos and announced as well.
+
+It is possible to use the portmap to alias names to static port numbers, e.g.
+`{'http': 80, 'https': 443, 'aurora': 'http'}`.  In this case, referencing
+`{{thermos.ports[aurora]}}` would look up `{{thermos.ports[http]}}` then
+find a static port 80.  No port would be requested of or allocated by Mesos.
+
+Static ports should be used cautiously as Aurora does nothing to prevent two
+tasks with the same static port allocations from being co-scheduled.
+External constraints such as slave attributes should be used to enforce such
+guarantees should they be needed.
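+
+For example, the static-port aliasing described above would be written as
+(a sketch):
+
+    announce = Announcer(
+      primary_port = 'http',
+      portmap = {'http': 80, 'https': 443, 'aurora': 'http'})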
+
+### Container Object
+
+*Note: The only container type currently supported is "docker".  Docker support is currently EXPERIMENTAL.*
+*Note: In order to correctly execute processes inside a job, the Docker container must have Python 2.7 installed.*
+
+Describes the container the job's processes will run inside.
+
+  param          | type           | description
+  -----          | :----:         | -----------
+  ```docker```   | Docker         | A docker container to use.
+
+### Docker Object
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```image```      | String          | The name of the docker image to execute.  If the image does not exist locally it will be pulled with ```docker pull```.
+  ```parameters``` | List(Parameter) | Additional parameters to pass to the docker containerizer.
+
+### Docker Parameter Object
+
+Docker CLI parameters. This needs to be enabled by the scheduler `enable_docker_parameters` option.
+See [Docker Command Line Reference](https://docs.docker.com/reference/commandline/run/) for valid parameters. 
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```name```       | String          | The name of the docker parameter. E.g. volume
+  ```value```      | String          | The value of the parameter. E.g. /usr/local/bin:/usr/bin:rw
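+
+Putting the pieces together, a job that runs inside a Docker container
+might look like the following sketch (the image name and volume parameter
+are illustrative, and `Parameter` is assumed to be the Pystachio type for
+the parameter object above):
+
+    job = Job(
+      ...
+      container = Container(
+        docker = Docker(
+          image = 'python:2.7',
+          parameters = [Parameter(name = 'volume',
+                                  value = '/etc/myapp:/etc/myapp:ro')])))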
+
+### LifecycleConfig Objects
+
+*Note: The only lifecycle configuration supported is the HTTP lifecycle via the HTTPLifecycleConfig.*
+
+  param          | type                | description
+  -----          | :----:              | -----------
+  ```http```     | HTTPLifecycleConfig | Configure the lifecycle manager to send lifecycle commands to the task via HTTP.
+
+### HTTPLifecycleConfig Objects
+
+  param          | type            | description
+  -----          | :----:          | -----------
+  ```port```     | String          | The named port to send POST commands to (Default: health)
+  ```graceful_shutdown_endpoint``` | String | Endpoint to hit to indicate that a task should gracefully shutdown. (Default: /quitquitquit)
+  ```shutdown_endpoint``` | String | Endpoint to hit to give a task its final warning before being killed. (Default: /abortabortabort)
+
+#### graceful_shutdown_endpoint
+
+If the Job is listening on the port as specified by the HTTPLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint to request that the task gracefully shut itself down.  This is a
+courtesy call before the `shutdown_endpoint` is invoked a fixed amount of
+time later.
+
+#### shutdown_endpoint
+
+If the Job is listening on the port as specified by the HTTPLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint as a final warning before the task is shut down.  If the task
+does not shut down on its own after this, it will be forcefully killed.
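+
+For example, the defaults written out explicitly look like this (a sketch):
+
+    lifecycle = LifecycleConfig(
+      http = HTTPLifecycleConfig(
+        port = 'health',
+        graceful_shutdown_endpoint = '/quitquitquit',
+        shutdown_endpoint = '/abortabortabort'))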
+
+
+Specifying Scheduling Constraints
+=================================
+
+In the `Job` object there is a map `constraints` from String to String
+allowing the user to tailor the schedulability of tasks within the job.
+
+Each slave in the cluster is assigned a set of string-valued
+key/value pairs called attributes. For example, consider the host
+`cluster1-aaa-03-sr2` and its following attributes (given in key:value
+format): `host:cluster1-aaa-03-sr2` and `rack:aaa`.
+
+The constraint map's key value is the attribute name in which we
+constrain Tasks within our Job. The value is how we constrain them.
+There are two types of constraints: *limit constraints* and *value
+constraints*.
+
+| constraint    | description
+| ------------- | --------------
+| Limit         | A string that specifies a limit for a constraint. Starts with <code>'limit:</code> followed by an Integer and a closing single quote, such as ```'limit:1'```.
+| Value         | A string that specifies a value for a constraint. To include a list of values, separate the values using commas. To negate the values of a constraint, start with a ```!```.
+
+You can also control machine diversity using constraints. The below
+constraint ensures that no more than two instances of your job may run
+on a single host. Think of this as a "group by" limit.
+
+    constraints = {
+      'host': 'limit:2',
+    }
+
+Likewise, you can use constraints to control rack diversity, e.g. at
+most one task per rack:
+
+    constraints = {
+      'rack': 'limit:1',
+    }
+
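+Value constraints are expressed the same way. For example, using the
+example attributes above, the following pins a job's tasks onto hosts in
+rack `aaa`:
+
+    constraints = {
+      'rack': 'aaa',
+    }
+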
+Use these constraints sparingly as they can dramatically reduce Tasks' schedulability.
+
+Template Namespaces
+===================
+
+Currently, a few Pystachio namespaces have special semantics. Using them
+in your configuration allows you to tailor application behavior
+through environment introspection or interact in special ways with the
+Aurora client or Aurora-provided services.
+
+### mesos Namespace
+
+The `mesos` namespace contains variables which relate to the `mesos` slave
+which launched the task. The `instance` variable can be used
+to distinguish between Task replicas.
+
+| variable name     | type       | description
+| --------------- | :--------: | -------------
+| ```instance```    | Integer    | The instance number of the created task. A job with 5 replicas has instance numbers 0, 1, 2, 3, and 4.
+| ```hostname``` | String | The instance hostname that the task was launched on.
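+
+For example, `instance` can parameterize a command line so that each
+replica works on its own shard (a sketch; the binary and flag are
+illustrative):
+
+    process = Process(
+      name = 'worker',
+      cmdline = './worker --shard-id={{mesos.instance}}')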
+
+### thermos Namespace
+
+The `thermos` namespace contains variables that work directly on the
+Thermos platform in addition to Aurora. This namespace is fully
+compatible with Tasks invoked via the `thermos` CLI.
+
+| variable      | type                     | description                        |
+| :----------:  | ---------                | ------------                       |
+| ```ports```   | map of string to Integer | A map of names to port numbers     |
+| ```task_id``` | string                   | The task ID assigned to this task. |
+
+The `thermos.ports` namespace is automatically populated by Aurora when
+invoking tasks on Mesos. When running the `thermos` command directly,
+these ports must be explicitly mapped with the `-P` option.
+
+For example, if `{{thermos.ports[http]}}` is specified in a `Process`
+configuration, it is automatically extracted and auto-populated by
+Aurora, but must be specified with, for example, `thermos -P http:12345`
+to map `http` to port 12345 when running via the CLI.
+
+Basic Examples
+==============
+
+These are provided to give a basic understanding of simple Aurora jobs.
+
+### hello_world.aurora
+
+Put the following in a file named `hello_world.aurora`, substituting your own
+values for fields such as `cluster`.
+
+    import os
+    hello_world_process = Process(name = 'hello_world', cmdline = 'echo hello world')
+
+    hello_world_task = Task(
+      resources = Resources(cpu = 0.1, ram = 16 * MB, disk = 16 * MB),
+      processes = [hello_world_process])
+
+    hello_world_job = Job(
+      cluster = 'cluster1',
+      role = os.getenv('USER'),
+      task = hello_world_task)
+
+    jobs = [hello_world_job]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world hello_world.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world
+
+### Environment Tailoring
+
+#### hello_world_productionized.aurora
+
+Put the following in a file named `hello_world_productionized.aurora`, substituting your own
+values for fields such as `cluster`.
+
+    include('hello_world.aurora')
+
+    production_resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 2 * GB)
+    staging_resources = Resources(cpu = 0.1, ram = 32 * MB, disk = 512 * MB)
+    hello_world_template = hello_world_job(
+        name = "hello_world-{{cluster}}",
+        task = hello_world_task(resources = production_resources))
+
+    jobs = [
+      # production jobs
+      hello_world_template(cluster = 'cluster1', instances = 25),
+      hello_world_template(cluster = 'cluster2', instances = 15),
+
+      # staging jobs
+      hello_world_template(
+        cluster = 'local',
+        instances = 1,
+        task = hello_world_task(resources = staging_resources)),
+    ]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world-cluster1 hello_world_productionized.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world-cluster1
diff --git a/source/documentation/0.11.0/configuration-tutorial.md b/source/documentation/0.11.0/configuration-tutorial.md
new file mode 100644
index 0000000..7e27001
--- /dev/null
+++ b/source/documentation/0.11.0/configuration-tutorial.md
@@ -0,0 +1,954 @@
+Aurora Configuration Tutorial
+=============================
+
+How to write Aurora configuration files, including feature descriptions
+and best practices. When writing a configuration file, make use of
+`aurora job inspect`. It takes the same job key and configuration file
+arguments as `aurora job create` or `aurora update start`. It first ensures the
+configuration parses, then outputs it in human-readable form.
+
+You should read this after going through the general [Aurora Tutorial](/documentation/0.11.0/tutorial/).
+
+- [Aurora Configuration Tutorial](#aurora-configuration-tutorial)
+	- [The Basics](#the-basics)
+		- [Use Bottom-To-Top Object Ordering](#use-bottom-to-top-object-ordering)
+	- [An Example Configuration File](#an-example-configuration-file)
+	- [Defining Process Objects](#defining-process-objects)
+	- [Getting Your Code Into The Sandbox](#getting-your-code-into-the-sandbox)
+	- [Defining Task Objects](#defining-task-objects)
+		- [SequentialTask: Running Processes in Parallel or Sequentially](#sequentialtask-running-processes-in-parallel-or-sequentially)
+		- [SimpleTask](#simpletask)
+		- [Combining tasks](#combining-tasks)
+	- [Defining Job Objects](#defining-job-objects)
+	- [The jobs List](#the-jobs-list)
+	- [Templating](#templating)
+		- [Templating 1: Binding in Pystachio](#templating-1-binding-in-pystachio)
+		- [Structurals in Pystachio / Aurora](#structurals-in-pystachio--aurora)
+			- [Mustaches Within Structurals](#mustaches-within-structurals)
+		- [Templating 2: Structurals Are Factories](#templating-2-structurals-are-factories)
+			- [A Second Way of Templating](#a-second-way-of-templating)
+		- [Advanced Binding](#advanced-binding)
+			- [Bind Syntax](#bind-syntax)
+			- [Binding Complex Objects](#binding-complex-objects)
+				- [Lists](#lists)
+				- [Maps](#maps)
+				- [Structurals](#structurals)
+		- [Structural Binding](#structural-binding)
+	- [Configuration File Writing Tips And Best Practices](#configuration-file-writing-tips-and-best-practices)
+		- [Use As Few .aurora Files As Possible](#use-as-few-aurora-files-as-possible)
+		- [Avoid Boilerplate](#avoid-boilerplate)
+		- [Thermos Uses bash, But Thermos Is Not bash](#thermos-uses-bash-but-thermos-is-not-bash)
+			- [Bad](#bad)
+			- [Good](#good)
+		- [Rarely Use Functions In Your Configurations](#rarely-use-functions-in-your-configurations)
+			- [Bad](#bad-1)
+			- [Good](#good-1)
+
+The Basics
+----------
+
+To run a job on Aurora, you must specify a configuration file that tells
+Aurora what it needs to know to schedule the job, what Mesos needs to
+run the tasks the job is made up of, and what Thermos needs to run the
+processes that make up the tasks. This file must have
+a `.aurora` suffix.
+
+A configuration file defines a collection of objects, along with parameter
+values for their attributes. An Aurora configuration file contains the
+following three types of objects:
+
+- Job
+- Task
+- Process
+
+A configuration also specifies a list of `Job` objects assigned
+to the variable `jobs`.
+
+- jobs (list of defined Jobs to run)
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a type-checked dictionary
+templating library called *Pystachio*, a powerful tool for
+configuration specification and reuse. Pystachio objects are tailored
+via {{}}-surrounded templates.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the [*Aurora+Thermos Configuration
+Reference*](/documentation/0.11.0/configuration-reference/), without `import` statements - the
+Aurora config loader injects them automatically. Other than that, an `.aurora`
+file works like any other Python script.
+
+[*Aurora+Thermos Configuration Reference*](/documentation/0.11.0/configuration-reference/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+### Use Bottom-To-Top Object Ordering
+
+A well-structured configuration starts with structural templates (if
+any). Structural templates encapsulate in their attributes all the
+differences between Jobs in the configuration that are not directly
+manipulated at the `Job` level, but typically at the `Process` or `Task`
+level; for example, when certain processes are invoked with slightly
+different settings or input.
+
+After structural templates, define, in order, `Process`es, `Task`s, and
+`Job`s.
+
+Structural template names should be *UpperCamelCased* and their
+instantiations are typically *UPPER\_SNAKE\_CASED*. `Process`, `Task`,
+and `Job` names are typically *lower\_snake\_cased*. Indentation is typically 2
+spaces.
+
+An Example Configuration File
+-----------------------------
+
+The following is a typical configuration file. Don't worry if there are
+parts you don't understand yet, but you may want to refer back to this
+as you read about its individual parts. Note that names surrounded by
+curly braces {{}} are template variables, which the system replaces with
+bound values for the variables.
+
+    # --- templates here ---
+	class Profile(Struct):
+	  package_version = Default(String, 'live')
+	  java_binary = Default(String, '/usr/lib/jvm/java-1.7.0-openjdk/bin/java')
+	  extra_jvm_options = Default(String, '')
+	  parent_environment = Default(String, 'prod')
+	  parent_serverset = Default(String,
+                                 '/foocorp/service/bird/{{parent_environment}}/bird')
+
+	# --- processes here ---
+	main = Process(
+	  name = 'application',
+	  cmdline = '{{profile.java_binary}} -server -Xmx1792m '
+	            '{{profile.extra_jvm_options}} '
+	            '-jar application.jar '
+	            '-upstreamService {{profile.parent_serverset}}'
+	)
+
+	# --- tasks ---
+	base_task = SequentialTask(
+	  name = 'application',
+	  processes = [
+	    Process(
+	      name = 'fetch',
+	      cmdline = 'curl -O '
+	                'https://packages.foocorp.com/{{profile.package_version}}/application.jar'),
+	  ]
+	)
+
+	# not always necessary but often useful to have separate task
+	# resource classes
+	staging_task = base_task(resources =
+	                  Resources(cpu = 1.0,
+	                            ram = 2048*MB,
+	                            disk = 1*GB))
+	production_task = base_task(resources =
+	                     Resources(cpu = 4.0,
+	                               ram = 2560*MB,
+	                               disk = 10*GB))
+
+	# --- job template ---
+	job_template = Job(
+	  name = 'application',
+	  role = 'myteam',
+	  contact = 'myteam-team@foocorp.com',
+	  instances = 20,
+	  service = True,
+	  task = production_task
+	)
+
+	# -- profile instantiations (if any) ---
+	PRODUCTION = Profile()
+	STAGING = Profile(
+	  extra_jvm_options = '-Xloggc:gc.log',
+	  parent_environment = 'staging'
+	)
+
+	# -- job instantiations --
+	jobs = [
+	  job_template(cluster = 'cluster1', environment = 'prod')
+	    .bind(profile = PRODUCTION),
+
+	  job_template(cluster = 'cluster2', environment = 'prod')
+	    .bind(profile = PRODUCTION),
+
+	  job_template(cluster = 'cluster1',
+	               environment = 'staging',
+	               service = False,
+	               task = staging_task,
+	               instances = 2)
+	    .bind(profile = STAGING),
+	]
+
+## Defining Process Objects
+
+Processes are handled by the Thermos system. A process is a single
+executable step run as a part of an Aurora task; it consists of a single
+bash-executable statement.
+
+The key (and required) `Process` attributes are:
+
+-   `name`: Any string which is a valid Unix filename (no slashes,
+    NULLs, or leading periods). The `name` value must be unique relative
+    to other Processes in a `Task`.
+-   `cmdline`: A command line run in a bash subshell, so you can use
+    bash scripts. Nothing is supplied for command-line arguments,
+    so `$*` is unspecified.
+
+Many tiny processes make managing configurations more difficult. For
+example, the following is a bad way to define processes.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip'
+    )
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+Since `cmdline` runs in a bash subshell, you can chain commands
+with `&&` or `||`.
+
+When defining a `Task` that is just a list of Processes run in a
+particular order, use `SequentialTask`, as described in the [*Defining*
+`Task` *Objects*](#Task) section. The following simplifies and combines the
+above multiple `Process` definitions into just two.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip && '
+                'unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+`Process` also has optional attributes to customize its behaviour. Details can be found in the [*Aurora+Thermos Configuration Reference*](/documentation/0.11.0/configuration-reference/#process-objects).
+
+
+## Getting Your Code Into The Sandbox
+
+When using Aurora, you need to get your executable code into its "sandbox", specifically
+the Task sandbox where the code executes for the Processes that make up that Task.
+
+Each Task has a sandbox created when the Task starts and garbage
+collected when it finishes. All of a Task's processes run in its
+sandbox, so processes can share state by using a shared current
+working directory.
+
+Typically, you save this code somewhere. You then need to define a Process
+in your `.aurora` configuration file that fetches the code from that somewhere
+to where the slave can see it. For a public cloud, that can be anywhere public on
+the Internet, such as S3. For a private cloud, you need to put it
+on an accessible HDFS cluster or similar internal storage.
+
+The template for this Process is:
+
+    <name> = Process(
+      name = '<name>',
+      cmdline = '<command to copy and extract code archive into current working directory>'
+    )
+
+Note: Be sure the extracted code archive has an executable.
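+
+A concrete instance of this template might look like the following sketch
+(the URL is illustrative):
+
+    fetch = Process(
+      name = 'fetch',
+      cmdline = 'curl -O https://packages.example.com/myapp.tar.gz && tar xzf myapp.tar.gz')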
+
+## Defining Task Objects
+
+Tasks are handled by Mesos. A task is a collection of processes that
+runs in a shared sandbox. It's the fundamental unit Aurora uses to
+schedule the datacenter; essentially what Aurora does is find places
+in the cluster to run tasks.
+
+The key (and required) parts of a Task are:
+
+-   `name`: A string giving the Task's name. By default, if a Task is
+    not given a name, it inherits the first name in its Process list.
+
+-   `processes`: An unordered list of Process objects bound to the Task.
+    The value of the optional `constraints` attribute affects the
+    contents as a whole. Currently, the only constraint, `order`, determines if
+    the processes run in parallel or sequentially.
+
+-   `resources`: A `Resource` object defining the Task's resource
+    footprint. A `Resource` object has three attributes:
+    -   `cpu`: A Float, the fractional number of cores the Task
+        requires.
+    -   `ram`: An Integer, RAM bytes the Task requires.
+    -   `disk`: An Integer, disk bytes the Task requires.
+
+A basic Task definition looks like:
+
+    Task(
+        name="hello_world",
+        processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+        resources=Resources(cpu = 1.0,
+                            ram = 1*GB,
+                            disk = 1*GB))
+
+A Task has optional attributes to customize its behaviour. Details can be found in the [*Aurora+Thermos Configuration Reference*](/documentation/0.11.0/configuration-reference/#task-object)
+
+
+### SequentialTask: Running Processes in Parallel or Sequentially
+
+By default, a Task with several Processes runs them in parallel. There
+are two ways to run Processes sequentially:
+
+-   Include an `order` constraint in the Task definition's `constraints`
+    attribute whose arguments specify the processes' run order:
+
+        Task( ... processes=[process1, process2, process3],
+              constraints = order(process1, process2, process3), ...)
+
+-   Use `SequentialTask` instead of `Task`; it automatically runs
+    processes in the order specified in the `processes` attribute. No
+    `constraint` parameter is needed:
+
+        SequentialTask( ... processes=[process1, process2, process3] ...)
+
+### SimpleTask
+
+For quickly creating simple tasks, use the `SimpleTask` helper. It
+creates a basic task from a provided name and command line using a
+default set of resources. For example, in a `.aurora` configuration
+file:
+
+    SimpleTask(name="hello_world", command="echo hello world")
+
+is equivalent to
+
+    Task(name="hello_world",
+         processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+         resources=Resources(cpu = 1.0,
+                             ram = 1*GB,
+                             disk = 1*GB))
+
+The simplest idiomatic Job configuration thus becomes:
+
+    import os
+    hello_world_job = Job(
+      task=SimpleTask(name="hello_world", command="echo hello world"),
+      role=os.getenv('USER'),
+      cluster="cluster1")
+
+When written to `hello_world.aurora`, you invoke it with a simple
+`aurora job create cluster1/$USER/test/hello_world hello_world.aurora`.
+
+### Combining tasks
+
+`Tasks.concat` (synonym: `concat_tasks`) and
+`Tasks.combine` (synonym: `combine_tasks`) merge multiple Task definitions
+into a single Task. It may be easier to define complex Jobs
+as smaller constituent Tasks. But since a Job only includes a single
+Task, the subtasks must be combined before using them in a Job.
+Smaller Tasks can also be reused between Jobs, instead of having to
+repeat their definition for multiple Jobs.
+
+With both methods, the merged Task takes the first Task's name. The
+difference between the two is the result Task's process ordering.
+
+-   `Tasks.combine` runs its subtasks' processes in no particular order.
+    The new Task's resource consumption is the sum of all its subtasks'
+    consumption.
+
+-   `Tasks.concat` runs its subtasks in the order supplied, with each
+    subtask's processes run serially between tasks. It is analogous to
+    the `order` constraint helper, except at the Task level instead of
+    the Process level. The new Task's resource consumption is the
+    maximum value specified by any subtask for each Resource attribute
+    (cpu, ram and disk).
+
+For example, given the following:
+
+    setup_task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper],
+      # It is important to note that {{Tasks.concat}} has
+      # no effect on the ordering of the processes within a task;
+      # hence the necessity of the {{order}} statement below
+      # (otherwise, the order in which {{download_interpreter}}
+      # and {{update_zookeeper}} run will be non-deterministic)
+      constraints=order(download_interpreter, update_zookeeper),
+      ...
+    )
+
+    run_task = SequentialTask(
+      ...
+      processes=[download_application, start_application],
+      ...
+    )
+
+    combined_task = Tasks.concat(setup_task, run_task)
+
+The `Tasks.concat` command merges the two Tasks into a single Task and
+ensures all processes in `setup_task` run before the processes
+in `run_task`. Conceptually, the task is reduced to:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper,
+                        download_application, start_application),
+      ...
+    )
+
+In the case of `Tasks.combine`, the two schedules run in parallel:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper) +
+                        order(download_application, start_application),
+      ...
+    )
+
+In the latter case, each of the two sequences may operate in parallel.
+Of course, this may not be the intended behavior (for example, if
+the `start_application` Process implicitly relies
+upon `download_interpreter`). Make sure you understand the difference
+between using one or the other.
+
+## Defining Job Objects
+
+A job is a group of identical tasks that Aurora can run in a Mesos cluster.
+
+A `Job` object is defined by the values of several attributes, some
+required and some optional. The required attributes are:
+
+-   `task`: Task object to bind to this job. Note that a Job can
+    only take a single Task.
+
+-   `role`: Job's role account; in other words, the user account to run
+    the job as on a Mesos cluster machine. A common value is
+    `os.getenv('USER')`; using a Python command to get the user who
+    submits the job request. The other common value is the service
+    account that runs the job, e.g. `www-data`.
+
+-   `environment`: Job's environment, typical values
+    are `devel`, `test`, or `prod`.
+
+-   `cluster`: Aurora cluster to schedule the job in, defined in
+    `/etc/aurora/clusters.json` or `~/.clusters.json`. You can specify
+    jobs where the only difference is the `cluster`, then at run time
+    only run the Job whose job key includes your desired cluster's name.
+
+You usually see a `name` parameter. By default, `name` inherits its
+value from the Job's associated Task object, but you can override this
+default. For these four parameters, a Job definition might look like:
+
+    foo_job = Job( name = 'foo', cluster = 'cluster1',
+              role = os.getenv('USER'), environment = 'prod',
+              task = foo_task)
+
+In addition to the required attributes, there are several optional
+attributes. Details can be found in the [Aurora+Thermos Configuration Reference](/documentation/0.11.0/configuration-reference/#job-objects).
+
+
+## The jobs List
+
+At the end of your `.aurora` file, you need to specify a list of the
+file's defined Jobs. For example, the following exports the jobs `job1`,
+`job2`, and `job3`.
+
+    jobs = [job1, job2, job3]
+
+This allows the aurora client to invoke commands on those jobs, such as
+starting, updating, or killing them.
+
+Templating
+----------
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a templating library called
+*Pystachio*, a powerful tool for configuration specification and reuse.
+
+[Aurora+Thermos Configuration Reference](/documentation/0.11.0/configuration-reference/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the *Aurora+Thermos Configuration
+Reference* without `import` statements - the Aurora config loader
+injects them automatically. Other than that the `.aurora` format
+works like any other Python script.
+
+### Templating 1: Binding in Pystachio
+
+Pystachio uses the visually distinctive {{}} to indicate template
+variables. These are often called "mustache variables" after the
+similarly appearing variables in the Mustache templating system and
+because the curly braces resemble mustaches.
+
+If you are familiar with the Mustache system, templates in Pystachio
+have significant differences. They have no nesting, joining, or
+inheritance semantics. On the other hand, templates are evaluated
+iteratively, which affords some level of indirection.
+
+Let's start with the simplest template: text with one
+variable, in this case `name`:
+
+    Hello {{name}}
+
+If we evaluate this as is, we'd get back:
+
+    Hello
+
+If a template variable doesn't have a value, when evaluated it's
+replaced with nothing. If we add a binding to give it a value:
+
+    { "name" : "Tom" }
+
+We'd get back:
+
+    Hello Tom
+
+Every Pystachio object has an associated `.bind` method that can bind
+values to {{}} variables. Bindings are not immediately evaluated.
+Instead, they are evaluated only when the interpolated value of the
+object is necessary, e.g. for performing equality or serializing a
+message over the wire.
+
+Objects with and without mustache templated variables behave
+differently:
+
+    >>> Float(1.5)
+    Float(1.5)
+
+    >>> Float('{{x}}.5')
+    Float({{x}}.5)
+
+    >>> Float('{{x}}.5').bind(x = 1)
+    Float(1.5)
+
+    >>> Float('{{x}}.5').bind(x = 1) == Float(1.5)
+    True
+
+    >>> contextual_object = String('{{metavar{{number}}}}').bind(
+    ... metavar1 = "first", metavar2 = "second")
+
+    >>> contextual_object
+    String({{metavar{{number}}}})
+
+    >>> contextual_object.bind(number = 1)
+    String(first)
+
+    >>> contextual_object.bind(number = 2)
+    String(second)
+
+You usually bind simple key to value pairs, but you can also bind three
+other objects: lists, dictionaries, and structurals. These will be
+described in detail later.
+
+### Structurals in Pystachio / Aurora
+
+Most Aurora/Thermos users don't ever (knowingly) interact with `String`,
+`Float`, or `Integer` Pystachio objects directly. Instead they interact
+with derived structural (`Struct`) objects that are collections of
+fundamental and structural objects. The structural object components are
+called *attributes*. Aurora's most used structural objects are `Job`,
+`Task`, and `Process`:
+
+    class Process(Struct):
+      cmdline = Required(String)
+      name = Required(String)
+      max_failures = Default(Integer, 1)
+      daemon = Default(Boolean, False)
+      ephemeral = Default(Boolean, False)
+      min_duration = Default(Integer, 5)
+      final = Default(Boolean, False)
+
+Construct default objects by following the object's type with (). If you
+want an attribute to have a value different from its default, include
+the attribute name and value inside the parentheses.
+
+    >>> Process()
+    Process(daemon=False, max_failures=1, ephemeral=False,
+      min_duration=5, final=False)
+
+Attribute values can be template variables, which then receive specific
+values when creating the object.
+
+    >>> Process(cmdline = 'echo {{message}}')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo {{message}}, final=False)
+
+    >>> Process(cmdline = 'echo {{message}}').bind(message = 'hello world')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+A powerful binding property is that all of an object's children inherit its
+bindings:
+
+    >>> List(Process)([
+    ... Process(name = '{{prefix}}_one'),
+    ... Process(name = '{{prefix}}_two')
+    ... ]).bind(prefix = 'hello')
+    ProcessList(
+      Process(daemon=False, name=hello_one, max_failures=1, ephemeral=False, min_duration=5, final=False),
+      Process(daemon=False, name=hello_two, max_failures=1, ephemeral=False, min_duration=5, final=False)
+      )
+
+Remember that an Aurora Job contains Tasks which contain Processes. A
+Job level binding is inherited by its Tasks and all their Processes.
+Similarly a Task level binding is available to that Task and its
+Processes but is *not* visible at the Job level (inheritance is a
+one-way street.)
+
+#### Mustaches Within Structurals
+
+When you define a `Struct` schema, one powerful, but confusing, feature
+is that all of that structure's attributes are Mustache variables within
+the enclosing scope *once they have been populated*.
+
+For example, when `Process` is defined above, all its attributes, such as
+`{{name}}`, `{{cmdline}}`, and `{{max_failures}}`, are immediately
+defined as Mustache variables, implicitly bound into the `Process`, and
+inherited by all child objects once they are defined.
+
+Thus, you can do the following:
+
+    >>> Process(name = "installer", cmdline = "echo {{name}} is running")
+    Process(daemon=False, name=installer, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo installer is running, final=False)
+
+WARNING: This binding only takes place in one direction. For example,
+the following does NOT work and does not set the `Process` `name`
+attribute's value.
+
+    >>> Process().bind(name = "installer")
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5, final=False)
+
+The following is also not possible and results in an infinite loop that
+attempts to resolve `Process.name`.
+
+    >>> Process(name = '{{name}}').bind(name = 'installer')
+
+Do not confuse Structural attributes with bound Mustache variables.
+Attributes are implicitly converted to Mustache variables but not vice
+versa.
+
+### Templating 2: Structurals Are Factories
+
+#### A Second Way of Templating
+
+A second templating method is as powerful as the aforementioned and
+often confused with it. This method is due to the automatic conversion of
+Struct attributes to Mustache variables as described above.
+
+Suppose you create a Process object:
+
+    >>> p = Process(name = "process_one", cmdline = "echo hello world")
+
+    >>> p
+    Process(daemon=False, name=process_one, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+This `Process` object, "`p`", can be used wherever a `Process` object is
+needed. It can also be reused by changing the value(s) of its
+attribute(s). Here we change its `name` attribute from `process_one` to
+`process_two`.
+
+    >>> p(name = "process_two")
+    Process(daemon=False, name=process_two, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+Template creation is a common use for this technique:
+
+    >>> Daemon = Process(daemon = True)
+    >>> logrotate = Daemon(name = 'logrotate', cmdline = './logrotate conf/logrotate.conf')
+    >>> mysql = Daemon(name = 'mysql', cmdline = 'bin/mysqld --safe-mode')
+
+### Advanced Binding
+
+As described above, `.bind()` binds simple strings or numbers to
+Mustache variables. In addition to Structural types formed by combining
+atomic types, Pystachio has two container types; `List` and `Map` which
+can also be bound via `.bind()`.
+
+#### Bind Syntax
+
+The `bind()` function can take Python dictionaries or `kwargs`
+interchangeably (when `**kwargs` is in a function definition, `kwargs`
+receives a Python dictionary containing all keyword arguments after the
+formal parameter list).
+
+    >>> String('{{foo}}').bind(foo = 'bar') == String('{{foo}}').bind({'foo': 'bar'})
+    True
+
+Bindings done "closer" to the object in question take precedence:
+
+    >>> p = Process(name = '{{context}}_process')
+    >>> t = Task().bind(context = 'global')
+    >>> t(processes = [p, p.bind(context = 'local')])
+    Task(processes=ProcessList(
+      Process(daemon=False, name=global_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5),
+      Process(daemon=False, name=local_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5)
+    ))
+
+#### Binding Complex Objects
+
+##### Lists
+
+    >>> fibonacci = List(Integer)([1, 1, 2, 3, 5, 8, 13])
+    >>> String('{{fib[4]}}').bind(fib = fibonacci)
+    String(5)
+
+##### Maps
+
+    >>> first_names = Map(String, String)({'Kent': 'Clark', 'Wayne': 'Bruce', 'Prince': 'Diana'})
+    >>> String('{{first[Kent]}}').bind(first = first_names)
+    String(Clark)
+
+##### Structurals
+
+    >>> String('{{p.cmdline}}').bind(p = Process(cmdline = "echo hello world"))
+    String(echo hello world)
+
+### Structural Binding
+
+Use structural templates when binding more than two or three individual
+values at the Job or Task level. For fewer values, standard
+key-to-value binding is sufficient.
+
+Structural binding is a very powerful pattern and is most useful in
+Aurora/Thermos for doing Structural configuration. For example, you can
+define a job profile. The following profile uses `HDFS`, the Hadoop
+Distributed File System, to designate a file's location. `HDFS` does
+not come with Aurora, so you'll need to either install it separately
+or change the way the dataset is designated.
+
+    class Profile(Struct):
+      version = Required(String)
+      environment = Required(String)
+      dataset = Default(String, 'hdfs://home/aurora/data/{{environment}}')
+
+    PRODUCTION = Profile(version = 'live', environment = 'prod')
+    DEVEL = Profile(version = 'latest',
+                    environment = 'devel',
+                    dataset = 'hdfs://home/aurora/data/test')
+    TEST = Profile(version = 'latest', environment = 'test')
+
+    JOB_TEMPLATE = Job(
+      name = 'application',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = '{{profile.environment}}',
+      task = SequentialTask(
+        name = 'task',
+        resources = Resources(cpu = 2, ram = 4*GB, disk = 8*GB),
+        processes = [
+          Process(name = 'main',
+                  cmdline = 'java -jar application.jar -hdfsPath {{profile.dataset}}')
+        ]
+       )
+     )
+
+    jobs = [
+      JOB_TEMPLATE(instances = 100).bind(profile = PRODUCTION),
+      JOB_TEMPLATE.bind(profile = DEVEL),
+      JOB_TEMPLATE.bind(profile = TEST),
+     ]
+
+In this case, a custom structural "Profile" is created to self-document
+the configuration to some degree. This also allows some schema
+"type-checking", and for default self-substitution, e.g. in
+`Profile.dataset` above.
+
+So rather than a `.bind()` with a half-dozen substituted variables, you
+can bind a single object that has sensible defaults stored in a single
+place.
+
+Configuration File Writing Tips And Best Practices
+--------------------------------------------------
+
+### Use As Few .aurora Files As Possible
+
+When creating your `.aurora` configuration, try to keep all versions of
+a particular job within the same `.aurora` file. For example, if you
+have separate jobs for `cluster1`, `cluster1` staging, `cluster1`
+testing, and `cluster2`, keep them as close together as possible.
+
+Constructs shared across multiple jobs owned by your team (e.g.
+team-level defaults or structural templates) can be split into separate
+`.aurora` files and included via the `include` directive.
+
+### Avoid Boilerplate
+
+If you see repetition or find yourself copy and pasting any parts of
+your configuration, it's likely an opportunity for templating. Take the
+example below:
+
+`redundant.aurora` contains:
+
+    download = Process(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'rm -rf Python-2.7.3 && tar xjf Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    build = Process(
+      name = 'build',
+      cmdline = 'pushd Python-2.7.3 && ./configure && make && popd',
+      max_failures = 1)
+
+    email = Process(
+      name = 'email',
+      cmdline = 'echo Success | mail feynman@tmc.com',
+      max_failures = 5,
+      min_duration = 1)
+
+    build_python = Task(
+      name = 'build_python',
+      processes = [download, unpack, build, email],
+      constraints = [Constraint(order = ['download', 'unpack', 'build', 'email'])])
+
+As you'll notice, there's a lot of repetition in the `Process`
+definitions. For example, almost every process sets a `max_failures`
+limit to 5 and a `min_duration` to 1. This is an opportunity for factoring
+into a common process template.
+
+Furthermore, the Python version is repeated everywhere. This can be
+bound via structural templating as described in the [Advanced Binding](#AdvancedBinding)
+section.
+
+`less_redundant.aurora` contains:
+
+    class Python(Struct):
+      version = Required(String)
+      base = Default(String, 'Python-{{version}}')
+      package = Default(String, '{{base}}.tar.bz2')
+
+    ReliableProcess = Process(
+      max_failures = 5,
+      min_duration = 1)
+
+    download = ReliableProcess(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/{{python.version}}/{{python.package}}')
+
+    unpack = ReliableProcess(
+      name = 'unpack',
+      cmdline = 'rm -rf {{python.base}} && tar xjf {{python.package}}')
+
+    build = ReliableProcess(
+      name = 'build',
+      cmdline = 'pushd {{python.base}} && ./configure && make && popd',
+      max_failures = 1)
+
+    email = ReliableProcess(
+      name = 'email',
+      cmdline = 'echo Success | mail {{role}}@foocorp.com')
+
+    build_python = SequentialTask(
+      name = 'build_python',
+      processes = [download, unpack, build, email]).bind(python = Python(version = "2.7.3"))
+
+### Thermos Uses bash, But Thermos Is Not bash
+
+#### Bad
+
+Many tiny Processes make for harder-to-manage configurations.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'rcp user@my_machine:my_application .'
+    )
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+#### Good
+
+Each `cmdline` runs in a bash subshell, so you have the full power of
+bash. Chaining commands with `&&` or `||` is almost always the right
+thing to do.
+
+Also for Tasks that are simply a list of processes that run one after
+another, consider using the `SequentialTask` helper which applies a
+linear ordering constraint for you.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'rcp user@my_machine:my_application . && unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+### Rarely Use Functions In Your Configurations
+
+90% of the time you define a function in a `.aurora` file, you're
+probably Doing It Wrong(TM).
+
+#### Bad
+
+    def get_my_task(name, user, cpu, ram, disk):
+      return Task(
+        name = name,
+        user = user,
+        processes = [STAGE_PROCESS, RUN_PROCESS],
+        constraints = order(STAGE_PROCESS, RUN_PROCESS),
+        resources = Resources(cpu = cpu, ram = ram, disk = disk)
+      )
+
+    task_one = get_my_task('task_one', 'feynman', 1.0, 32*MB, 1*GB)
+    task_two = get_my_task('task_two', 'feynman', 2.0, 64*MB, 1*GB)
+
+#### Good
+
+This one is more idiomatic. Forced keyword arguments prevent accidents,
+e.g. constructing a task with `32*MB` of disk when you mean 32MB of RAM.
+Less proliferation of task-construction techniques means an
+easier-to-read, quicker-to-understand, and more composable
+configuration.
+
+    TASK_TEMPLATE = SequentialTask(
+      user = 'wickman',
+      processes = [STAGE_PROCESS, RUN_PROCESS],
+    )
+
+    task_one = TASK_TEMPLATE(
+      name = 'task_one',
+      resources = Resources(cpu = 1.0, ram = 32*MB, disk = 1*GB)
+    )
+
+    task_two = TASK_TEMPLATE(
+      name = 'task_two',
+      resources = Resources(cpu = 2.0, ram = 64*MB, disk = 1*GB)
+    )
diff --git a/source/documentation/0.11.0/cron-jobs.md b/source/documentation/0.11.0/cron-jobs.md
new file mode 100644
index 0000000..276cfe2
--- /dev/null
+++ b/source/documentation/0.11.0/cron-jobs.md
@@ -0,0 +1,131 @@
+# Cron Jobs
+
+Aurora supports execution of scheduled jobs on a Mesos cluster using cron-style syntax.
+
+- [Overview](#overview)
+- [Collision Policies](#collision-policies)
+	- [KILL_EXISTING](#kill_existing)
+	- [CANCEL_NEW](#cancel_new)
+- [Failure recovery](#failure-recovery)
+- [Interacting with cron jobs via the Aurora CLI](#interacting-with-cron-jobs-via-the-aurora-cli)
+	- [cron schedule](#cron-schedule)
+	- [cron deschedule](#cron-deschedule)
+	- [cron start](#cron-start)
+	- [job killall, job restart, job kill](#job-killall-job-restart-job-kill)
+- [Technical Note About Syntax](#technical-note-about-syntax)
+- [Caveats](#caveats)
+	- [Failovers](#failovers)
+	- [Collision policy is best-effort](#collision-policy-is-best-effort)
+	- [Timezone Configuration](#timezone-configuration)
+
+## Overview
+
+A job is identified as a cron job by the presence of a
+`cron_schedule` attribute containing a cron-style schedule in the
+[`Job`](/documentation/0.11.0/configuration-reference/#job-objects) object. Examples of cron schedules
+include "every 5 minutes" (`*/5 * * * *`), "Fridays at 17:00" (`* 17 * * FRI`), and
+"the 1st and 15th day of the month at 03:00" (`0 3 1,15 *`).
+
+Example (available in the [Vagrant environment](/documentation/0.11.0/vagrant/)):
+
+    $ cat /vagrant/examples/job/cron_hello_world.aurora
+    # cron_hello_world.aurora
+    # A cron job that runs every 5 minutes.
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]
+
+## Collision Policies
+
+The `cron_collision_policy` field specifies the scheduler's behavior when a new cron job is
+triggered while an older run hasn't finished. The scheduler has two policies available,
+[KILL_EXISTING](#kill_existing) and [CANCEL_NEW](#cancel_new).
+
+### KILL_EXISTING
+
+The default policy - on a collision, the old instances are killed and instances with the current
+configuration are started.
+
+### CANCEL_NEW
+
+On a collision the new run is cancelled.
+
+Note that the use of this policy is likely a code smell - interrupted cron jobs should be able
+to recover their progress on a subsequent invocation, otherwise they risk having their work queue
+grow faster than they can process it.
+
+## Failure recovery
+
+Unlike with services, which Aurora will always re-execute regardless of exit status, instances of
+cron jobs retry according to the `max_task_failures` attribute of the
+[Task](/documentation/0.11.0/configuration-reference/#task-objects) object. To get "run-until-success" semantics,
+set `max_task_failures` to `-1`.
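+
+For example, a minimal sketch of a run-until-success cron task (the process name and command
+line are illustrative):
+
+    run_until_success = Task(
+      name = 'run_until_success',
+      processes = [Process(name = 'ingest', cmdline = './ingest.sh')],
+      resources = Resources(cpu = 1.0, ram = 128*MB, disk = 128*MB),
+      max_task_failures = -1)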
+
+## Interacting with cron jobs via the Aurora CLI
+
+Most interaction with cron jobs takes place using the `cron` subcommand. See `aurora cron -h`
+for up-to-date usage instructions.
+
+### cron schedule
+Schedules a new cron job on the Aurora cluster for later runs or replaces the existing cron template
+with a new one. Only future runs will be affected; any existing active tasks are left intact.
+
+    $ aurora cron schedule devcluster/www-data/test/cron_hello_world /vagrant/examples/jobs/cron_hello_world.aurora
+
+### cron deschedule
+Deschedules a cron job, preventing future runs but allowing current runs to complete.
+
+    $ aurora cron deschedule devcluster/www-data/test/cron_hello_world
+
+### cron start
+Start a cron job immediately, outside of its normal cron schedule.
+
+    $ aurora cron start devcluster/www-data/test/cron_hello_world
+
+### job killall, job restart, job kill
+Cron jobs create instances running on the cluster that you can interact with like normal Aurora
+tasks, using commands such as `job kill`, `job killall`, and `job restart`.
+
+## Technical Note About Syntax
+
+`cron_schedule` uses a restricted subset of BSD crontab syntax. While the
+execution engine currently uses Quartz, the schedule parsing is custom and accepts a subset of
+FreeBSD [crontab(5)](http://www.freebsd.org/cgi/man.cgi?crontab(5)) syntax. See
+[the source](https://github.com/apache/aurora/blob/master/src/main/java/org/apache/aurora/scheduler/cron/CrontabEntry.java#L106-L124)
+for details.
+
+## Caveats
+
+### Failovers
+No failover recovery. Aurora does not record the latest minute it fired
+triggers for across failovers. Therefore it's possible to miss triggers
+on failover. Note that this behavior may change in the future.
+
+It's necessary to sync time between schedulers with something like `ntpd`.
+Clock skew could cause double or missed triggers in the case of a failover.
+
+### Collision policy is best-effort
+Aurora aims to always have *at least one copy* of a given instance running at a time - it's
+an AP system, meaning it chooses Availability and Partition Tolerance at the expense of
+Consistency.
+
+If your collision policy was `CANCEL_NEW` and a task has terminated but
+Aurora has not noticed this, Aurora will go ahead and create your new
+task.
+
+If your collision policy was `KILL_EXISTING` and a task was marked `LOST`
+but not yet GCed, Aurora will go ahead and create your new task without
+attempting to kill the old one (outside the GC interval).
+
+### Timezone Configuration
+Cron timezone is configured independently of JVM timezone with the `-cron_timezone` flag and
+defaults to UTC.
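+
+For example, to evaluate cron schedules in Pacific time (assuming a standard tz database name
+is accepted):
+
+    -cron_timezone=America/Los_Angeles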
diff --git a/source/documentation/0.11.0/deploying-aurora-scheduler.md b/source/documentation/0.11.0/deploying-aurora-scheduler.md
new file mode 100644
index 0000000..8ddc5de
--- /dev/null
+++ b/source/documentation/0.11.0/deploying-aurora-scheduler.md
@@ -0,0 +1,340 @@
+# Deploying the Aurora Scheduler
+
+When setting up your cluster, you will install the scheduler on a small number (usually 3 or 5) of
+machines.  This guide helps you get the scheduler set up and troubleshoot some common hurdles.
+
+- [Installing Aurora](#installing-aurora)
+  - [Creating the Distribution .zip File (Optional)](#creating-the-distribution-zip-file-optional)
+  - [Installing Aurora](#installing-aurora-1)
+- [Configuring Aurora](#configuring-aurora)
+  - [A Note on Configuration](#a-note-on-configuration)
+  - [Replicated Log Configuration](#replicated-log-configuration)
+  - [Initializing the Replicated Log](#initializing-the-replicated-log)
+  - [Storage Performance Considerations](#storage-performance-considerations)
+  - [Network considerations](#network-considerations)
+  - [Considerations for running jobs in docker containers](#considerations-for-running-jobs-in-docker-containers)
+  - [Security Considerations](#security-considerations)
+  - [Configuring Resource Oversubscription](#configuring-resource-oversubscription)
+  - [Process Log Rotation](#process-log-rotation)
+- [Running Aurora](#running-aurora)
+  - [Maintaining an Aurora Installation](#maintaining-an-aurora-installation)
+  - [Monitoring](#monitoring)
+  - [Running stateful services](#running-stateful-services)
+    - [Dedicated attribute](#dedicated-attribute)
+      - [Syntax](#syntax)
+      - [Example](#example)
+- [Best practices](#best-practices)
+  - [Diversity](#diversity)
+- [Common problems](#common-problems)
+  - [Replicated log not initialized](#replicated-log-not-initialized)
+    - [Symptoms](#symptoms)
+    - [Solution](#solution)
+  - [Scheduler not registered](#scheduler-not-registered)
+    - [Symptoms](#symptoms-1)
+    - [Solution](#solution-1)
+- [Changing Scheduler Quorum Size](#changing-scheduler-quorum-size)
+    - [Preparation](#preparation)
+    - [Adding New Schedulers](#adding-new-schedulers)
+
+## Installing Aurora
+The Aurora scheduler is a standalone Java server. As part of the build process it creates a bundle
+of all its dependencies, with the notable exceptions of the JVM and libmesos. Each target server
+should have a JVM (Java 8 or higher) and libmesos (0.25.0) installed.
+
+### Creating the Distribution .zip File (Optional)
+To create a distribution for installation you will need build tools installed. On Ubuntu this can be
+done with `sudo apt-get install build-essential default-jdk`.
+
+    git clone http://gitbox.apache.org/repos/asf/aurora.git
+    cd aurora
+    ./gradlew distZip
+
+Copy the generated `dist/distributions/aurora-scheduler-*.zip` to each node that will run a scheduler.
+
+### Installing Aurora
+Extract the aurora-scheduler zip file. The example configurations assume it is extracted to
+`/usr/local/aurora-scheduler`.
+
+    sudo unzip dist/distributions/aurora-scheduler-*.zip -d /usr/local
+    sudo ln -nfs "$(ls -dt /usr/local/aurora-scheduler-* | head -1)" /usr/local/aurora-scheduler
+
+## Configuring Aurora
+
+### A Note on Configuration
+Like Mesos, Aurora uses command-line flags for runtime configuration. As such the Aurora
+"configuration file" is typically a `scheduler.sh` shell script of the form.
+
+    #!/bin/bash
+    AURORA_HOME=/usr/local/aurora-scheduler
+
+    # Flags controlling the JVM.
+    JAVA_OPTS=(
+      -Xmx2g
+      -Xms2g
+      # GC tuning, etc.
+    )
+
+    # Flags controlling the scheduler.
+    AURORA_FLAGS=(
+      -http_port=8081
+      # Log configuration, etc.
+    )
+
+    # Environment variables controlling libmesos
+    export JAVA_HOME=...
+    export GLOG_v=1
+    export LIBPROCESS_PORT=8083
+
+    JAVA_OPTS="${JAVA_OPTS[*]}" exec "$AURORA_HOME/bin/aurora-scheduler" "${AURORA_FLAGS[@]}"
+
+That way Aurora's current flags are visible in `ps` and in the `/vars` admin endpoint.
+
+Examples are available under `examples/scheduler/`. For a list of available Aurora flags and their
+documentation run
+
+    /usr/local/aurora-scheduler/bin/aurora-scheduler -help
+
+### Replicated Log Configuration
+All Aurora state is persisted to a replicated log. This includes all jobs Aurora is running
+including where in the cluster they are being run and the configuration for running them, as
+well as other information such as metadata needed to reconnect to the Mesos master, resource
+quotas, and any other locks in place.
+
+Aurora schedulers use ZooKeeper to discover log replicas and elect a leader. Only one scheduler is
+leader at a given time - the other schedulers follow log writes and prepare to take over as leader
+but do not communicate with the Mesos master. Either 3 or 5 schedulers are recommended in a
+production deployment depending on failure tolerance and they must have persistent storage.
+
+In a cluster with `N` schedulers, the flag `-native_log_quorum_size` should be set to
+`floor(N/2) + 1`. So in a cluster with 1 scheduler it should be set to `1`, in a cluster with 3 it
+should be set to `2`, and in a cluster of 5 it should be set to `3`.
+
+  Number of schedulers (N) | ```-native_log_quorum_size``` setting (```floor(N/2) + 1```)
+  ------------------------ | -------------------------------------------------------------
+  1                        | 1
+  3                        | 2
+  5                        | 3
+  7                        | 4
+
+*Incorrectly setting this flag will cause data corruption to occur!*
+
+See [this document](/documentation/0.11.0/storage-config/#scheduler-storage-configuration-flags) for more replicated
+log and storage configuration options.
+
+### Initializing the Replicated Log
+Before you start Aurora you will also need to initialize the log on a majority of the schedulers.
+
+    mesos-log initialize --path="/path/to/native/log"
+
+The `--path` flag should match the `-native_log_file_path` flag to the scheduler.
+Failing to do this will result in the following message when you try to start the scheduler:
+
+    Replica in EMPTY status received a broadcasted recover request
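+
+For example, if the scheduler is started with `-native_log_file_path=/var/lib/aurora/scheduler/db`
+(an illustrative path), the log would be initialized with:
+
+    mesos-log initialize --path="/var/lib/aurora/scheduler/db"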
+
+### Storage Performance Considerations
+
+See [this document](/documentation/0.11.0/scheduler-storage/) for scheduler storage performance considerations.
+
+### Network considerations
+The Aurora scheduler listens on 2 ports - an HTTP port used for client RPCs and a web UI,
+and a libprocess (HTTP+Protobuf) port used to communicate with the Mesos master and for the log
+replication protocol. These can be left unconfigured (the scheduler publishes all selected ports
+to ZooKeeper) or explicitly set in the startup script as follows:
+
+    # ...
+    AURORA_FLAGS=(
+      # ...
+      -http_port=8081
+      # ...
+    )
+    # ...
+    export LIBPROCESS_PORT=8083
+    # ...
+
+### Considerations for running jobs in docker containers
+In order for Aurora to launch jobs using docker containers, a few extra configuration options
+must be set.  The [docker containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/)
+must be enabled on the mesos slaves by launching them with the `--containerizers=docker,mesos` option.
+
+By default, Aurora will configure Mesos to copy the file specified in `-thermos_executor_path`
+into the container's sandbox.  If using a wrapper script to launch the thermos executor,
+specify the path to the wrapper in that argument. In addition, the path to the executor pex itself
+must be included in the `-thermos_executor_resources` option. Doing so will ensure that both the
+wrapper script and executor are correctly copied into the sandbox. Finally, ensure the wrapper
+script does not access resources outside of the sandbox, as when the script is run from within a
+docker container those resources will not exist.
+
+A scheduler flag, `-global_container_mounts` allows mounting paths from the host (i.e., the slave)
+into all containers on that host. The format is a comma-separated list of `host_path:container_path[:mode]`
+tuples. For example `-global_container_mounts=/opt/secret_keys_dir:/mnt/secret_keys_dir:ro` mounts
+`/opt/secret_keys_dir` from the slaves into all launched containers. Valid modes are `ro` and `rw`.
+
+In order to correctly execute processes inside a job, the docker container must have Python 2.7
+installed.
+
+### Process Log Rotation
+By default, Thermos will not rotate the stdout/stderr logs from child processes and they will grow
+without bound. An individual user may change this behavior via configuration on the Process object,
+but it may also be desirable to change the default configuration for the entire cluster.
+In order to enable rotation by default, the following flags can be applied to Thermos (through the
+`-thermos_executor_flags` argument to the Aurora scheduler):
+
+    --runner-logger-mode=rotate
+    --runner-rotate-log-size-mb=100
+    --runner-rotate-log-backups=10
+
+In the above example, each instance of the Thermos runner will rotate stderr/stdout logs once they
+reach 100 MiB in size and keep a maximum of 10 backups. If a user has provided a custom setting for
+their process, it will override these default settings.
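+
+For example, a sketch of passing these flags through the `scheduler.sh` startup script
+described above:
+
+    AURORA_FLAGS=(
+      # ...
+      -thermos_executor_flags="--runner-logger-mode=rotate --runner-rotate-log-size-mb=100 --runner-rotate-log-backups=10"
+    )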
+
+## Running Aurora
+Configure a supervisor like [Monit](http://mmonit.com/monit/) or
+[supervisord](http://supervisord.org/) to run the created `scheduler.sh` file and restart it
+whenever it fails. Aurora expects to be restarted by an external process when it fails. Aurora
+supports an active health checking protocol on its admin HTTP interface - if a `GET /health` times
+out or returns anything other than `200 OK` the scheduler process is unhealthy and should be
+restarted.
+
+For example, monit can be configured with
+
+    if failed port 8081 send "GET /health HTTP/1.0\r\n" expect "OK\n" with timeout 2 seconds for 10 cycles then restart
+
+assuming you set `-http_port=8081`.
+
+### Security Considerations
+
+See [security.md](/documentation/0.11.0/security/).
+
+### Configuring Resource Oversubscription
+
+**WARNING**: This feature is currently in alpha status. Do not use it in production clusters!
+See [this document](/documentation/0.11.0/configuration-reference/#revocable-jobs) for more feature details.
+
+Set this scheduler flag to allow receiving revocable Mesos offers:
+
+    -receive_revocable_resources=true
+
+Specify a tier configuration file path:
+
+    -tier_config=path/to/tiers/config.json
+
+Example: [tier configuration file](https://github.com/apache/aurora/blob/#{git_tag}/src/test/resources/org/apache/aurora/scheduler/tiers-example.json).
+
+### Maintaining an Aurora Installation
+
+### Monitoring
+Please see our dedicated [monitoring guide](/documentation/0.11.0/monitoring/) for in-depth discussion on monitoring.
+
+### Running stateful services
+Aurora is best suited to run stateless applications, but it also accommodates stateful services
+like databases, or services that otherwise need to always run on the same machines.
+
+#### Dedicated attribute
+The Mesos slave has the `--attributes` command line argument which can be used to mark a slave with
+static attributes (not to be confused with `--resources`, which are dynamic and accounted).
+
+Aurora makes these attributes available for matching with scheduling
+[constraints](/documentation/0.11.0/configuration-reference/#specifying-scheduling-constraints).  Most of these
+constraints are arbitrary and available for custom use.  There is one exception, though: the
+`dedicated` attribute.  Aurora treats this attribute specially: only matching jobs may run on
+machines carrying it, and those jobs will be scheduled only on such machines.
+
+See the [section](/documentation/0.11.0/resources/#resource-quota) about resource quotas to learn how quotas apply to
+dedicated jobs.
+
+##### Syntax
+The dedicated attribute has semantic meaning. The format is `$role(/.*)?`. When a job is created,
+the scheduler requires that the `$role` component matches the `role` field in the job
+configuration, and will reject the job creation otherwise.  The remainder of the attribute is
+free-form. We've developed the idiom of formatting this attribute as `$role/$job`, but do not
+enforce this.
+
+##### Example
+Consider the following slave command line:
+
+    mesos-slave --attributes="dedicated:db_team/redis" ...
+
+And this job configuration:
+
+    Service(
+      name = 'redis',
+      role = 'db_team',
+      constraints = {
+        'dedicated': 'db_team/redis'
+      },
+      ...
+    )
+
+The job configuration indicates that it should only be scheduled on slaves with the attribute
+`dedicated:db_team/redis`.  Additionally, Aurora will prevent any tasks that do _not_ have that
+constraint from running on those slaves.
+
+## Best practices
+### Diversity
+Data centers are often organized with hierarchical failure domains.  Common failure domains
+include hosts, racks, rows, and PDUs.  If you have this information available, it is wise to tag
+the mesos-slave with them as
+[attributes](https://mesos.apache.org/documentation/attributes-resources/).
+
+When it comes time to schedule jobs, Aurora will automatically spread them across the failure
+domains as specified in the
+[job configuration](/documentation/0.11.0/configuration-reference/#specifying-scheduling-constraints).
+
+Note: in virtualized environments like EC2, the only attribute that usually makes sense for this
+purpose is `host`.
+
+## Common problems
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions here to help get you moving.
+
+### Replicated log not initialized
+
+#### Symptoms
+- Scheduler RPCs and web interface claim `Storage is not READY`
+- Scheduler log repeatedly prints messages like
+
+  ```
+  I1016 16:12:27.234133 26081 replica.cpp:638] Replica in EMPTY status
+  received a broadcasted recover request
+  I1016 16:12:27.234256 26084 recover.cpp:188] Received a recover response
+  from a replica in EMPTY status
+  ```
+
+#### Solution
+When you create a new cluster, you need to inform a quorum of schedulers that they are safe to
+consider their database to be empty by [initializing](#initializing-the-replicated-log) the
+replicated log. This is done to prevent the scheduler from modifying the cluster state in the event
+of multiple simultaneous disk failures or, more likely, misconfiguration of the replicated log path.
+
+### Scheduler not registered
+
+#### Symptoms
+Scheduler log contains
+
+    Framework has not been registered within the tolerated delay.
+
+#### Solution
+Double-check that the scheduler is configured correctly to reach the master. If you are registering
+the master in ZooKeeper, make sure the command-line argument to the master:
+
+    --zk=zk://$ZK_HOST:2181/mesos/master
+
+is the same as the one on the scheduler:
+
+    -mesos_master_address=zk://$ZK_HOST:2181/mesos/master
+
+## Changing Scheduler Quorum Size
+Special care needs to be taken when changing the size of the Aurora scheduler quorum.
+Since Aurora uses a Mesos replicated log, similar steps need to be followed as when
+[changing the mesos quorum size](http://mesos.apache.org/documentation/latest/operational-guide).
+
+### Preparation
+Increase [-native_log_quorum_size](/documentation/0.11.0/storage-config/#-native_log_quorum_size) on each
+existing scheduler and restart them. When updating from 3 to 5 schedulers, the quorum size
+would grow from 2 to 3.
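+
+For example, when growing from 3 to 5 schedulers, each existing scheduler's flag would change
+from:
+
+    -native_log_quorum_size=2
+
+to:
+
+    -native_log_quorum_size=3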
+
+### Adding New Schedulers
+Start the new schedulers with `-native_log_quorum_size` set to the new value. Failing to
+first increase the quorum size on running schedulers can in some cases result in corruption
+or truncating of the replicated log used by Aurora. In that case, see the documentation on
+[recovering from backup](/documentation/0.11.0/storage-config/#recovering-from-a-scheduler-backup).
diff --git a/source/documentation/0.11.0/design/command-hooks.md b/source/documentation/0.11.0/design/command-hooks.md
new file mode 100644
index 0000000..3f3f70f
--- /dev/null
+++ b/source/documentation/0.11.0/design/command-hooks.md
@@ -0,0 +1,102 @@
+# Command Hooks for the Aurora Client
+
+## Introduction/Motivation
+
+We've got hooks in the client that surround API calls. These are
+pretty awkward, because they don't correlate with user actions. For
+example, suppose we wanted a policy that said users weren't allowed to
+kill all instances of a production job at once.
+
+Right now, all that we could hook would be the "killJob" api call. But
+kill (at least in newer versions of the client) normally runs in
+batches. If a user called killall, what we would see on the API level
+is a series of "killJob" calls, each of which specified a batch of
+instances. We wouldn't be able to distinguish between really killing
+all instances of a job (which is forbidden under this policy), and
+carefully killing in batches (which is permitted). In each case, the
+hook would just see a series of API calls, and couldn't find out what
+the actual command being executed was!
+
+For most policy enforcement, what we really want to be able to do is
+look at and vet the commands that a user is performing, not the API
+calls that the client uses to implement those commands.
+
+So I propose that we add a new kind of hooks, which surround noun/verb
+commands. A hook will register itself to handle a collection of (noun,
+verb) pairs. Whenever any of those noun/verb commands are invoked, the
+hook's methods will be called around the execution of the verb. A
+pre-hook will have the ability to reject a command, preventing the
+verb from being executed.
+
+## Registering Hooks
+
+These hooks will be registered via configuration plugins. A configuration plugin
+can register hooks using an API. Hooks registered this way are, effectively,
+hardwired into the client executable.
+
+The order of execution of hooks is unspecified: they may be called in
+any order. There is no way to guarantee that one hook will execute
+before some other hook.
+
+
+### Global Hooks
+
+Commands registered by the python call are called _global_ hooks,
+because they will run for all configurations, whether or not they
+specify any hooks in the configuration file.
+
+In the implementation, hooks are registered in the module
+`apache.aurora.client.cli.command_hooks`, using the class
+`GlobalCommandHookRegistry`. A global hook can be registered by calling
+`GlobalCommandHookRegistry.register_command_hook` in a configuration plugin.
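+
+A minimal sketch of what such a registration might look like inside a configuration plugin
+(`MyPolicyHook` stands in for a hypothetical hook class implementing the API below):
+
+    from apache.aurora.client.cli.command_hooks import GlobalCommandHookRegistry
+
+    GlobalCommandHookRegistry.register_command_hook(MyPolicyHook())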
+
+### The API
+
+    class CommandHook(object):
+      @property
+      def name(self):
+        """Returns a name for the hook."""
+
+      def get_nouns(self):
+        """Return the nouns that have verbs that should invoke this hook."""
+
+      def get_verbs(self, noun):
+        """Return the verbs for a particular noun that should invoke his hook."""
+
+      @abstractmethod
+      def pre_command(self, noun, verb, context, commandline):
+        """Execute a hook before invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook.
+        * commandline: the original argv collection used to invoke the client.
+        Returns: True if the command should be allowed to proceed; False if the command
+        should be rejected.
+        """
+
+      def post_command(self, noun, verb, context, commandline, result):
+        """Execute a hook after invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook.
+        * commandline: the original argv collection used to invoke the client.
+        * result: the result code returned by the verb.
+        Returns: nothing
+        """
+
+    class GlobalCommandHookRegistry(object):
+      @classmethod
+      def register_command_hook(cls, hook):
+        pass
+
+### Skipping Hooks
+
+To skip a hook, a user uses a command-line option, `--skip-hooks`. The option can either
+specify specific hooks to skip, or "all":
+
+* `aurora --skip-hooks=all job create east/bozo/devel/myjob` will create a job
+  without running any hooks.
+* `aurora --skip-hooks=test,iq create east/bozo/devel/myjob` will create a job,
+  and will skip only the hooks named "test" and "iq".
diff --git a/source/documentation/0.11.0/developing-aurora-client.md b/source/documentation/0.11.0/developing-aurora-client.md
new file mode 100644
index 0000000..b4e0106
--- /dev/null
+++ b/source/documentation/0.11.0/developing-aurora-client.md
@@ -0,0 +1,93 @@
+Getting Started
+===============
+
+The client is written in Python, and uses the
+[Pants](http://pantsbuild.github.io/python-readme.html) build tool.
+
+Client Configuration
+====================
+
+The client uses a configuration file that specifies available clusters. More information about the
+contents of this file can be found in the
+[Client Cluster Configuration](/documentation/0.11.0/client-cluster-configuration/) documentation. Information about
+how the client locates this file can be found in the
+[Client Commands](/documentation/0.11.0/client-commands/#cluster-configuration) documentation.
+
+Building and Testing the Client
+===============================
+
+Building and testing the client code are both done using Pants. The relevant targets to know about
+are:
+
+   * Build a client executable: `./pants binary src/main/python/apache/aurora/client:aurora`
+   * Test client code: `./pants test src/test/python/apache/aurora/client/cli:all`
+
+If you want to build a source distribution of the client, you need to run `./build-support/release/make-python-sdists`.
+
+Running/Debugging the Client
+============================
+
+For manually testing client changes against a cluster, we use [Vagrant](https://www.vagrantup.com/).
+To start a virtual cluster, you need to install Vagrant, and then run `vagrant up` from the root of
+the aurora workspace. This will create a vagrant host named "devcluster", with a mesos master, a set
+of mesos slaves, and an aurora scheduler.
+
+If you have a change you would like to test in your local cluster, you'll rebuild the client:
+
+    vagrant ssh -c 'aurorabuild client'
+
+Once this completes, the `aurora` command will reflect your changes.
+
+Running/Debugging the Client in PyCharm
+=======================================
+
+It's possible to use PyCharm to run and debug both the client and client tests in an IDE. In order
+to do this, first run:
+
+    build-support/python/make-pycharm-virtualenv
+
+This script will configure a virtualenv with all of our Python requirements. Once the script
+completes it will emit instructions for configuring PyCharm:
+
+    Your PyCharm environment is now set up.  You can open the project root
+    directory with PyCharm.
+
+    Once the project is loaded:
+      - open project settings
+      - click 'Project Interpreter'
+      - click the cog in the upper-right corner
+      - click 'Add Local'
+      - select 'build-support/python/pycharm.venv/bin/python'
+      - click 'OK'
+
+### Running/Debugging Tests
+
+After following these instructions, you should now be able to run/debug tests directly from the IDE
+by right-clicking on a test (or test class) and choosing to run or debug:
+
+[![Debug Client Test](images/debug-client-test.png)](images/debug-client-test.png)
+
+If you've set a breakpoint, you can see the run will now stop and let you debug:
+
+[![Debugging Client Test](images/debugging-client-test.png)](images/debugging-client-test.png)
+
+### Running/Debugging the Client
+
+Actually running and debugging the client is unfortunately a bit more complex. You'll need to create
+a Run configuration:
+
+* Go to Run → Edit Configurations
+* Click the + icon to add a new configuration.
+* Choose python and name the configuration 'client'.
+* Set the script path to `/your/path/to/aurora/src/main/python/apache/aurora/client/cli/client.py`
+* Set the script parameters to the command you want to run (e.g. `job status <job key>`)
+* Expand the Environment section and click the ellipsis to add a new environment variable
+* Click the + at the bottom to add a new variable named AURORA_CONFIG_ROOT whose value is the
+  path where your cluster configuration can be found. For example, to talk to the scheduler
+  running in the vagrant image, it would be set to `/your/path/to/aurora/examples/vagrant` (this
+  is the directory where our example clusters.json is found).
+* You should now be able to run and debug this configuration!
+
+Making thrift schema changes
+============================
+See [this document](/documentation/0.11.0/thrift-deprecation/) for any thrift related changes.
diff --git a/source/documentation/0.11.0/developing-aurora-scheduler.md b/source/documentation/0.11.0/developing-aurora-scheduler.md
new file mode 100644
index 0000000..62522ab
--- /dev/null
+++ b/source/documentation/0.11.0/developing-aurora-scheduler.md
@@ -0,0 +1,163 @@
+Java code in the aurora repo is built with [Gradle](http://gradle.org).
+
+
+Prerequisite
+============
+
+When using Apache Aurora checked out from the source repository or the binary
+distribution, the Gradle wrapper and JavaScript dependencies are provided.
+However, you need to manually install them when using the source release
+downloads:
+
+1. Install Gradle following the instructions on the [Gradle web site](http://gradle.org)
+2. From the root directory of the Apache Aurora project generate the gradle
+wrapper by running:
+
+    gradle wrapper
+
+
+Getting Started
+===============
+
+You will need Java 8 installed and on your `PATH` or unzipped somewhere with `JAVA_HOME` set. Then
+
+    ./gradlew tasks
+
+will bootstrap the build system and show available tasks. This can take a while the first time you
+run it but subsequent runs will be much faster due to cached artifacts.
+
+Running the Tests
+-----------------
+Aurora has a comprehensive unit test suite. To run the tests use
+
+    ./gradlew build
+
+Gradle will only re-run tests when their dependencies have changed. To force a re-run of all
+tests use
+
+    ./gradlew clean build
+
+Running the build with code quality checks
+------------------------------------------
+To speed up development iteration, the plain gradle commands will not run static analysis tools.
+However, you should run these before posting a review diff, and **always** run this before pushing a
+commit to origin/master.
+
+    ./gradlew build -Pq
+
+Running integration tests
+-------------------------
+To run the same tests that are run in the Apache Aurora continuous integration
+environment:
+
+    ./build-support/jenkins/build.sh
+
+
+In addition, there is an end-to-end test that runs a suite of aurora commands
+using a virtual cluster:
+
+    bash src/test/sh/org/apache/aurora/e2e/test_end_to_end.sh
+
+
+
+Creating a bundle for deployment
+--------------------------------
+Gradle can create a zip file containing Aurora, all of its dependencies, and a launch script with
+
+    ./gradlew distZip
+
+or a tar file containing the same files with
+
+    ./gradlew distTar
+
+The output file will be written to `dist/distributions/aurora-scheduler.zip` or
+`dist/distributions/aurora-scheduler.tar`.
+
+Developing Aurora Java code
+===========================
+
+Setting up an IDE
+-----------------
+Gradle can generate project files for your IDE. To generate an IntelliJ IDEA project run
+
+    ./gradlew idea
+
+and import the generated `aurora.ipr` file.
+
+Adding or Upgrading a Dependency
+--------------------------------
+New dependencies can be added from Maven central by adding a `compile` dependency to `build.gradle`.
+For example, to add a dependency on `com.example`'s `example-lib` 1.0 add this block:
+
+    compile 'com.example:example-lib:1.0'
+
+NOTE: Anyone thinking about adding a new dependency should first familiarize themselves with the
+Apache Foundation's third-party licensing
+[policy](http://www.apache.org/legal/resolved.html#category-x).
+
+Developing Aurora UI
+======================
+
+Installing bower (optional)
+----------------------------
+Third party JS libraries used in Aurora (located at 3rdparty/javascript/bower_components) are
+managed by bower, a JS dependency manager. Bower is only required if you plan to add, remove or
+update JS libraries. Bower can be installed using the following command:
+
+    npm install -g bower
+
+Bower depends on node.js and npm. The easiest way to install node on a Mac is via brew:
+
+    brew install node
+
+For more node.js installation options refer to https://github.com/joyent/node/wiki/Installation.
+
+More info on installing and using bower can be found at: http://bower.io/. Once installed, you can
+use the following commands to view and modify the bower repo at
+3rdparty/javascript/bower_components:
+
+    bower list
+    bower install <library name>
+    bower remove <library name>
+    bower update <library name>
+    bower help
+
+Faster Iteration in Vagrant
+---------------------------
+The scheduler serves UI assets from the classpath. For production deployments this means the assets
+are served from within a jar. However, for faster development iteration, the vagrant image is
+configured to add `/vagrant/dist/resources/main` to the head of CLASSPATH. This path is configured
+as a shared filesystem to the path on the host system where your Aurora repository lives. This means
+that any updates to dist/resources/main in your checkout will be reflected immediately in the UI
+served from within the vagrant image.
+
+The one caveat to this is that this path is under `dist` not `src`. This is because the assets must
+be processed by gradle before they can be served. So, unfortunately, you cannot just save your local
+changes and see them reflected in the UI, you must first run `./gradlew processResources`. This is
+less than ideal, but better than having to restart the scheduler after every change. Additionally,
+gradle makes this process somewhat easier with the use of the `--continuous` flag. If you run
+`./gradlew processResources --continuous`, gradle will monitor the filesystem for changes and run the
+task automatically as necessary. This doesn't quite provide hot-reload capabilities, but it does
+allow for <5s from save to changes being visible in the UI with no further action required on the
+part of the developer.
+
+Developing the Aurora Build System
+==================================
+
+Bootstrapping Gradle
+--------------------
+The following files were autogenerated by `gradle wrapper` using gradle 1.8's
+[Wrapper](http://www.gradle.org/docs/1.8/dsl/org.gradle.api.tasks.wrapper.Wrapper.html) plugin and
+should not be modified directly:
+
+    ./gradlew
+    ./gradlew.bat
+    ./gradle/wrapper/gradle-wrapper.jar
+    ./gradle/wrapper/gradle-wrapper.properties
+
+To upgrade Gradle unpack the new version somewhere, run `/path/to/new/gradle wrapper` in the
+repository root and commit the changed files.
+
+Making thrift schema changes
+============================
+See [this document](/documentation/0.11.0/thrift-deprecation/) for any thrift related changes.
diff --git a/source/documentation/0.11.0/hooks.md b/source/documentation/0.11.0/hooks.md
new file mode 100644
index 0000000..28307ab
--- /dev/null
+++ b/source/documentation/0.11.0/hooks.md
@@ -0,0 +1,244 @@
+# Hooks for Aurora Client API
+
+- [Introduction](#introduction)
+- [Hook Types](#hook-types)
+- [Execution Order](#execution-order)
+- [Hookable Methods](#hookable-methods)
+- [Activating and Using Hooks](#activating-and-using-hooks)
+- [.aurora Config File Settings](#aurora-config-file-settings)
+- [Command Line](#command-line)
+- [Hooks Protocol](#hooks-protocol)
+  - [pre_ Methods](#pre_-methods)
+  - [err_ Methods](#err_-methods)
+  - [post_ Methods](#post_-methods)
+- [Generic Hooks](#generic-hooks)
+- [Hooks Process Checklist](#hooks-process-checklist)
+
+## Introduction
+
+You can execute hook methods around Aurora API Client methods when they are called by the Aurora Command Line commands.
+
+Explaining how hooks work is a bit tricky because of some indirection about what they apply to. Basically, a hook is code that executes when a particular Aurora Client API method runs, letting you extend the method's actions. The hook executes on the client side, specifically on the machine executing Aurora commands.
+
+The catch is that hooks are associated with Aurora Client API methods, which users don't directly call. Instead, users call Aurora Command Line commands, which call Client API methods during their execution. Since which hooks run depends on which Client API methods get called, you will need to know which Command Line commands call which API methods. Later on, there is a table showing the various associations.
+
+**Terminology Note**: From now on, "method(s)" refer to Client API methods, and "command(s)" refer to Command Line commands.
+
+## Hook Types
+
+Hooks have three basic types, differing by when they run with respect to their associated method.
+
+`pre_<method_name>`: When its associated method is called, the `pre_` hook executes first, then the called method. If the `pre_` hook fails, the method never runs. Later code that expected the method to succeed may be affected by this, possibly resulting in termination of the Aurora client.
+
+Note that a `pre_` hook can error-trap internally so it does not
+return `False`. Designers/contributors of new `pre_` hooks should
+consider whether or not to error-trap them. You can error trap at the
+highest level very generally and always pass the `pre_` hook by
+returning `True`. For example:
+
+```python
+def pre_create(...):
+  do_something()  # if do_something fails with an exception, the create_job is not attempted!
+  return True
+
+# However...
+def pre_create(...):
+  try:
+    do_something()  # may cause exception
+  except Exception:  # generic error trap will catch it
+    pass  # and ignore the exception
+  return True  # create_job will run in any case!
+```
+
+`post_<method_name>`: A `post_` hook executes after its associated method successfully finishes running. If it fails, the already executed method is unaffected. A `post_` hook's error is trapped, and any later operations are unaffected.
+
+`err_<method_name>`: Executes only when its associated method returns a status other than OK or throws an exception. If an `err_` hook fails, the already executed method is unaffected. An `err_` hook's error is trapped, and any later operations are unaffected.
+
+## Execution Order
+
+A command with `pre_`, `post_`, and `err_` hooks defined and activated for its called method executes in the following order when the method successfully executes:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and successfully finishes
+6. `post_` method hook runs
+7. Command code executes
+8. Command execution ends
+
+The following is what happens when, for the same command and hooks, the method associated with the command suffers an error and does not successfully finish executing:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and fails
+6. `err_` method hook runs
+7. Command Code executes (if `err_` method does not end the command execution)
+8. Command execution ends
+
+Note that the `post_` and `err_` hooks for the same method can never both run for a single execution of that method.
+
+## Hookable Methods
+
+You can associate `pre_`, `post_`, and `err_` hooks with the following methods. Since you do not directly interact with the methods, but rather the Aurora Command Line commands that call them, for each method we also list the command(s) that can call the method. Note that a different method or methods may be called by a command depending on how the command's other code executes. Similarly, multiple commands can call the same method. We also list the methods' argument signatures, which are used by their associated hooks. <a name="Chart"></a>
+
+  Aurora Client API Method | Client API Method Argument Signature | Aurora Command Line Command
+  -------------------------| ------------------------------------- | ---------------------------
+  ```create_job``` | ```self```, ```config``` | ```job create```, ```runtask```
+  ```restart``` | ```self```, ```job_key```, ```shards```, ```update_config```, ```health_check_interval_seconds``` | ```job restart```
+  ```kill_job``` | ```self```, ```job_key```, ```shards=None``` |  ```job kill```
+  ```start_cronjob``` | ```self```, ```job_key``` | ```cron start```
+  ```start_job_update``` | ```self```, ```config```, ```instances=None``` | ```update start```
+
+Some specific examples:
+
+* `pre_create_job` executes when a `create_job` method is called, and before the `create_job` method itself executes.
+
+* `post_cancel_update` executes after a `cancel_update` method has successfully finished running.
+
+* `err_kill_job` executes when the `kill_job` method is called, but doesn't successfully finish running.
+
+## Activating and Using Hooks
+
+By default, hooks are inactive. If you do not want to use hooks, you do not need to make any changes to your code. If you do want to use hooks, you will need to alter your `.aurora` config file to activate them both for the configuration as a whole as well as for individual `Job`s. And, of course, you will need to define in your config file what happens when a particular hook executes.
+
+## .aurora Config File Settings
+
+You can define a top-level `hooks` variable in any `.aurora` config file. `hooks` is a list of all objects that define hooks used by `Job`s defined in that config file. If you do not want to define any hooks for a configuration, `hooks` is optional.
+
+    hooks = [Object_with_defined_hooks1, Object_with_defined_hooks2]
+
+Be careful when assembling a config file using `include` on multiple smaller config files. If there are multiple files that assign a value to `hooks`, only the last assignment made will stick. For example, if `x.aurora` has `hooks = [a, b, c]` and `y.aurora` has `hooks = [d, e, f]` and `z.aurora` has, in this order, `include x.aurora` and `include y.aurora`, the `hooks` value will be `[d, e, f]`.
+
+Also, for any `Job` that you want to use hooks with, its `Job` definition in the `.aurora` config file must set an `enable_hooks` flag to `True` (it defaults to `False`). By default, hooks are disabled and you must enable them for `Job`s of your choice.
+
+To summarize, to use hooks for a particular job, you must both activate hooks for your config file as a whole, and for that job. Activating hooks only for individual jobs won't work, nor will only activating hooks for your config file as a whole. You must also specify the hooks' defining object in the `hooks` variable.
+
+Recall that `.aurora` config files are written in Pystachio. So the following turns on hooks for production jobs at cluster1 and cluster2, but leaves them off for similar jobs with a defined user role. Of course, you also need to list the objects that define the hooks in your config file's `hooks` variable.
+
+```python
+jobs = [
+        Job(enable_hooks = True, cluster = c, env = 'prod') for c in ('cluster1', 'cluster2')
+       ]
+jobs.extend(
+   Job(cluster = c, env = 'prod', role = getpass.getuser()) for c in ('cluster1', 'cluster2'))
+   # Hooks disabled for these jobs
+```
+
+## Command Line
+
+All Aurora Command Line commands now accept an `.aurora` config file as an optional parameter (some, of course, accept it as a required parameter). Whenever a command has a `.aurora` file parameter, any hooks specified and activated in the `.aurora` file can be used. For example:
+
+    aurora job restart cluster1/role/env/app myapp.aurora
+
+The command activates any hooks specified and activated in `myapp.aurora`. For the `restart` command, that is the only thing the `myapp.aurora` parameter does. So, if the command was the following, since there is no `.aurora` config file to specify any hooks, no hooks on the `restart` command can run.
+
+    aurora job restart cluster1/role/env/app
+
+## Hooks Protocol
+
+Any object defined in the `.aurora` config file can define hook methods. You should define your hook methods within a class, and then use the class name as a value in the `hooks` list in your config file.
+
+Note that you can define other methods in the class that its hook methods can call; all the logic of a hook does not have to be in its definition.
+
+The following example defines a class containing a `pre_kill_job` hook definition that calls another method defined in the class.
+
+```python
+# Defines a method pre_kill_job
+class KillConfirmer(object):
+  def confirm(self, msg):
+    return raw_input(msg).lower() == 'yes'
+
+  def pre_kill_job(self, job_key, shards=None):
+    shards = ('shards %s' % shards) if shards is not None else 'all shards'
+    return self.confirm('Are you sure you want to kill %s (%s)? (yes/no): '
+                        % (job_key, shards))
+```
+
+### pre_ Methods
+
+`pre_` methods have the signature:
+
+    pre_<API method name>(self, <associated method's signature>)
+
+`pre_` methods have the same signature as their associated method, with the addition of `self` as the first parameter. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `pre_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+If this method returns `False`, the API command call aborts.
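+
+A minimal sketch of a `pre_` hook written with the catch-all signature (`RestartVetoer` is a
+hypothetical example class):
+
+```python
+class RestartVetoer(object):
+  def pre_restart(self, *args, **kw):
+    # Returning False aborts the restart call.
+    return False
+```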
+
+### err_ Methods
+
+`err_` methods have the signature:
+
+    err_<API method name>(self, exc, <associated method's signature>)
+
+`err_` methods have the same signature as their associated method, with the addition of a first parameter `self` and a second parameter `exc`. `exc` is either a result with responseCode other than `ResponseCode.OK` or an `Exception`. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `err_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+`err_` method return codes are ignored.
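+
+A minimal sketch of an `err_` hook (`KillAlerter` is a hypothetical example class; `kill_job`'s
+signature is taken from the chart above):
+
+```python
+class KillAlerter(object):
+  def err_kill_job(self, exc, job_key, shards=None):
+    # exc is a non-OK result or an Exception; the return value is ignored.
+    print('kill_job failed for %s: %s' % (job_key, exc))
+```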
+
+### post_ Methods
+
+`post_` methods have the signature:
+
+    post_<API method name>(self, result, <associated method signature>)
+
+`post_` method parameters are `self`, then `result`, followed by the same parameter signature as their associated method. `result` is the result of the associated method call. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `post_` methods, you can use the `*` and `**` syntax to designate that all unspecified arguments are passed in a list to the `*`ed parameter and all unspecified named arguments with values are passed as name/value pairs to the `**`ed parameter.
+
+`post_` method return codes are ignored.
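+
+A minimal sketch of a `post_` hook (`CreateAuditor` is a hypothetical example class):
+
+```python
+class CreateAuditor(object):
+  def post_create_job(self, result, config):
+    # result is the value returned by create_job; the hook's return value is ignored.
+    print('create_job finished with result %s' % result)
+```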
+
+## Generic Hooks
+
+There are seven Aurora API methods which any of the three hook types can attach to. Thus, there are 21 possible hook/method combinations for a single `.aurora` config file. Say that you define `pre_` and `post_` hooks for the `restart` method. That leaves 19 undefined hook/method combinations: `err_restart` and the 3 `pre_`, `post_`, and `err_` hooks for each of the other 6 hookable methods. You can define what happens when any of these otherwise undefined 19 hooks execute via a generic hook, whose signature is:
+
+```python
+generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw)
+```
+
+where:
+
+* `hook_config` is a named tuple of `config` (the Pystachio `config` object) and `job_key`.
+
+* `event` is one of `pre`, `err`, or `post`, indicating which type of hook the generic hook is standing in for. For example, assume no specific hooks were defined for the `restart` API command. If `generic_hook` is defined and activated, and `restart` is called, `generic_hook` will effectively run as `pre_restart`, `post_restart`, and `err_restart`. You can use a selection statement on this value so that `generic_hook` will act differently based on whether it is standing in for a `pre_`, `post_`, or `err_` hook.
+
+* `method_name` is the Client API method name whose execution is causing this execution of the `generic_hook`.
+
+* `*args`, `**kw` are the API method arguments and keyword arguments respectively.
+* `result_or_err` is a tri-state parameter taking one of these three values:
+  1. `None` for `pre_` hooks
+  2. `result` for `post_` hooks
+  3. `exc` for `err_` hooks
+
+Example:
+
+```python
+# Overrides the standard do-nothing generic_hook by adding a log writing operation.
+from twitter.common import log
+
+class Logger(object):
+  '''Adds to the log every time a hookable API method is called.'''
+  def generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw):
+    log.info('%s: %s_%s of %s'
+             % (self.__class__.__name__, event, method_name, hook_config.job_key))
+```
+
+## Hooks Process Checklist
+
+1. In your `.aurora` config file, add a `hooks` variable. Note that you may want to define a `.aurora` file only for hook definitions and then include this file in multiple other config files that you want to use the same hooks.
+
+```python
+hooks = []
+```
+
+2. In the `hooks` variable, list all objects that define hooks used by `Job`s defined in this config:
+
+```python
+hooks = [Object_hook_definer1, Object_hook_definer2]
+```
+
+3. For each job that uses hooks in this config file, add `enable_hooks = True` to the `Job` definition. Note that this is necessary even if you only want to use the generic hook.
+
+4. Write your `pre_`, `post_`, and `err_` hook definitions as part of an object definition in your `.aurora` config file.
+
+5. If desired, write your `generic_hook` definition as part of an object definition in your `.aurora` config file. Remember, the object must be listed as a member of `hooks`.
+
+6. If your Aurora command line command does not otherwise take an `.aurora` config file argument, add the appropriate `.aurora` file as an argument in order to define and activate the configuration's hooks.
diff --git a/publish/documentation/latest/images/CPUavailability.png b/source/documentation/0.11.0/images/CPUavailability.png
similarity index 100%
copy from publish/documentation/latest/images/CPUavailability.png
copy to source/documentation/0.11.0/images/CPUavailability.png
Binary files differ
diff --git a/publish/documentation/latest/images/HelloWorldJob.png b/source/documentation/0.11.0/images/HelloWorldJob.png
similarity index 100%
copy from publish/documentation/latest/images/HelloWorldJob.png
copy to source/documentation/0.11.0/images/HelloWorldJob.png
Binary files differ
diff --git a/publish/documentation/latest/images/RoleJobs.png b/source/documentation/0.11.0/images/RoleJobs.png
similarity index 100%
copy from publish/documentation/latest/images/RoleJobs.png
copy to source/documentation/0.11.0/images/RoleJobs.png
Binary files differ
diff --git a/publish/documentation/latest/images/ScheduledJobs.png b/source/documentation/0.11.0/images/ScheduledJobs.png
similarity index 100%
copy from publish/documentation/latest/images/ScheduledJobs.png
copy to source/documentation/0.11.0/images/ScheduledJobs.png
Binary files differ
diff --git a/publish/documentation/latest/images/TaskBreakdown.png b/source/documentation/0.11.0/images/TaskBreakdown.png
similarity index 100%
copy from publish/documentation/latest/images/TaskBreakdown.png
copy to source/documentation/0.11.0/images/TaskBreakdown.png
Binary files differ
diff --git a/publish/documentation/latest/images/aurora_hierarchy.png b/source/documentation/0.11.0/images/aurora_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/aurora_hierarchy.png
copy to source/documentation/0.11.0/images/aurora_hierarchy.png
Binary files differ
diff --git a/publish/assets/img/aurora_logo.png b/source/documentation/0.11.0/images/aurora_logo.png
similarity index 100%
rename from publish/assets/img/aurora_logo.png
rename to source/documentation/0.11.0/images/aurora_logo.png
Binary files differ
diff --git a/source/documentation/0.11.0/images/components.odg b/source/documentation/0.11.0/images/components.odg
new file mode 100644
index 0000000..a0f3b28
--- /dev/null
+++ b/source/documentation/0.11.0/images/components.odg
Binary files differ
diff --git a/source/documentation/0.11.0/images/components.png b/source/documentation/0.11.0/images/components.png
new file mode 100644
index 0000000..e8228c0
--- /dev/null
+++ b/source/documentation/0.11.0/images/components.png
Binary files differ
diff --git a/source/documentation/0.11.0/images/debug-client-test.png b/source/documentation/0.11.0/images/debug-client-test.png
new file mode 100644
index 0000000..8b67a10
--- /dev/null
+++ b/source/documentation/0.11.0/images/debug-client-test.png
Binary files differ
diff --git a/source/documentation/0.11.0/images/debugging-client-test.png b/source/documentation/0.11.0/images/debugging-client-test.png
new file mode 100644
index 0000000..df7b7e3
--- /dev/null
+++ b/source/documentation/0.11.0/images/debugging-client-test.png
Binary files differ
diff --git a/publish/documentation/latest/images/killedtask.png b/source/documentation/0.11.0/images/killedtask.png
similarity index 100%
copy from publish/documentation/latest/images/killedtask.png
copy to source/documentation/0.11.0/images/killedtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/lifeofatask.png b/source/documentation/0.11.0/images/lifeofatask.png
similarity index 100%
copy from publish/documentation/latest/images/lifeofatask.png
copy to source/documentation/0.11.0/images/lifeofatask.png
Binary files differ
diff --git a/source/documentation/0.11.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png b/source/documentation/0.11.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
new file mode 100644
index 0000000..ea9866e
--- /dev/null
+++ b/source/documentation/0.11.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
Binary files differ
diff --git a/source/documentation/0.11.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png b/source/documentation/0.11.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
new file mode 100644
index 0000000..c9539df
--- /dev/null
+++ b/source/documentation/0.11.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
Binary files differ
diff --git a/source/documentation/0.11.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png b/source/documentation/0.11.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
new file mode 100644
index 0000000..99674c7
--- /dev/null
+++ b/source/documentation/0.11.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.11.0/images/presentations/02_28_2015_apache_aurora_thumb.png b/source/documentation/0.11.0/images/presentations/02_28_2015_apache_aurora_thumb.png
new file mode 100644
index 0000000..6e0e1fb
--- /dev/null
+++ b/source/documentation/0.11.0/images/presentations/02_28_2015_apache_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.11.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png b/source/documentation/0.11.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
new file mode 100644
index 0000000..6ebce80
--- /dev/null
+++ b/source/documentation/0.11.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.11.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png b/source/documentation/0.11.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
new file mode 100644
index 0000000..f8a3db1
--- /dev/null
+++ b/source/documentation/0.11.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
Binary files differ
diff --git a/source/documentation/0.11.0/images/presentations/08_21_2014_past_present_future_thumb.png b/source/documentation/0.11.0/images/presentations/08_21_2014_past_present_future_thumb.png
new file mode 100644
index 0000000..deaebe7
--- /dev/null
+++ b/source/documentation/0.11.0/images/presentations/08_21_2014_past_present_future_thumb.png
Binary files differ
diff --git a/publish/documentation/latest/images/runningtask.png b/source/documentation/0.11.0/images/runningtask.png
similarity index 100%
copy from publish/documentation/latest/images/runningtask.png
copy to source/documentation/0.11.0/images/runningtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/stderr.png b/source/documentation/0.11.0/images/stderr.png
similarity index 100%
copy from publish/documentation/latest/images/stderr.png
copy to source/documentation/0.11.0/images/stderr.png
Binary files differ
diff --git a/publish/documentation/latest/images/stdout.png b/source/documentation/0.11.0/images/stdout.png
similarity index 100%
copy from publish/documentation/latest/images/stdout.png
copy to source/documentation/0.11.0/images/stdout.png
Binary files differ
diff --git a/publish/documentation/latest/images/storage_hierarchy.png b/source/documentation/0.11.0/images/storage_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/storage_hierarchy.png
copy to source/documentation/0.11.0/images/storage_hierarchy.png
Binary files differ
diff --git a/source/documentation/0.11.0/index.html.md b/source/documentation/0.11.0/index.html.md
new file mode 100644
index 0000000..35dcaa5
--- /dev/null
+++ b/source/documentation/0.11.0/index.html.md
@@ -0,0 +1,44 @@
+## Introduction
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run long-running services that take advantage of Apache Mesos' scalability, fault-tolerance, and resource isolation. This documentation has been organized into sections with three audiences in mind:
+
+ * Users: General information about the project and how to run an Aurora job.
+ * Operators: For those who wish to manage and fine-tune an Aurora cluster.
+ * Developers: All the information you need to start modifying Aurora and contributing back to the project.
+
+We encourage you to ask questions on the [Aurora developer list](http://aurora.apache.org/community/) or the `#aurora` IRC channel on `irc.freenode.net`.
+
+## Users
+ * [Install Aurora on virtual machines on your local machine](/documentation/0.11.0/vagrant/)
+ * [Hello World Tutorial](/documentation/0.11.0/tutorial/)
+ * [User Guide](/documentation/0.11.0/user-guide/)
+ * [Configuration Tutorial](/documentation/0.11.0/configuration-tutorial/)
+ * [Aurora + Thermos Reference](/documentation/0.11.0/configuration-reference/)
+ * [Command Line Client](/documentation/0.11.0/client-commands/)
+ * [Client cluster configuration](/documentation/0.11.0/client-cluster-configuration/)
+ * [Cron Jobs](/documentation/0.11.0/cron-jobs/)
+
+## Operators
+ * [Installation](/documentation/0.11.0/installing/)
+ * [Deployment and cluster configuration](/documentation/0.11.0/deploying-aurora-scheduler/)
+ * [Security](/documentation/0.11.0/security/)
+ * [Monitoring](/documentation/0.11.0/monitoring/)
+ * [Hooks for Aurora Client API](/documentation/0.11.0/hooks/)
+ * [Scheduler Storage](/documentation/0.11.0/storage/)
+ * [Scheduler Storage and Maintenance](/documentation/0.11.0/storage-config/)
+ * [Scheduler Storage Performance Tuning](/documentation/0.11.0/scheduler-storage/)
+ * [SLA Measurement](/documentation/0.11.0/sla/)
+ * [Resource Isolation and Sizing](/documentation/0.11.0/resources/)
+
+## Developers
+ * [Contributing to the project](/documentation/0.11.0/contributing/)
+ * [Developing the Aurora Scheduler](/documentation/0.11.0/developing-aurora-scheduler/)
+ * [Developing the Aurora Client](/documentation/0.11.0/developing-aurora-client/)
+ * [Committers Guide](/documentation/0.11.0/committers/)
+ * [Deprecation Guide](/documentation/0.11.0/thrift-deprecation/)
+ * [Build System](/documentation/0.11.0/build-system/)
+ * [Generating test resources](/documentation/0.11.0/test-resource-generation/)
+
+
+## Additional Resources
+ * [Tools integrating with Aurora](/documentation/0.11.0/tools/)
+ * [Presentation videos and slides](/documentation/0.11.0/presentations/)
diff --git a/source/documentation/0.11.0/installing.md b/source/documentation/0.11.0/installing.md
new file mode 100644
index 0000000..1427d9d
--- /dev/null
+++ b/source/documentation/0.11.0/installing.md
@@ -0,0 +1,274 @@
+# Installing Aurora
+
+- [Components](#components)
+    - [Machine profiles](#machine-profiles)
+      - [Coordinator](#coordinator)
+      - [Worker](#worker)
+      - [Client](#client)
+- [Getting Aurora](#getting-aurora)
+    - [Building your own binary packages](#building-your-own-binary-packages)
+    - [RPMs](#rpms)
+- [Installing the scheduler](#installing-the-scheduler)
+    - [Ubuntu Trusty](#ubuntu-trusty)
+    - [CentOS 7](#centos-7)
+    - [Finalizing](#finalizing)
+    - [Configuration](#configuration)
+- [Installing worker components](#installing-worker-components)
+    - [Ubuntu Trusty](#ubuntu-trusty-1)
+    - [CentOS 7](#centos-7-1)
+    - [Configuration](#configuration-1)
+- [Installing the client](#installing-the-client)
+    - [Ubuntu Trusty](#ubuntu-trusty-2)
+    - [CentOS 7](#centos-7-2)
+    - [Configuration](#configuration-2)
+- [See also](#see-also)
+- [Installing Mesos](#installing-mesos)
+    - [Mesos on Ubuntu Trusty](#mesos-on-ubuntu-trusty)
+    - [Mesos on CentOS 7](#mesos-on-centos-7)
+
+## Components
+Before installing Aurora, it's important to have an understanding of the components that make up
+a functioning Aurora cluster.
+
+![Aurora Components](images/components.png)
+
+* **Aurora scheduler**  
+  The scheduler will be your primary interface to the work you run in your cluster.  You will
+  instruct it to run jobs, and it will manage them in Mesos for you.  You will also frequently use
+  the scheduler's web interface as a heads-up display for what's running in your cluster.
+
+* **Aurora client**  
+  The client (`aurora` command) is a command line tool that exposes primitives that you can use to
+  interact with the scheduler.
+
+  Aurora also provides an admin client (`aurora_admin` command) that contains commands built for
+  cluster administrators.  You can use this tool to do things like manage user quotas and perform
+  graceful maintenance on machines in the cluster.
+
+* **Aurora executor**  
+  The executor (a.k.a. Thermos executor) is responsible for carrying out the workloads described in
+  the Aurora DSL (`.aurora` files).  The executor is what actually executes user processes.  It will
+  also perform health checking of tasks and register tasks in ZooKeeper for the purposes of dynamic
+  service discovery.  You can find lots more detail on the executor and Thermos in the
+  [user guide](/documentation/0.11.0/user-guide/).
+
+* **Aurora observer**  
+  The observer provides browser-based access to the status of individual tasks executing on worker
+  machines.  It gives insight into the processes executing, and facilitates browsing of task sandbox
+  directories.
+
+* **ZooKeeper**  
+  [ZooKeeper](http://zookeeper.apache.org) is a distributed consensus system.  In an Aurora cluster
+  it is used for reliable election of the leading Aurora scheduler and Mesos master.
+
+* **Mesos master**  
+  The master is responsible for tracking worker machines and performing accounting of their
+  resources.  The scheduler interfaces with the master to control the cluster.
+
+* **Mesos agent**  
+  The agent receives tasks assigned by the scheduler and executes them.  It interfaces with Linux
+  isolation systems like cgroups, namespaces and Docker to manage the resource consumption of tasks.
+  When a user task is launched, the agent will launch the executor (in the context of a Linux cgroup
+  or Docker container depending upon the environment), which will in turn fork user processes.
+
+## Machine profiles
+Given that many of these components communicate over the network, there are numerous ways you could
+assemble them to create an Aurora cluster.  The simplest way is to think in terms of three machine
+profiles:
+
+### Coordinator
+**Components**: ZooKeeper, Aurora scheduler, Mesos master
+
+A small number of machines (typically 3 or 5) responsible for cluster orchestration.  In most cases
+it is fine to co-locate these components in anything but very large clusters (> 1000 machines).
+Beyond that point, operators will likely want to manage these services on separate machines.
+
+In practice, 5 coordinators have been shown to reliably manage clusters with tens of thousands of
+machines.
+
+
+### Worker
+**Components**: Aurora executor, Aurora observer, Mesos agent
+
+The bulk of the cluster, where services will actually run.
+
+### Client
+**Components**: Aurora client, Aurora admin client
+
+Any machines that users submit jobs from.
+
+## Getting Aurora
+Source and binary distributions can be found on our
+[downloads](https://aurora.apache.org/downloads/) page.  Installing from binary packages is
+recommended for most users.
+
+### Building your own binary packages
+Our package build toolchain makes it easy to build your own packages if you would like.  See the
+[instructions](https://github.com/apache/aurora-packaging) to learn how.
+
+### RPMs
+We currently have work in progress to provide official RPMs.  As of this writing, the suggested way
+to get RPMs is to [build them](#building-your-own-binary-packages).
+
+We do have unofficial experimental RPMs available for testing purposes.
+
+**Use these RPMs at your own risk; they are not officially released under the ASF guidelines.**
+
+    echo '[apache-aurora-wfarner]
+    name=Apache Aurora distribution maintained by wfarner
+    baseurl=http://people.apache.org/~wfarner/aurora/distributions/0.9.0/rpm/centos-7/x86_64/
+    gpgcheck = 0' | sudo tee /etc/yum.repos.d/apache-aurora-wfarner.repo > /dev/null
+
+
+## Installing the scheduler
+### Ubuntu Trusty
+
+1. Install Mesos  
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        sudo start mesos-master
+
+2. Install ZooKeeper
+
+        sudo apt-get install -y zookeeperd
+
+3. Install the Aurora scheduler
+
+        wget -c https://apache.bintray.com/aurora/aurora-scheduler_0.10.0-1_amd64.deb
+        sudo dpkg -i aurora-scheduler_0.10.0-1_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos  
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-master
+
+2. Install ZooKeeper
+
+        sudo rpm -Uvh https://archive.cloudera.com/cdh4/one-click-install/redhat/6/x86_64/cloudera-cdh-4-0.x86_64.rpm
+        sudo yum install -y java-1.8.0-openjdk-headless zookeeper-server
+
+        sudo service zookeeper-server init
+        sudo systemctl start zookeeper-server
+
+3. Install the Aurora scheduler  
+   If you haven't already, read the section on [how to get Aurora RPMs](#rpms).
+
+        # Note: for older Aurora RPM versions, this may be called 'aurora'.
+        sudo yum install -y aurora-scheduler
+
+Note: if you are using the unreleased 0.9.0 RPM, you will need to edit `/etc/sysconfig/aurora`:  
+Change  
+`-mesos_master_address='zk://127.0.0.1:2181/mesos/master'`  
+To  
+`-mesos_master_address='zk://127.0.0.1:2181/mesos'`  
+And  
+`-native_log_file_path='/var/lib/aurora/db'`  
+To  
+`-native_log_file_path='/var/lib/aurora/scheduler/db'`
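+
+A sketch of making those two edits with `sed` (assumes the stock values shown above):
+
+    sudo sed -i \
+      -e 's|/mesos/master|/mesos|' \
+      -e 's|/var/lib/aurora/db|/var/lib/aurora/scheduler/db|' \
+      /etc/sysconfig/aurora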
+
+### Finalizing
+By default, the scheduler will start in an uninitialized mode.  This is because external
+coordination is necessary to be certain operator error does not result in a quorum of schedulers
+starting up and believing their databases are empty when in fact they should be re-joining a
+cluster.
+
+Because of this, a fresh install of the scheduler will need intervention to start up.  First,
+stop the scheduler service.  
+Ubuntu: `sudo stop aurora-scheduler`  
+CentOS: `sudo systemctl stop aurora`
+
+Now initialize the database:
+
+    sudo -u aurora mkdir -p /var/lib/aurora/scheduler/db
+    sudo -u aurora mesos-log initialize --path=/var/lib/aurora/scheduler/db
+
+Now you can start the scheduler back up.  
+Ubuntu: `sudo start aurora-scheduler`  
+CentOS: `sudo systemctl start aurora`
+
+### Configuration
+For more detail on this topic, see the dedicated page on
+[deploying the scheduler](/documentation/0.11.0/deploying-aurora-scheduler/)
+
+
+## Installing worker components
+### Ubuntu Trusty
+
+1. Install Mesos  
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        sudo start mesos-slave
+
+2. Install Aurora executor and observer
+
+        wget -c https://apache.bintray.com/aurora/aurora-executor_0.10.0-1_amd64.deb
+        sudo dpkg -i aurora-executor_0.10.0-1_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos  
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-slave
+
+2. Install Aurora executor and observer  
+   If you haven't already, read the section on [how to get Aurora RPMs](#rpms).
+
+        # Note: for older Aurora RPM versions, this may be called 'aurora-thermos'.
+        sudo yum install -y aurora-executor
+
+### Configuration
+The executor and observer typically do not require much configuration.  Command line arguments for
+the executor can be passed through via a flag on the scheduler.
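+
+For example (a sketch; check your scheduler version for flag availability), ZooKeeper-based task
+announcement can be turned on for all executors via the scheduler's `-thermos_executor_flags`
+argument:
+
+    # Passed on the scheduler command line; forwarded to every executor it launches.
+    -thermos_executor_flags="--announcer-enable --announcer-ensemble localhost:2181"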
+
+## Installing the client
+### Ubuntu Trusty
+
+    sudo apt-get install -y python2.7 wget
+
+    wget https://apache.bintray.com/aurora/aurora-tools_0.10.0-1_amd64.deb
+    sudo dpkg -i aurora-tools_0.10.0-1_amd64.deb
+
+### CentOS 7
+If you haven't already, read the section on [how to get Aurora RPMs](#rpms).
+
+    # Note: for older Aurora RPM versions, this may be called 'aurora-client'.
+    sudo yum install -y aurora-tools
+
+### Configuration
+Client configuration lives in a JSON file that describes the clusters available and how to reach
+them.  By default, this file is at `/etc/aurora/clusters.json`.
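+
+For reference, a minimal `clusters.json` might look like the following sketch (the name,
+ZooKeeper address, and path here are placeholders based on the Vagrant dev cluster):
+
+    [{
+      "name": "devcluster",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler",
+      "auth_mechanism": "UNAUTHENTICATED"
+    }]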
+
+Jobs may be submitted to the scheduler using the client, and are described with
+[job configurations](/documentation/0.11.0/configuration-reference/) expressed in `.aurora` files.  Typically you will
+maintain a single job configuration file to describe one or more deployment environments (e.g.
+dev, test, prod) for a production job.
+
+## See also
+We have other docs that you will find useful once you have your cluster up and running:
+
+- [Monitor](/documentation/0.11.0/monitoring/) your cluster
+- Enable scheduler [security](/documentation/0.11.0/security/)
+- View job SLA [statistics](/documentation/0.11.0/sla/)
+- Understand the internals of the scheduler's [storage](/documentation/0.11.0/storage/)
+
+## Installing Mesos
+Mesos uses a single package for the Mesos master and slave.  As a result, the package dependencies
+are identical for both.
+
+### Mesos on Ubuntu Trusty
+
+    sudo add-apt-repository ppa:openjdk-r/ppa -y
+    sudo apt-get update
+
+    sudo apt-get install -y software-properties-common wget libsvn1 libcurl3 openjdk-8-jre-headless
+
+    wget -c http://downloads.mesosphere.io/master/ubuntu/14.04/mesos_0.23.0-1.0.ubuntu1404_amd64.deb
+    sudo dpkg -i mesos_0.23.0-1.0.ubuntu1404_amd64.deb
+
+### Mesos on CentOS 7
+
+    sudo rpm -Uvh http://repos.mesosphere.io/el/7/noarch/RPMS/mesosphere-el-repo-7-1.noarch.rpm
+    sudo yum install -y mesos-0.22.0
diff --git a/source/documentation/0.11.0/monitoring.md b/source/documentation/0.11.0/monitoring.md
new file mode 100644
index 0000000..3cb2a79
--- /dev/null
+++ b/source/documentation/0.11.0/monitoring.md
@@ -0,0 +1,181 @@
+# Monitoring your Aurora cluster
+
+Before you start running important services in your Aurora cluster, it's important to set up
+monitoring and alerting of Aurora itself.  Most of your monitoring can be against the scheduler,
+since it will give you a global view of what's going on.
+
+## Reading stats
+The scheduler exposes a *lot* of instrumentation data via its HTTP interface. You can get a quick
+peek at the first few of these in our vagrant image:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars | head'
+    async_tasks_completed 1004
+    attribute_store_fetch_all_events 15
+    attribute_store_fetch_all_events_per_sec 0.0
+    attribute_store_fetch_all_nanos_per_event 0.0
+    attribute_store_fetch_all_nanos_total 3048285
+    attribute_store_fetch_all_nanos_total_per_sec 0.0
+    attribute_store_fetch_one_events 3391
+    attribute_store_fetch_one_events_per_sec 0.0
+    attribute_store_fetch_one_nanos_per_event 0.0
+    attribute_store_fetch_one_nanos_total 454690753
+
+These values are served as `Content-Type: text/plain`, with each line containing a space-separated metric
+name and value. Values may be integers, doubles, or strings (note: strings are static, others
+may be dynamic).
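+
+Since the format is one stat per line, spot-checking a single stat is a one-liner (the value
+shown is illustrative):
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars | grep ^task_store_LOST'
+    task_store_LOST 0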
+
+If your monitoring infrastructure prefers JSON, the scheduler exports that as well:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars.json | python -mjson.tool | head'
+    {
+        "async_tasks_completed": 1009,
+        "attribute_store_fetch_all_events": 15,
+        "attribute_store_fetch_all_events_per_sec": 0.0,
+        "attribute_store_fetch_all_nanos_per_event": 0.0,
+        "attribute_store_fetch_all_nanos_total": 3048285,
+        "attribute_store_fetch_all_nanos_total_per_sec": 0.0,
+        "attribute_store_fetch_one_events": 3409,
+        "attribute_store_fetch_one_events_per_sec": 0.0,
+        "attribute_store_fetch_one_nanos_per_event": 0.0,
+
+This will be the same data as above, served with `Content-Type: application/json`.
+
+## Viewing live stat samples on the scheduler
+The scheduler uses the Twitter commons stats library, which keeps an internal time-series database
+of exported variables - nearly everything in `/vars` is available for instant graphing.  This is
+useful for debugging, but is not a replacement for an external monitoring system.
+
+You can view these graphs on a scheduler at `/graphview`.  It supports some composition and
+aggregation of values, which can be invaluable when triaging a problem.  For example, if you have
+the scheduler running in vagrant, check out these links:
+
+* [simple graph](http://192.168.33.7:8081/graphview?query=jvm_uptime_secs)
+* [complex composition](http://192.168.33.7:8081/graphview?query=rate\(scheduler_log_native_append_nanos_total\)%2Frate\(scheduler_log_native_append_events\)%2F1e6)
+
+### Counters and gauges
+Among numeric stats, there are two fundamental types of stats exported: _counters_ and _gauges_.
+Counters are guaranteed to be monotonically-increasing for the lifetime of a process, while gauges
+may decrease in value.  Aurora uses counters to represent things like the number of times an event
+has occurred, and gauges to capture things like the current length of a queue.  Counters are a
+natural fit for accurate composition into [rate ratios](http://en.wikipedia.org/wiki/Rate_ratio)
+(useful for sample-resistant latency calculation), while gauges are not.
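+
+If your collection system does not compute rates for you, a counter's rate can be approximated
+by sampling it twice; a rough shell sketch (stat name as exported above):
+
+    a=$(curl -s localhost:8081/vars | awk '/^scheduler_log_native_append_events /{print $2}')
+    sleep 60
+    b=$(curl -s localhost:8081/vars | awk '/^scheduler_log_native_append_events /{print $2}')
+    echo "append events/sec over the last minute: $(( (b - a) / 60 ))"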
+
+# Alerting
+
+## Quickstart
+If you are looking for just bare-minimum alerting to get something in place quickly, set up alerting
+on `framework_registered` and `task_store_LOST`. These will give you a decent picture of overall
+health.
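+
+As a sketch of the bare-minimum check (assumes a single scheduler listening on `localhost:8081`):
+
+    registered=$(curl -s localhost:8081/vars | awk '/^framework_registered /{print $2}')
+    if [ "$registered" != "1" ]; then
+      echo "ALERT: scheduler is not registered with the Mesos master"
+    fi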
+
+## A note on thresholds
+One of the most difficult things in monitoring is choosing alert thresholds. With many of these
+stats, there is no value we can offer as a threshold that will be guaranteed to work for you. It
+will depend on the size of your cluster, number of jobs, churn of tasks in the cluster, etc. We
+recommend you start with a strict value after viewing a small amount of collected data, and then
+adjust thresholds as you see fit. Feel free to ask us if you would like to validate that your alerts
+and thresholds make sense.
+
+## Important stats
+
+### `jvm_uptime_secs`
+Type: integer counter
+
+The number of seconds the JVM process has been running. Comes from
+[RuntimeMXBean#getUptime()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/RuntimeMXBean.html#getUptime\(\)).
+
+Detecting resets (decreasing values) on this stat will tell you that the scheduler is failing to
+stay alive.
+
+Look at the scheduler logs to identify the reason the scheduler is exiting.
+
+### `system_load_avg`
+Type: double gauge
+
+The current load average of the system for the last minute. Comes from
+[OperatingSystemMXBean#getSystemLoadAverage()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/OperatingSystemMXBean.html?is-external=true#getSystemLoadAverage\(\)).
+
+A high sustained value suggests that the scheduler machine may be over-utilized.
+
+Use standard unix tools like `top` and `ps` to track down the offending process(es).
+
+### `process_cpu_cores_utilized`
+Type: double gauge
+
+The current number of CPU cores in use by the JVM process. This should not exceed the number of
+logical CPU cores on the machine. Derived from
+[OperatingSystemMXBean#getProcessCpuTime()](http://docs.oracle.com/javase/7/docs/jre/api/management/extension/com/sun/management/OperatingSystemMXBean.html).
+
+A high sustained value indicates that the scheduler is overworked. Due to current internal design
+limitations, if this value is sustained at `1`, there is a good chance the scheduler is under water.
+
+There are two main inputs that tend to drive this figure: task scheduling attempts and status
+updates from Mesos.  You may see activity in the scheduler logs to give an indication of where
+time is being spent.  Beyond that, it really takes good familiarity with the code to effectively
+triage this.  We suggest engaging with an Aurora developer.
+
+### `task_store_LOST`
+Type: integer gauge
+
+The number of tasks stored in the scheduler that are in the `LOST` state, and have been rescheduled.
+
+If this value is increasing at a high rate, it is a sign of trouble.
+
+There are many sources of `LOST` tasks in Mesos: the scheduler, master, slave, and executor can all
+trigger this.  The first step is to look in the scheduler logs for `LOST` to identify where the
+state changes are originating.
+
+### `scheduler_resource_offers`
+Type: integer counter
+
+The number of resource offers that the scheduler has received.
+
+For a healthy scheduler, this value must be increasing over time.
+
+Assuming the scheduler is up and otherwise healthy, you will want to check if the master thinks it
+is sending offers. You should also look at the master's web interface to see if it has a large
+number of outstanding offers that are waiting to be returned.
+
+### `framework_registered`
+Type: binary integer counter
+
+Will be `1` for the leading scheduler that is registered with the Mesos master, and `0` for passive
+schedulers.
+
+A sustained period without a `1` (or where `sum() != 1`) warrants investigation.
+
+If there is no leading scheduler, look in the scheduler and master logs for why.  If there are
+multiple schedulers claiming leadership, this suggests a split brain and warrants filing a critical
+bug.
+
+### `rate(scheduler_log_native_append_nanos_total)/rate(scheduler_log_native_append_events)`
+Type: rate ratio of integer counters
+
+This composes two counters to compute a windowed figure for the latency of replicated log writes.
+
+A hike in this value suggests disk bandwidth contention.
+
+Look in scheduler logs for any reported oddness with saving to the replicated log. Also use
+standard tools like `vmstat` and `iotop` to identify whether the disk has become slow or
+over-utilized. We suggest using a dedicated disk for the replicated log to mitigate this.
+
+### `timed_out_tasks`
+Type: integer counter
+
+Tracks the number of times the scheduler has given up on a task in a transient state (e.g.
+`ASSIGNED`, `KILLING`) after waiting for `-transient_task_state_timeout` to hear back about it,
+moving the task to `LOST` before rescheduling.
+
+This value is currently known to increase occasionally when the scheduler fails over
+([AURORA-740](https://issues.apache.org/jira/browse/AURORA-740)). However, any large spike in this
+value warrants investigation.
+
+The scheduler will log when it times out a task. You should trace the task ID of the timed out
+task into the master, slave, and/or executors to determine where the message was dropped.
+
+### `http_500_responses_events`
+Type: integer counter
+
+The total number of HTTP 500 status responses sent by the scheduler. Includes API and asset serving.
+
+An increase warrants investigation.
+
+Look in the scheduler logs to identify why the scheduler returned a 500; there should be a stack trace.
diff --git a/source/documentation/0.11.0/presentations.md b/source/documentation/0.11.0/presentations.md
new file mode 100644
index 0000000..837a826
--- /dev/null
+++ b/source/documentation/0.11.0/presentations.md
@@ -0,0 +1,50 @@
+# Apache Aurora Presentations
+Video and slides from presentations and panel discussions about Apache Aurora.
+
+_(Listed in descending order by date)_
+
+<table>
+	<tr>
+		<td><img src="/documentation/0.11.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png" alt="From Monolith to Microservices with Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=yXkOgnyK4Hw">From Monolith to Microservices w/ Aurora (Video)</a></strong>
+		<p>Presented by Thanos Baskous, Tony Dong, Dobromir Montauk</p>
+		<p>April 30, 2015 at <a href="http://www.meetup.com/Bay-Area-Apache-Aurora-Users-Group/events/221219480/">Bay Area Apache Aurora Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.11.0/images/presentations/02_28_2015_apache_aurora_thumb.png" alt="Apache Auroraの始めかた Slideshow Thumbnail" /></td>
+		<td><strong><a href="http://www.slideshare.net/zembutsu/apache-aurora-introduction-and-tutorial-osc15tk">Apache Auroraの始めかた (Slides)</a></strong>
+		<p>Presented by Masahito Zembutsu</p>
+		<p>February 28, 2015 at <a href="http://www.ospn.jp/osc2015-spring/">Open Source Conference 2015 Tokyo Spring</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.11.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png" alt="Apache Aurora Adopters Panel Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=2Jsj0zFdRlg">Apache Aurora Adopters Panel (Video)</a></strong>
+		<p>Panelists Ben Staffin, Josh Adams, Bill Farner, Berk Demir</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.11.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png" alt="Operating Apache Aurora and Mesos at Twitter Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=E4lxX6epM_U">Operating Apache Aurora and Mesos at Twitter (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.11.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png" alt="Apache Aurora and Mesos at TellApart" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=ZZXtXLvTXAE">Apache Aurora and Mesos at TellApart (Video)</a></strong>
+		<p>Presented by Steve Niemitz</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.11.0/images/presentations/08_21_2014_past_present_future_thumb.png" alt="Past, Present, and Future of the Aurora Scheduler Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=Dsc5CPhKs4o">Past, Present, and Future of the Aurora Scheduler (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 21, 2014 at <a href="http://events.linuxfoundation.org/events/archive/2014/mesoscon">#MesosCon 2014</a></p>
+</td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.11.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png" alt="Introduction to Apache Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=asd_h6VzaJc">Introduction to Apache Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 25, 2014 at <a href="https://www.eventbrite.com/e/aurora-and-mesosframeworksmeetup-tickets-10850994617">Aurora and Mesos Frameworks Meetup</a></p></td>
+	</tr>
+</table>
\ No newline at end of file
diff --git a/source/documentation/0.11.0/resources.md b/source/documentation/0.11.0/resources.md
new file mode 100644
index 0000000..4405227
--- /dev/null
+++ b/source/documentation/0.11.0/resources.md
@@ -0,0 +1,164 @@
+Resources and Sizing
+=============================
+
+- [Introduction](#introduction)
+- [CPU Isolation](#cpu-isolation)
+- [CPU Sizing](#cpu-sizing)
+- [Memory Isolation](#memory-isolation)
+- [Memory Sizing](#memory-sizing)
+- [Disk Space](#disk-space)
+- [Disk Space Sizing](#disk-space-sizing)
+- [Other Resources](#other-resources)
+- [Resource Quota](#resource-quota)
+- [Task Preemption](#task-preemption)
+
+## Introduction
+
+Aurora is a multi-tenant system; a single software instance runs on a
+server, serving multiple clients/tenants. To share resources among
+tenants, it implements isolation of:
+
+* CPU
+* memory
+* disk space
+
+CPU is a soft limit, and handled differently from memory and disk space.
+Too low a CPU value results in throttling your application and
+slowing it down. Memory and disk space are both hard limits; when your
+application goes over these values, it's killed.
+
+Let's look at each resource type in more detail:
+
+## CPU Isolation
+
+Mesos uses a quota-based CPU scheduler (the *Completely Fair Scheduler*)
+to provide consistent and predictable performance.  This is effectively
+a guarantee of resources -- you receive at least what you requested, but
+also no more than you've requested.
+
+The scheduler gives applications a CPU quota for every 100 ms interval.
+When an application uses its quota for an interval, it is throttled for
+the rest of the 100 ms. Usage resets for each interval and unused
+quota does not carry over.
+
+For example, an application specifying 4.0 CPU has access to 400 ms of
+CPU time every 100 ms. This CPU quota can be used in different ways,
+depending on the application and available resources. Consider the
+scenarios shown in this diagram.
+
+![CPU Availability](images/CPUavailability.png)
+
+* *Scenario A*: the application can use up to 4 cores continuously for
+every 100 ms interval. It is never throttled and starts processing
+new requests immediately.
+
+* *Scenario B*: the application uses up to 8 cores (depending on
+availability) but is throttled after 50 ms. The CPU quota resets at the
+start of each new 100 ms interval.
+
+* *Scenario C*: like Scenario A, but there is a garbage collection
+event in the second interval that consumes all CPU quota. The
+application is throttled for the remaining 75 ms of that interval and
+cannot service requests until the next interval. In this example, the
+garbage collection finished in one interval but, depending on how much
+garbage needs collecting, it may take more than one interval and further
+delay service of requests.
+
+*Technical Note*: Mesos considers logical cores, also known as
+hyperthreading or SMT cores, as the unit of CPU.
+
+## CPU Sizing
+
+To correctly size Aurora-run Mesos tasks, specify a per-shard CPU value
+that lets the task run at its desired performance when at peak load
+distributed across all shards. Include reserve capacity of at least 50%,
+possibly more, depending on how critical your service is (or how
+confident you are about your original estimate :-)), ideally by
+increasing the number of shards to also improve resiliency. When running
+your application, observe its CPU stats over time. If consistently at or
+near your quota during peak load, you should consider increasing either
+per-shard CPU or the number of shards.
+
+## Memory Isolation
+
+Mesos uses dedicated memory allocation. Your application always has
+access to the amount of memory specified in your configuration. The
+application's memory use is defined as the sum of the resident set size
+(RSS) of all processes in a shard. Each shard is considered
+independently.
+
+In other words, say you specified a memory size of 10GB. Each shard
+would receive 10GB of memory. If an individual shard's memory demands
+exceed 10GB, that shard is killed, but the other shards continue
+working.
+
+*Technical note*: Total memory size is not enforced at allocation time,
+so your application can request more than its allocation without getting
+an ENOMEM. However, it will be killed shortly after.
+
+## Memory Sizing
+
+Size for your application's peak requirement. Observe the per-instance
+memory statistics over time, as memory requirements can vary over
+different periods. Remember that if your application exceeds its memory
+value, it will be killed, so you should also add a safety margin of
+around 10-20%. If you have the ability to do so, you may also want to
+put alerts on the per-instance memory.
+
+## Disk Space
+
+Disk space used by your application is defined as the sum of the files'
+disk space in your application's directory, including the `stdout` and
+`stderr` logged from your application. Each shard is considered
+independently. You should use off-node storage for your application's
+data whenever possible.
+
+In other words, say you specified disk space size of 100MB. Each shard
+would receive 100MB of disk space. If an individual shard's disk space
+demands exceed 100MB, that shard is killed, but the other shards
+continue working.
+
+After your application finishes running, its allocated disk space is
+reclaimed. Thus, your job's final action should move any disk content
+that you want to keep, such as logs, to your home file system or other
+less transitory storage. Disk reclamation takes place an undefined
+period after the application finish time; until then, the disk contents
+are still available but you shouldn't count on them being so.
+
+*Technical note*: Disk space is not enforced at write time, so your
+application can write above its quota without getting an ENOSPC, but it
+will be killed shortly after. This is subject to change.
+
+## Disk Space Sizing
+
+Size for your application's peak requirement. Rotate and discard log
+files as needed to stay within your quota. When running a Java process,
+add the maximum size of the Java heap to your disk space requirement, in
+order to account for an out of memory error dumping the heap
+into the application's sandbox space.
+
+## Other Resources
+
+Other resources, such as network bandwidth, do not have any performance
+guarantees. For some resources, such as memory bandwidth, there are no
+practical sharing methods, so some application combinations co-located on
+the same host may cause contention.
+
+## Resource Quota
+
+Aurora requires resource quotas for
+[production non-dedicated jobs](/documentation/0.11.0/configuration-reference/#job-objects). Quota is enforced at
+the job role level and, when set, defines a non-preemptible pool of compute resources within
+that role.
+
+To grant quota to a particular role in production, use the `aurora_admin set_quota` command.
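+
+For example, granting quota to a hypothetical `www-data` role might look like the sketch below
+(the exact argument format varies by release; check `aurora_admin set_quota -h` before use):
+
+    aurora_admin set_quota devcluster www-data 100.0 100GB 400GB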
+
+NOTE: all job types (service, ad hoc, or cron) require a role resource quota unless the job has a
+[dedicated constraint set](/documentation/0.11.0/deploying-aurora-scheduler/#dedicated-attribute).
+
+## Task Preemption
+
+Under resource shortage pressure, tasks from
+[production](/documentation/0.11.0/configuration-reference/#job-objects) jobs may preempt tasks from any non-production
+job. A production task may only be preempted by tasks from production jobs in the same role with
+higher [priority](/documentation/0.11.0/configuration-reference/#job-objects).
\ No newline at end of file
diff --git a/source/documentation/latest/scheduler-storage.md b/source/documentation/0.11.0/scheduler-storage.md
similarity index 100%
copy from source/documentation/latest/scheduler-storage.md
copy to source/documentation/0.11.0/scheduler-storage.md
diff --git a/source/documentation/0.11.0/security.md b/source/documentation/0.11.0/security.md
new file mode 100644
index 0000000..f9b60e9
--- /dev/null
+++ b/source/documentation/0.11.0/security.md
@@ -0,0 +1,271 @@
+Aurora integrates with [Apache Shiro](http://shiro.apache.org/) to provide security
+controls for its API. In addition to providing some useful features out of the box, Shiro
+also allows Aurora cluster administrators to adapt the security system to their organization’s
+existing infrastructure.
+
+- [Enabling Security](#enabling-security)
+- [Authentication](#authentication)
+	- [HTTP Basic Authentication](#http-basic-authentication)
+		- [Server Configuration](#server-configuration)
+		- [Client Configuration](#client-configuration)
+	- [HTTP SPNEGO Authentication (Kerberos)](#http-spnego-authentication-kerberos)
+		- [Server Configuration](#server-configuration-1)
+		- [Client Configuration](#client-configuration-1)
+- [Authorization](#authorization)
+	- [Using an INI file to define security controls](#using-an-ini-file-to-define-security-controls)
+		- [Caveats](#caveats)
+- [Implementing a Custom Realm](#implementing-a-custom-realm)
+	- [Packaging a realm module](#packaging-a-realm-module)
+- [Known Issues](#known-issues)
+
+# Enabling Security
+
+There are two major components of security:
+[authentication and authorization](http://en.wikipedia.org/wiki/Authentication#Authorization).  A
+cluster administrator may choose the approach used for each, and may also implement custom
+mechanisms for either.  Later sections describe the options available.
+
+# Authentication
+
+At a minimum, the scheduler must be configured with instructions for how to process authentication
+credentials.  There are currently two built-in authentication schemes -
+[HTTP Basic Authentication](http://en.wikipedia.org/wiki/Basic_access_authentication), and
+[SPNEGO](http://en.wikipedia.org/wiki/SPNEGO) (Kerberos).
+
+## HTTP Basic Authentication
+
+Basic Authentication is a very quick way to add *some* security.  It is supported
+by all major browsers and HTTP client libraries with minimal work.  However,
+before relying on Basic Authentication you should be aware of the [security
+considerations](http://tools.ietf.org/html/rfc2617#section-4).
+
+### Server Configuration
+
+At a minimum you need to set three command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_realm_modules=INI_AUTHNZ
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+[users]
+sally = apple, admin
+
+[roles]
+admin = *
+```
+
+The details of the security.ini file are explained below. Note that this file contains plaintext,
+unhashed passwords.
+
+### Client Configuration
+
+To configure the client for HTTP Basic authentication, add an entry to `~/.netrc` with your credentials:
+
+```
+% cat ~/.netrc
+# ...
+
+machine aurora.example.com
+login sally
+password apple
+
+# ...
+```
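+
+Because `~/.netrc` holds a plaintext password, it is worth restricting its permissions; some
+tools refuse to read a netrc file that is readable by other users:
+
+```
+% chmod 600 ~/.netrc
+```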
+
+No changes are required to `clusters.json`.
+
+## HTTP SPNEGO Authentication (Kerberos)
+
+### Server Configuration
+At a minimum you need to set five command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=NEGOTIATE
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+% cat path/to/security.ini
+[users]
+sally = _, admin
+
+[roles]
+admin = *
+```
+
+What's going on here? First, Aurora must be configured to request Kerberos credentials when presented with an
+unauthenticated request. This is achieved by setting
+
+```
+-http_authentication_mechanism=NEGOTIATE
+```
+
+Next, a Realm module must be configured to **authenticate** the current request using the Kerberos
+credentials that were requested. Aurora ships with a realm module that can do this:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN[,...]
+```
+
+The Kerberos5Realm requires a keytab file and a server principal name. The principal name will usually
+be in the form `HTTP/aurora.example.com@EXAMPLE.COM`.
+
+```
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+```
+
+The Kerberos5 realm module is authentication-only. For scheduler security to work you must also
+enable a realm module that provides an Authorizer implementation. For example, to do this using the
+IniShiroRealmModule:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+```
+
+You can then configure authorization using a security.ini file as described below
+(the password field is ignored). You must configure the realm module with the path to this file:
+
+```
+-shiro_ini_path=path/to/security.ini
+```
+
+### Client Configuration
+To use Kerberos on the client side, you must build Kerberos-enabled client binaries. Do this with:
+
+```
+./pants binary src/main/python/apache/aurora/kerberos:kaurora
+./pants binary src/main/python/apache/aurora/kerberos:kaurora_admin
+```
+
+You must also configure each cluster where you've enabled Kerberos on the scheduler
+to use Kerberos authentication. Do this by setting `auth_mechanism` to `KERBEROS`
+in `clusters.json`.
+
+```
+% cat ~/.aurora/clusters.json
+{
+    "devcluser": {
+        "auth_mechanism": "KERBEROS",
+        ...
+    },
+    ...
+}
+```
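+
+With the cluster configured, the Kerberos-enabled client negotiates using your local ticket
+cache; acquire a ticket first if needed (the principal and job key below are placeholders):
+
+```
+% kinit sally@EXAMPLE.COM
+% kaurora job list devcluster/sally
+```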
+
+# Authorization
+Given a means to authenticate the identity a client claims, we need to define what privileges they have.
+
+## Using an INI file to define security controls
+
+The simplest security configuration for Aurora is an INI file on the scheduler.  For small
+clusters, or clusters where the users and access controls change relatively infrequently, this is
+likely the preferred approach.  However, you may want to avoid it if access permissions
+are rapidly changing, or if your access control information already exists in another system.
+
+You can enable INI-based configuration with the following scheduler command line arguments:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_ini_path=path/to/security.ini
+```
+
+*Note:* As the argument name reveals, this uses Shiro’s
+[IniRealm](http://shiro.apache.org/configuration.html#Configuration-INIConfiguration) behind
+the scenes.
+
+The INI file will contain two sections - users and roles.  Here’s an example of what might
+be in security.ini:
+
+```
+[users]
+sally = apple, admin
+jim = 123456, accounting
+becky = letmein, webapp
+larry = 654321, accounting
+steve = password
+
+[roles]
+admin = *
+accounting = thrift.AuroraAdmin:setQuota
+webapp = thrift.AuroraSchedulerManager:*:webapp
+```
+
+The users section defines user credentials and the role(s) they are members of.  These lines
+are of the format `<user> = <password>[, <role>...]`.  As you probably noticed, the passwords are
+in plaintext, and as a result read access to this file should be restricted.
+
+In this configuration, each user has different privileges for actions in the cluster because
+of the roles they are a part of:
+
+* admin is granted all privileges
+* accounting may adjust the amount of resource quota for any role
+* webapp represents a collection of jobs that make up a service, and its members may create and modify any jobs owned by it
+
+### Caveats
+You might find documentation on the Internet suggesting there are additional sections in `shiro.ini`,
+like `[main]` and `[urls]`. These are not supported by Aurora as it uses a different mechanism to configure
+those parts of Shiro. Think of Aurora's `security.ini` as a subset with only `[users]` and `[roles]` sections.
+
+# Implementing a Custom Realm
+
+Since Aurora’s security is backed by [Apache Shiro](https://shiro.apache.org), you can implement a
+custom [Realm](http://shiro.apache.org/realm.html) to define organization-specific security behavior.
+
+In addition to using Shiro's standard APIs to implement a Realm you can link against Aurora to
+access the type-safe Permissions Aurora uses. See the Javadoc for `org.apache.aurora.scheduler.spi`
+for more information.
+
+## Packaging a realm module
+Package your custom Realm(s) with a Guice module that exposes a `Set<Realm>` multibinding.
+
+```java
+package com.example;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.multibindings.Multibinder;
+import org.apache.shiro.realm.Realm;
+
+public class MyRealmModule extends AbstractModule {
+  @Override
+  public void configure() {
+    Realm myRealm = new MyRealm();
+
+    Multibinder.newSetBinder(binder(), Realm.class).addBinding().toInstance(myRealm);
+  }
+
+  static class MyRealm implements Realm {
+    // Realm implementation.
+  }
+}
+```
+
+To use your module in the scheduler, include it as a realm module based on its fully-qualified
+class name:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ,com.example.MyRealmModule
+```
+
+# Known Issues
+
+While the APIs and SPIs we ship with are stable as of 0.8.0, we are aware of several opportunities
+for incremental improvement. Please follow, vote, or send patches.
+
+Relevant tickets:
+
+* [AURORA-343](https://issues.apache.org/jira/browse/AURORA-343): HTTPS support
+* [AURORA-1248](https://issues.apache.org/jira/browse/AURORA-1248): Client retries 4xx errors
+* [AURORA-1279](https://issues.apache.org/jira/browse/AURORA-1279): Remove kerberos-specific build targets
+* [AURORA-1291](https://issues.apache.org/jira/browse/AURORA-1291): Consider defining a JSON format in place of INI
+* [AURORA-1179](https://issues.apache.org/jira/browse/AURORA-1179): Support hashed passwords in security.ini
+* [AURORA-1295](https://issues.apache.org/jira/browse/AURORA-1295): Support security for the ReadOnlyScheduler service
diff --git a/source/documentation/0.11.0/sla.md b/source/documentation/0.11.0/sla.md
new file mode 100644
index 0000000..ac3af09
--- /dev/null
+++ b/source/documentation/0.11.0/sla.md
@@ -0,0 +1,177 @@
+Aurora SLA Measurement
+--------------
+
+- [Overview](#overview)
+- [Metric Details](#metric-details)
+  - [Platform Uptime](#platform-uptime)
+  - [Job Uptime](#job-uptime)
+  - [Median Time To Assigned (MTTA)](#median-time-to-assigned-\(mtta\))
+  - [Median Time To Running (MTTR)](#median-time-to-running-\(mttr\))
+- [Limitations](#limitations)
+
+## Overview
+
+The primary goal of the feature is collection and monitoring of Aurora job SLA (Service Level
+Agreement) metrics that define a contractual relationship between the Aurora/Mesos platform
+and hosted services.
+
+The Aurora SLA feature is enabled by default only for service (non-cron)
+production jobs (`"production = True"` in your `.aurora` config). It can be enabled for
+non-production services via the scheduler command line flag `-sla_non_prod_metrics`.
+
+Counters that track SLA measurements are computed periodically within the scheduler.
+The individual instance metrics are refreshed every minute (configurable via
+`sla_stat_refresh_interval`). The instance counters are subsequently aggregated by
+relevant grouping types before being exported to the scheduler's `/vars` endpoint (when using
+`vagrant`, that would be `http://192.168.33.7:8081/vars`).
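+
+For example, reading one job's platform uptime counter directly (a sketch; the job key here is
+a placeholder):
+
+    curl -s localhost:8081/vars | grep '^sla_www-data/prod/hello_platform_uptime_percent'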
+
+## Metric Details
+
+### Platform Uptime
+
+*Aggregate amount of time a job spends in a non-runnable state due to platform unavailability
+or scheduling delays. This metric tracks Aurora/Mesos uptime performance and reflects on any
+system-caused downtime events (tasks LOST or DRAINED). Any user-initiated task kills/restarts
+will not degrade this metric.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_platform_uptime_percent`
+* Per cluster - `sla_cluster_platform_uptime_percent`
+
+**Units:** percent
+
+A fault in the task environment may cause Aurora and Mesos to have different views of the task state
+or lose track of the task's existence. In such cases, the service task is marked as LOST and
+rescheduled by Aurora. For example, this may happen when the task stays in ASSIGNED or STARTING
+for too long or the Mesos slave becomes unhealthy (or disappears completely). The time between
+task entering LOST and its replacement reaching RUNNING state is counted towards platform downtime.
+
+Another example of a platform downtime event is the administrator-requested task rescheduling. This
+happens during planned Mesos slave maintenance when all slave tasks are marked as DRAINED and
+rescheduled elsewhere.
+
+To accurately calculate Platform Uptime, we must separate platform incurred downtime from user
+actions that put a service instance in a non-operational state. It is simpler to isolate
+user-incurred downtime and treat all other downtime as platform incurred.
+
+Currently, a user can cause a healthy service (task) downtime in only two ways: via `killTasks`
+or `restartShards` RPCs. For both, their affected tasks leave an audit state transition trail
+relevant to uptime calculations. By applying a special "SLA meaning" to exposed task state
+transition records, we can build a deterministic downtime trace for every given service instance.
+
+A task going through a state transition carries one of three possible SLA meanings
+(see [SlaAlgorithm.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/sla/SlaAlgorithm.java) for
+the SLA-to-task-state mapping):
+
+* Task is UP: starts a period where the task is considered to be up and running from the Aurora
+  platform standpoint.
+
+* Task is DOWN: starts a period where the task cannot reach the UP state for some
+  non-user-related reason. Counts towards instance downtime.
+
+* Task is REMOVED from SLA: starts a period where the task is not expected to be UP due to
+  user-initiated action or failure. We ignore this period for uptime calculation purposes.
+
+This metric is recalculated over the last sampling period (last minute) to account for
+any UP/DOWN/REMOVED events. It ignores any UP/DOWN events not immediately adjacent to the
+sampling interval as well as adjacent REMOVED events.
+
+### Job Uptime
+
+*Percentage of the job instances considered to be in RUNNING state for the specified duration
+relative to request time. This is a purely application-side metric that considers the aggregate
+uptime of all RUNNING instances. Any user- or platform-initiated restarts directly affect
+this metric.*
+
+**Collection scope:** We currently expose job uptime values at 5 pre-defined
+percentiles (50th, 75th, 90th, 95th, and 99th):
+
+* `sla_<job_key>_job_uptime_50_00_sec`
+* `sla_<job_key>_job_uptime_75_00_sec`
+* `sla_<job_key>_job_uptime_90_00_sec`
+* `sla_<job_key>_job_uptime_95_00_sec`
+* `sla_<job_key>_job_uptime_99_00_sec`
+
+**Units:** seconds
+
+You can also get customized real-time stats from the Aurora client. See `aurora sla -h` for
+more details.
+
+### Median Time To Assigned (MTTA)
+
+*Median time a job spends waiting for its tasks to be assigned to a host. This is a combined
+metric that helps track the dependency of scheduling performance on the requested resources
+(user scope) as well as the internal scheduler bin-packing algorithm efficiency (platform scope).*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtta_ms`
+* Per cluster - `sla_cluster_mtta_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in
+[ResourceAggregates.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java):
+  * By CPU:
+    * `sla_cpu_small_mtta_ms`
+    * `sla_cpu_medium_mtta_ms`
+    * `sla_cpu_large_mtta_ms`
+    * `sla_cpu_xlarge_mtta_ms`
+    * `sla_cpu_xxlarge_mtta_ms`
+  * By RAM:
+    * `sla_ram_small_mtta_ms`
+    * `sla_ram_medium_mtta_ms`
+    * `sla_ram_large_mtta_ms`
+    * `sla_ram_xlarge_mtta_ms`
+    * `sla_ram_xxlarge_mtta_ms`
+  * By DISK:
+    * `sla_disk_small_mtta_ms`
+    * `sla_disk_medium_mtta_ms`
+    * `sla_disk_large_mtta_ms`
+    * `sla_disk_xlarge_mtta_ms`
+    * `sla_disk_xxlarge_mtta_ms`
+
+**Units:** milliseconds
+
+MTTA only considers instances that have already reached ASSIGNED state and ignores those
+that are still PENDING. This ensures straggler instances (e.g. with unreasonable resource
+constraints) do not affect metric curves.
+
+### Median Time To Running (MTTR)
+
+*Median time a job waits for its tasks to reach RUNNING state. This is a comprehensive metric
+reflecting the overall time it takes for Aurora/Mesos to start executing user content.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mttr_ms`
+* Per cluster - `sla_cluster_mttr_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in
+[ResourceAggregates.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java):
+  * By CPU:
+    * `sla_cpu_small_mttr_ms`
+    * `sla_cpu_medium_mttr_ms`
+    * `sla_cpu_large_mttr_ms`
+    * `sla_cpu_xlarge_mttr_ms`
+    * `sla_cpu_xxlarge_mttr_ms`
+  * By RAM:
+    * `sla_ram_small_mttr_ms`
+    * `sla_ram_medium_mttr_ms`
+    * `sla_ram_large_mttr_ms`
+    * `sla_ram_xlarge_mttr_ms`
+    * `sla_ram_xxlarge_mttr_ms`
+  * By DISK:
+    * `sla_disk_small_mttr_ms`
+    * `sla_disk_medium_mttr_ms`
+    * `sla_disk_large_mttr_ms`
+    * `sla_disk_xlarge_mttr_ms`
+    * `sla_disk_xxlarge_mttr_ms`
+
+**Units:** milliseconds
+
+MTTR only considers instances in RUNNING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+## Limitations
+
+* The availability of Aurora SLA metrics is bound by the scheduler availability.
+
+* All metrics are calculated at a pre-defined interval (currently set at 1 minute).
+  Scheduler restarts may result in missed collections.
diff --git a/source/documentation/0.11.0/storage-config.md b/source/documentation/0.11.0/storage-config.md
new file mode 100644
index 0000000..3110499
--- /dev/null
+++ b/source/documentation/0.11.0/storage-config.md
@@ -0,0 +1,142 @@
+# Storage Configuration And Maintenance
+
+- [Overview](#overview)
+- [Scheduler storage configuration flags](#scheduler-storage-configuration-flags)
+  - [Mesos replicated log configuration flags](#mesos-replicated-log-configuration-flags)
+    - [-native_log_quorum_size](#-native_log_quorum_size)
+    - [-native_log_file_path](#-native_log_file_path)
+    - [-native_log_zk_group_path](#-native_log_zk_group_path)
+  - [Backup configuration flags](#backup-configuration-flags)
+    - [-backup_interval](#-backup_interval)
+    - [-backup_dir](#-backup_dir)
+    - [-max_saved_backups](#-max_saved_backups)
+- [Recovering from a scheduler backup](#recovering-from-a-scheduler-backup)
+  - [Summary](#summary)
+  - [Preparation](#preparation)
+  - [Cleanup and re-initialize Mesos replicated log](#cleanup-and-re-initialize-mesos-replicated-log)
+  - [Restore from backup](#restore-from-backup)
+  - [Cleanup](#cleanup)
+
+## Overview
+
+This document summarizes Aurora storage configuration and maintenance details and is
+intended for use by anyone deploying and/or maintaining Aurora.
+
+For a high level overview of the Aurora storage architecture refer to [this document](/documentation/0.11.0/storage/).
+
+## Scheduler storage configuration flags
+
+Below is a summary of scheduler storage configuration flags that either don't have default values
+or require attention before deploying in a production environment.
+
+### Mesos replicated log configuration flags
+
+#### -native_log_quorum_size
+Defines the Mesos replicated log quorum size. See
+[the replicated log configuration document](/documentation/0.11.0/deploying-aurora-scheduler/#replicated-log-configuration)
+on how to choose the right value.
+
+#### -native_log_file_path
+Location of the Mesos replicated log files. Consider allocating a dedicated disk (preferably SSD)
+for Mesos replicated log files to ensure optimal storage performance.
+
+#### -native_log_zk_group_path
+ZooKeeper path used for Mesos replicated log quorum discovery.
+
+See [the code](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/log/mesos/MesosLogStreamModule.java) for
+other available Mesos replicated log configuration options and default values.
+
+### Backup configuration flags
+
+Configuration options for the Aurora scheduler backup manager.
+
+#### -backup_interval
+The interval on which the scheduler writes local storage backups.  The default is every hour.
+
+#### -backup_dir
+Directory to write backups to.
+
+#### -max_saved_backups
+Maximum number of backups to retain before deleting the oldest backup(s).
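+
+As a sketch, these backup flags might be set together like so (the values are placeholders):
+
+    -backup_interval=1hrs
+    -backup_dir=/var/lib/aurora/backups
+    -max_saved_backups=48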
+
+## Recovering from a scheduler backup
+
+- [Summary](#summary)
+- [Preparation](#preparation)
+- [Cleanup and re-initialize Mesos replicated log](#cleanup-and-re-initialize-mesos-replicated-log)
+- [Restore from backup](#restore-from-backup)
+- [Cleanup](#cleanup)
+
+**Be sure to read the entire page before attempting to restore from a backup, as it may have
+unintended consequences.**
+
+### Summary
+
+The restoration procedure replaces the existing (possibly corrupted) Mesos replicated log with an
+earlier, backed-up version and requires all schedulers to be taken down temporarily while
+restoring. Once completed, the scheduler state resets to what it was when the backup was created.
+This means any jobs/tasks created or updated after the backup are unknown to the scheduler and will
+be killed shortly after the cluster restarts. All other tasks continue operating as normal.
+
+Usually, it is a bad idea to restore a backup that is not extremely recent (i.e. older than a few
+hours). This is because the scheduler will expect the cluster to look exactly as the backup does,
+so any tasks that have been rescheduled since the backup was taken will be killed.
+
+### Preparation
+
+Follow these steps to prepare the cluster for restoring from a backup:
+
+* Stop all scheduler instances
+
+* Consider blocking external traffic on the port defined in `-http_port` for all schedulers to
+prevent users from interacting with the scheduler during the restoration process. This will help
+troubleshooting by reducing scheduler log noise, and will prevent users from making changes that
+would be erased after the backup snapshot is restored.
+
+* The next steps are required to put the scheduler into a partially disabled state in which it can
+still accept storage recovery requests but cannot schedule tasks or change task states. This may be
+accomplished by updating the following scheduler configuration options:
+  * Set `-mesos_master_address` to a non-existent ZooKeeper address. This will prevent the
+    scheduler from registering with Mesos. E.g.: `-mesos_master_address=zk://localhost:2181`
+  * Set `-max_registration_delay` to a sufficiently long interval to prevent a registration timeout
+    and, as a result, scheduler suicide. E.g.: `-max_registration_delay=360mins`
+  * Make sure the `-reconciliation_initial_delay` option is set high enough (e.g.: `365days`) to
+    prevent accidental task GC. This is important as the scheduler will attempt to reconcile the
+    cluster state and will kill all tasks when restarted with an empty Mesos replicated log.
+
+* Restart all schedulers
+
+### Cleanup and re-initialize Mesos replicated log
+
+Get rid of the corrupted files and re-initialize the Mesos replicated log:
+
+* Stop schedulers
+* Delete all files under `-native_log_file_path` on all schedulers
+* Initialize Mesos replica's log file: `mesos-log initialize --path=<-native_log_file_path>`
+* Restart schedulers
+
+### Restore from backup
+
+At this point the scheduler is ready to rehydrate from the backup:
+
+* Identify the leading scheduler by:
+  * running `aurora_admin get_scheduler <cluster>` - if the scheduler is responsive
+  * examining scheduler logs
+  * or examining Zookeeper registration under the path defined by `-zk_endpoints`
+    and `-serverset_path`
+
+* Locate the desired backup file, copy it to the leading scheduler, and stage recovery by running
+the following command on the leader:
+`aurora_admin scheduler_stage_recovery <cluster> scheduler-backup-<yyyy-MM-dd-HH-mm>`
+
+* At this point, the recovery snapshot is staged and available for manual verification/modification
+via `aurora_admin scheduler_print_recovery_tasks` and `scheduler_delete_recovery_tasks` commands.
+See `aurora_admin help <command>` for usage details.
+
+* Commit recovery. This instructs the scheduler to overwrite the existing Mesos replicated log with
+the provided backup snapshot and initiate a mandatory failover:
+`aurora_admin scheduler_commit_recovery <cluster>`
+
+### Cleanup
+Undo any modifications made during the [Preparation](#preparation) sequence.
+
diff --git a/source/documentation/0.11.0/storage.md b/source/documentation/0.11.0/storage.md
new file mode 100644
index 0000000..7733e23
--- /dev/null
+++ b/source/documentation/0.11.0/storage.md
@@ -0,0 +1,88 @@
+# Aurora Scheduler Storage
+
+- [Overview](#overview)
+- [Reads, writes, modifications](#reads-writes-modifications)
+  - [Read lifecycle](#read-lifecycle)
+  - [Write lifecycle](#write-lifecycle)
+- [Atomicity, consistency and isolation](#atomicity-consistency-and-isolation)
+- [Population on restart](#population-on-restart)
+
+## Overview
+
+The Aurora scheduler maintains data that needs to be persisted to survive failovers and restarts.
+For example:
+
+* Task configurations and scheduled task instances
+* Job update configurations and update progress
+* Production resource quotas
+* Mesos resource offer host attributes
+
+Aurora solves its persistence needs by leveraging the Mesos implementation of a Paxos replicated
+log [[1]](https://ramcloud.stanford.edu/~ongaro/userstudy/paxos.pdf)
+[[2]](http://en.wikipedia.org/wiki/State_machine_replication) with a key-value
+[LevelDB](https://github.com/google/leveldb) storage as persistence media.
+
+Conceptually, it can be represented by the following major components:
+
+* Volatile storage: in-memory cache of all available data. Implemented via in-memory
+[H2 Database](http://www.h2database.com/html/main.html) and accessed via
+[MyBatis](http://mybatis.github.io/mybatis-3/).
+* Log manager: interface between Aurora storage and Mesos replicated log. The default schema format
+is [thrift](https://github.com/apache/thrift). Data is stored in serialized binary form.
+* Snapshot manager: all data is periodically persisted in Mesos replicated log in a single snapshot.
+This helps establish periodic recovery checkpoints and speeds up volatile storage recovery on
+restart.
+* Backup manager: as a precaution, snapshots are periodically written out into backup files.
+This solves a [disaster recovery problem](/documentation/0.11.0/storage-config/#recovering-from-a-scheduler-backup)
+in case of a complete loss or corruption of Mesos log files.
+
+![Storage hierarchy](images/storage_hierarchy.png)
+
+## Reads, writes, modifications
+
+All services in Aurora access data via a set of predefined store interfaces (aka stores) logically
+grouped by the type of data they serve. Every interface defines a specific set of operations allowed
+on the data, thus abstracting out the storage access and the actual persistence implementation. The
+latter is especially important in view of a general immutability of persisted data. With the Mesos
+replicated log as the underlying persistence solution, data can be read and written easily but not
+modified. All modifications are simulated by saving new versions of modified objects. This feature
+and general performance considerations justify the existence of the volatile in-memory store.
+
+### Read lifecycle
+
+There are two types of reads available in Aurora: consistent and weakly-consistent. The difference
+is explained [below](#atomicity-consistency-and-isolation).
+
+All reads are served from the volatile storage, making reads generally cheap operations from a
+performance standpoint. The majority of the volatile stores are represented by the
+in-memory H2 database. This allows for rich schema definitions, queries and relationships that
+key-value storage is unable to match.
+
+### Write lifecycle
+
+Writes are more involved operations since, in addition to updating the volatile store, data has to
+be appended to the replicated log. Data is not available for reads until fully acknowledged by both
+the replicated log and volatile storage.
+
+## Atomicity, consistency and isolation
+
+Aurora uses [write-ahead logging](http://en.wikipedia.org/wiki/Write-ahead_logging) to ensure
+consistency between replicated and volatile storage. In Aurora, data is first written into the
+replicated log and only then updated in the volatile store.
+
+Aurora storage uses read-write locks to serialize data mutations and provide a consistent view of
+the available data. The `Storage` interface exposes 3 major types of operations:
+
+* `consistentRead` - access is locked using the reader's lock and provides a consistent view on read
+* `weaklyConsistentRead` - access is lock-less. Delivers the best performance under contention but
+may result in stale reads
+* `write` - access is fully serialized using the writer's lock. Operation success requires both
+volatile and replicated writes to succeed.
+
+The consistency of the volatile store is enforced via H2 transactional isolation.
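+
+Below is a minimal Python model of these three operation types, assuming a toy readers-writer lock
+and stand-in `replicated_log`/`volatile_store` objects. It is a sketch of the semantics described
+above, not Aurora's actual (Java) implementation:
+
+```python
+import threading
+
+class ReadWriteLock(object):
+    """Toy readers-writer lock, for illustration only."""
+    def __init__(self):
+        self._readers = 0
+        self._mutex = threading.Lock()
+        self._writer = threading.Lock()
+
+    def acquire_read(self):
+        with self._mutex:
+            self._readers += 1
+            if self._readers == 1:
+                self._writer.acquire()  # block writers while readers exist
+
+    def release_read(self):
+        with self._mutex:
+            self._readers -= 1
+            if self._readers == 0:
+                self._writer.release()
+
+    def acquire_write(self):
+        self._writer.acquire()
+
+    def release_write(self):
+        self._writer.release()
+
+class Storage(object):
+    def __init__(self, replicated_log, volatile_store):
+        self._lock = ReadWriteLock()
+        self._log = replicated_log
+        self._volatile = volatile_store
+
+    def consistent_read(self, query):
+        self._lock.acquire_read()
+        try:
+            return self._volatile.read(query)
+        finally:
+            self._lock.release_read()
+
+    def weakly_consistent_read(self, query):
+        # Lock-less: best contention performance, but may observe stale data.
+        return self._volatile.read(query)
+
+    def write(self, op):
+        self._lock.acquire_write()
+        try:
+            self._log.append(op)       # write-ahead: replicated log first...
+            self._volatile.apply(op)   # ...then the volatile store
+        finally:
+            self._lock.release_write()
+```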
+
+## Population on restart
+
+Any time a scheduler restarts, it restores its volatile state from the most recent position recorded
+in the replicated log by restoring the snapshot and replaying individual log entries on top to fully
+recover the state up to the last write.
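+
+As a rough sketch (the method names here are assumptions, not Aurora's API), the recovery sequence
+amounts to:
+
+```python
+# Simplified model of scheduler state recovery on restart.
+def recover(replicated_log, volatile_store):
+    snapshot, position = replicated_log.latest_snapshot()
+    volatile_store.restore(snapshot)             # restore the checkpoint
+    for entry in replicated_log.entries_after(position):
+        volatile_store.apply(entry)              # replay writes in log order
+```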
+
diff --git a/source/documentation/0.11.0/test-resource-generation.md b/source/documentation/0.11.0/test-resource-generation.md
new file mode 100644
index 0000000..707e2a5
--- /dev/null
+++ b/source/documentation/0.11.0/test-resource-generation.md
@@ -0,0 +1,24 @@
+# Generating test resources
+
+## Background
+The Aurora source repository and distributions contain several
+[binary files](https://github.com/apache/aurora/blob/#{git_tag}/src/test/resources/org/apache/thermos/root/checkpoints) to
+qualify the backwards-compatibility of thermos with checkpoint data. Since
+thermos persists state to disk, to be read by the thermos observer, it is important that we have
+tests that prevent regressions affecting the ability to parse previously-written data.
+
+## Generating test files
+The files included represent persisted checkpoints that exercise different
+features of thermos. The existing files should not be modified unless
+we are accepting backwards incompatibility, such as with a major release.
+
+It is not practical to write source code to generate these files on the fly,
+as source would be vulnerable to drift (e.g. due to refactoring) in ways
+that would undermine the goal of ensuring backwards compatibility.
+
+The most common reason to add a new checkpoint file would be to provide
+coverage for new thermos features that alter the data format. This is
+accomplished by writing and running a
+[job configuration](/documentation/0.11.0/configuration-reference/) that exercises the feature, and
+copying the checkpoint file from the sandbox directory; by default this is
+`/var/run/thermos/checkpoints/<aurora task id>`.
diff --git a/source/documentation/0.11.0/thrift-deprecation.md b/source/documentation/0.11.0/thrift-deprecation.md
new file mode 100644
index 0000000..9b6be17
--- /dev/null
+++ b/source/documentation/0.11.0/thrift-deprecation.md
@@ -0,0 +1,50 @@
+# Thrift API Changes
+
+## Overview
+Aurora uses [Apache Thrift](https://thrift.apache.org/) for representing structured data in
+client/server RPC protocol as well as for internal data storage. While Thrift is capable of
+correctly handling additions and renames of the existing members, field removals must be done
+carefully to ensure backwards compatibility and provide a predictable deprecation cycle. This
+document describes general guidelines for making Thrift schema changes to the existing fields in
+[api.thrift](https://github.com/apache/aurora/blob/#{git_tag}/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+It is highly recommended to go through the
+[Thrift: The Missing Guide](http://diwakergupta.github.io/thrift-missing-guide/) first to refresh on
+basic Thrift schema concepts.
+
+## Checklist
+Every existing Thrift schema modification is unique in its requirements and must be analyzed
+carefully to identify its scope and expected consequences. The following checklist may help in that
+analysis:
+* Is this a new field/struct? If yes, go ahead
+* Is this a pure field/struct rename without any type/structure change? If yes, go ahead and rename
+* Anything else, read further to make sure your change is properly planned
+
+## Deprecation cycle
+Any time a breaking change (e.g.: field replacement or removal) is required, the following cycle
+must be followed:
+
+### vCurrent
+The change is applied in a way that does not prevent a scheduler/client at this version from
+communicating with a scheduler/client at vCurrent-1.
+* Do not remove or rename the old field
+* Add a new field as an eventual replacement of the old one and implement a dual read/write
+anywhere the old field is used (see the sketch after this list)
+* Check [storage.thrift](https://github.com/apache/aurora/blob/#{git_tag}/api/src/main/thrift/org/apache/aurora/gen/storage.thrift) to see if the
+affected struct is stored in Aurora scheduler storage. If so, you most likely need to backfill
+existing data to ensure both fields are populated eagerly on startup. See
+[StorageBackfill.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/storage/StorageBackfill.java)
+* Add a deprecation jira ticket into the vCurrent+1 release candidate
+* Add a TODO for the deprecated field mentioning the jira ticket
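+
+A minimal sketch of the dual read/write pattern, with hypothetical field names (not taken from
+api.thrift), might look like:
+
+```python
+class TaskConfig(object):
+    """Illustrative stand-in for a generated Thrift struct."""
+    def __init__(self):
+        self.old_limit = None          # deprecated in vCurrent
+        self.max_failures = None       # replacement field
+
+def write_limit(config, value):
+    # Dual write: populate both fields so vCurrent-1 readers keep working.
+    config.old_limit = value
+    config.max_failures = value
+
+def read_limit(config):
+    # Dual read: prefer the new field, fall back to the deprecated one for
+    # records written before the new field existed.
+    if config.max_failures is not None:
+        return config.max_failures
+    return config.old_limit
+```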
+
+### vCurrent+1
+Finalize the change by removing the deprecated fields from the Thrift schema.
+* Drop any dual read/write routines added in the previous version
+* Remove the deprecated Thrift field
+
+## Testing
+It's always advisable to test your changes in the local vagrant environment to build more
+confidence that your change is backwards compatible. It's easy to simulate different
+client/scheduler versions by playing with `aurorabuild` command. See [this document](/documentation/0.11.0/vagrant/)
+for more.
+
diff --git a/source/documentation/0.11.0/tools.md b/source/documentation/0.11.0/tools.md
new file mode 100644
index 0000000..c14aa2e
--- /dev/null
+++ b/source/documentation/0.11.0/tools.md
@@ -0,0 +1,16 @@
+# Tools
+
+Various tools integrate with Aurora. Is a tool missing? Let us know, or submit a patch to add it!
+
+* Load-balancing technology used to direct traffic to services running on Aurora
+  - [synapse](https://github.com/airbnb/synapse) based on HAProxy
+  - [aurproxy](https://github.com/tellapart/aurproxy) based on nginx
+  - [jobhopper](https://github.com/benley/aurora-jobhopper) performing HTTP redirects for easy developer and administrator access
+
+* Monitoring
+  - [collectd-aurora](https://github.com/zircote/collectd-aurora) for cluster monitoring using collectd
+  - [Prometheus Aurora exporter](https://github.com/tommyulfsparre/aurora_exporter) for cluster monitoring using Prometheus
+  - [Prometheus service discovery integration](http://prometheus.io/docs/operating/configuration/#zookeeper-serverset-sd-configurations-serverset_sd_config) for discovering and monitoring services running on Aurora
+
+* Packaging and deployment
+  - [aurora-packaging](https://github.com/apache/aurora-packaging), the source of the official Aurora packages
diff --git a/source/documentation/0.11.0/tutorial.md b/source/documentation/0.11.0/tutorial.md
new file mode 100644
index 0000000..7a02e0f
--- /dev/null
+++ b/source/documentation/0.11.0/tutorial.md
@@ -0,0 +1,265 @@
+Aurora Tutorial
+---------------
+
+- [Introduction](#introduction)
+- [Setup: Install Aurora](#setup-install-aurora)
+- [The Script](#the-script)
+- [Aurora Configuration](#aurora-configuration)
+- [What's Going On In That Configuration File?](#whats-going-on-in-that-configuration-file)
+- [Creating the Job](#creating-the-job)
+- [Watching the Job Run](#watching-the-job-run)
+- [Cleanup](#cleanup)
+- [Next Steps](#next-steps)
+
+## Introduction
+
+This tutorial shows how to use the Aurora scheduler to run (and
+"`printf-debug`") a hello world program on Mesos. The operational
+hierarchy is:
+
+- Aurora manages and schedules jobs for Mesos to run.
+- Mesos manages the individual tasks that make up a job.
+- Thermos manages the individual processes that make up a task.
+
+This is the recommended first Aurora users document to read to start
+getting up to speed on the system.
+
+To get help, email questions to the Aurora Developer List,
+[dev@aurora.apache.org](mailto:dev@aurora.apache.org)
+
+## Setup: Install Aurora
+
+You use the Aurora client and web UI to interact with Aurora jobs. To
+install it locally, see [vagrant.md](/documentation/0.11.0/vagrant/). The remainder of this
+Tutorial assumes you are running Aurora using Vagrant.  Unless otherwise stated,
+all commands are to be run from the root of the aurora repository clone.
+
+## The Script
+
+Our "hello world" application is a simple Python script that loops
+forever, displaying the time every few seconds. Copy the code below and
+put it in a file named `hello_world.py` in the root of your Aurora repository clone (Note:
+this directory is the same as `/vagrant` inside the Vagrant VMs).
+
+The script has an intentional bug, which we will explain later on.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+import sys
+import time
+
+def main(argv):
+  SLEEP_DELAY = 10
+  # Python ninjas - ignore this blatant bug.
+  for i in xrang(100):
+    print("Hello world! The time is now: %s. Sleeping for %d secs" % (
+      time.asctime(), SLEEP_DELAY))
+    sys.stdout.flush()
+    time.sleep(SLEEP_DELAY)
+
+if __name__ == "__main__":
+  main(sys.argv)
+```
+
+## Aurora Configuration
+
+Once we have our script/program, we need to create a *configuration
+file* that tells Aurora how to manage and launch our Job. Save the below
+code in the file `hello_world.aurora`.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+pkg_path = '/vagrant/hello_world.py'
+
+# we use a trick here to make the configuration change with
+# the contents of the file, for simplicity.  in a normal setting, packages would be
+# versioned, and the version number would be changed in the configuration.
+import hashlib
+with open(pkg_path, 'rb') as f:
+  pkg_checksum = hashlib.md5(f.read()).hexdigest()
+
+# copy hello_world.py into the local sandbox
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s . && echo %s && chmod +x hello_world.py' % (pkg_path, pkg_checksum))
+
+# run the script
+hello_world = Process(
+  name = 'hello_world',
+  cmdline = 'python hello_world.py')
+
+# describe the task
+hello_world_task = SequentialTask(
+  processes = [install, hello_world],
+  resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB))
+
+jobs = [
+  Service(cluster = 'devcluster',
+          environment = 'devel',
+          role = 'www-data',
+          name = 'hello_world',
+          task = hello_world_task)
+]
+```
+
+For more about Aurora configuration files, see the [Configuration
+Tutorial](/documentation/0.11.0/configuration-tutorial/) and the [Aurora + Thermos
+Reference](/documentation/0.11.0/configuration-reference/) (preferably after finishing this
+tutorial).
+
+## What's Going On In That Configuration File?
+
+More than you might think.
+
+1. From a "big picture" viewpoint, it first defines two
+Processes. Then it defines a Task that runs the two Processes in the
+order specified in the Task definition, as well as specifying what
+computational and memory resources are available for them.  Finally,
+it defines a Job that will schedule the Task on available and suitable
+machines. This Job is the sole member of a list of Jobs; you can
+specify more than one Job in a config file.
+
+2. At the Process level, it specifies how to get your code into the
+local sandbox in which it will run. It then specifies how the code is
+actually run once the second Process starts.
+
+## Creating the Job
+
+We're ready to launch our job! To do so, we use the Aurora Client to
+issue a Job creation request to the Aurora scheduler.
+
+Many Aurora Client commands take a *job key* argument, which uniquely
+identifies a Job. A job key consists of four parts, each separated by a
+"/". The four parts are  `<cluster>/<role>/<environment>/<jobname>`
+in that order. When comparing two job keys, if any of the
+four parts is different from its counterpart in the other key, then the
+two job keys identify two separate jobs. If all four values are
+identical, the job keys identify the same job.
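+
+As a toy illustration (this helper is not part of the Aurora client), a job key decomposes and
+compares like this:
+
+```python
+def parse_job_key(key):
+    # A job key is exactly <cluster>/<role>/<environment>/<jobname>.
+    cluster, role, environment, jobname = key.split('/')
+    return (cluster, role, environment, jobname)
+
+# Differing in any one part means two distinct jobs.
+assert parse_job_key('devcluster/www-data/devel/hello_world') != \
+       parse_job_key('devcluster/www-data/test/hello_world')
+```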
+
+`/etc/aurora/clusters.json` within the Aurora scheduler has the available
+cluster names. For Vagrant, from the top-level of your Aurora repository clone,
+do:
+
+    $ vagrant ssh
+
+Followed by:
+
+    vagrant@precise64:~$ cat /etc/aurora/clusters.json
+
+You'll see something like:
+
+```javascript
+[{
+  "name": "devcluster",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler",
+  "auth_mechanism": "UNAUTHENTICATED"
+}]
+```
+
+Use a `name` value for your job key's cluster value.
+
+Role names are user accounts existing on the slave machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
+The Aurora Client command that actually runs our Job is `aurora job create`. It creates a Job as
+specified by its job key and configuration file arguments and runs it.
+
+    aurora job create <cluster>/<role>/<environment>/<jobname> <config_file>
+
+Or for our example:
+
+    aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+This returns:
+
+    $ vagrant ssh
+    Welcome to Ubuntu 12.04 LTS (GNU/Linux 3.2.0-23-generic x86_64)
+
+     * Documentation:  https://help.ubuntu.com/
+    Welcome to your Vagrant-built virtual machine.
+    Last login: Fri Jan  3 02:18:55 2014 from 10.0.2.2
+    vagrant@precise64:~$ aurora job create devcluster/www-data/devel/hello_world \
+        /vagrant/hello_world.aurora
+     INFO] Creating job hello_world
+     INFO] Response from scheduler: OK (message: 1 new tasks pending for job
+      www-data/devel/hello_world)
+     INFO] Job url: http://precise64:8081/scheduler/www-data/devel/hello_world
+
+## Watching the Job Run
+
+Now that our job is running, let's see what it's doing. Access the
+scheduler web interface at `http://$scheduler_hostname:$scheduler_port/scheduler`,
+or, when using `vagrant`, at `http://192.168.33.7:8081/scheduler`.
+First we see what Jobs are scheduled:
+
+![Scheduled Jobs](images/ScheduledJobs.png)
+
+Click on your user name, which in this case was `www-data`, and we see the Jobs associated
+with that role:
+
+![Role Jobs](images/RoleJobs.png)
+
+If you click on your `hello_world` Job, you'll see:
+
+![hello_world Job](images/HelloWorldJob.png)
+
+Oops, looks like our first job didn't quite work! The task failed, so we have
+to figure out what went wrong.
+
+Access the page for our Task by clicking on its host.
+
+![Task page](images/TaskBreakdown.png)
+
+Once there, we see that the
+`hello_world` process failed. The Task page captures the standard error and
+standard output streams and makes them available. Clicking through
+to `stderr` on the failed `hello_world` process, we see what happened.
+
+![stderr page](images/stderr.png)
+
+It looks like we made a typo in our Python script. We wanted `xrange`,
+not `xrang`. Edit the `hello_world.py` script to use the correct function and
+try again:
+
+    aurora update start devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+This time, the task comes up, we inspect the page, and see that the
+`hello_world` process is running.
+
+![Running Task page](images/runningtask.png)
+
+We then inspect the output by clicking on `stdout` and see our process'
+output:
+
+![stdout page](images/stdout.png)
+
+## Cleanup
+
+Now that we're done, we kill the job using the Aurora client:
+
+    vagrant@precise64:~$ aurora job killall devcluster/www-data/devel/hello_world
+     INFO] Killing tasks for job: devcluster/www-data/devel/hello_world
+     INFO] Response from scheduler: OK (message: Tasks killed.)
+     INFO] Job url: http://precise64:8081/scheduler/www-data/devel/hello_world
+    vagrant@precise64:~$
+
+The job page now shows the `hello_world` tasks as completed.
+
+![Killed Task page](images/killedtask.png)
+
+## Next Steps
+
+Now that you've finished this Tutorial, you should read or do the following:
+
+- [The Aurora Configuration Tutorial](/documentation/0.11.0/configuration-tutorial/), which provides more examples
+  and best practices for writing Aurora configurations. You should also look at
+  the [Aurora + Thermos Configuration Reference](/documentation/0.11.0/configuration-reference/).
+- The [Aurora User Guide](/documentation/0.11.0/user-guide/) provides an overview of how Aurora, Mesos, and
+  Thermos work "under the hood".
+- Explore the Aurora Client - use `aurora -h`, and read the
+  [Aurora Client Commands](/documentation/0.11.0/client-commands/) document.
diff --git a/source/documentation/0.11.0/user-guide.md b/source/documentation/0.11.0/user-guide.md
new file mode 100644
index 0000000..ec7c3e3
--- /dev/null
+++ b/source/documentation/0.11.0/user-guide.md
@@ -0,0 +1,355 @@
+Aurora User Guide
+-----------------
+
+- [Overview](#overview)
+- [Job Lifecycle](#job-lifecycle)
+	- [Life Of A Task](#life-of-a-task)
+	- [PENDING to RUNNING states](#pending-to-running-states)
+	- [Task Updates](#task-updates)
+	- [HTTP Health Checking and Graceful Shutdown](#http-health-checking-and-graceful-shutdown)
+		- [Tearing a task down](#tearing-a-task-down)
+	- [Giving Priority to Production Tasks: PREEMPTING](#giving-priority-to-production-tasks-preempting)
+	- [Natural Termination: FINISHED, FAILED](#natural-termination-finished-failed)
+	- [Forceful Termination: KILLING, RESTARTING](#forceful-termination-killing-restarting)
+- [Service Discovery](#service-discovery)
+- [Configuration](#configuration)
+- [Creating Jobs](#creating-jobs)
+- [Interacting With Jobs](#interacting-with-jobs)
+
+Overview
+--------
+
+This document gives an overview of how Aurora works under the hood.
+It assumes you've already worked through the "hello world" example
+job in the [Aurora Tutorial](/documentation/0.11.0/tutorial/). Specifics of how to use Aurora are **not**
+given here, but pointers to documentation about how to use Aurora are
+provided.
+
+Aurora is a Mesos framework used to schedule *jobs* onto Mesos. Mesos
+cares about individual *tasks*, but typical jobs consist of dozens or
+hundreds of task replicas. Aurora provides a layer on top of Mesos with
+its `Job` abstraction. An Aurora `Job` consists of a task template and
+instructions for creating near-identical replicas of that task (modulo
+things like "instance id" or specific port numbers which may differ from
+machine to machine).
+
+How many tasks make up a Job is complicated. On a basic level, a Job consists of
+one task template and instructions for creating near-identical replicas of that task
+(otherwise referred to as "instances" or "shards").
+
+However, since Jobs can be updated on the fly, a single Job identifier or *job key*
+can have multiple job configurations associated with it.
+
+For example, consider when I have a Job with 4 instances that each
+request 1 core of cpu, 1 GB of RAM, and 1 GB of disk space as specified
+in the configuration file `hello_world.aurora`. I want to
+update it so it requests 2 GB of RAM instead of 1. I create a new
+configuration file to do that called `new_hello_world.aurora` and
+issue an `aurora update start <job_key_value>/0-1 new_hello_world.aurora`
+command.
+
+This results in instances 0 and 1 having 1 cpu, 2 GB of RAM, and 1 GB of disk space,
+while instances 2 and 3 have 1 cpu, 1 GB of RAM, and 1 GB of disk space. If instance 3
+dies and restarts, it restarts with 1 cpu, 1 GB RAM, and 1 GB disk space.
+
+So that means there are two simultaneous task configurations for the same Job
+at the same time, just valid for different ranges of instances.
+
+This isn't a recommended pattern, but it is valid and supported by the
+Aurora scheduler. This most often manifests in the "canary pattern" where
+instance 0 runs with a different configuration than instances 1-N to test
+different code versions alongside the actual production job.
+
+A task can merely be a single *process* corresponding to a single
+command line, such as `python2.6 my_script.py`. However, a task can also
+consist of many separate processes, which all run within a single
+sandbox. For example, running multiple cooperating agents together,
+such as `logrotate`, `installer`, master, or slave processes. This is
+where Thermos comes in. While Aurora provides a `Job` abstraction on
+top of Mesos `Tasks`, Thermos provides a `Process` abstraction
+underneath Mesos `Task`s and serves as part of the Aurora framework's
+executor.
+
+You define `Job`s, `Task`s, and `Process`es in a configuration file.
+Configuration files are written in Python, and make use of the Pystachio
+templating language. They end in a `.aurora` extension.
+
+Pystachio is a type-checked dictionary templating library.
+
+> TL;DR
+>
+> -   Aurora manages jobs made of tasks.
+> -   Mesos manages tasks made of processes.
+> -   Thermos manages processes.
+> -   All are defined in a `.aurora` configuration file.
+
+![Aurora hierarchy](images/aurora_hierarchy.png)
+
+Each `Task` has a *sandbox* created when the `Task` starts and garbage
+collected when it finishes. All of a `Task`'s processes run in its
+sandbox, so processes can share state by using a shared current working
+directory.
+
+The sandbox garbage collection policy considers many factors, most
+importantly age and size. It makes a best-effort attempt to keep
+sandboxes around as long as possible post-task in order for service
+owners to inspect data and logs, should the `Task` have completed
+abnormally. But you can't design your applications assuming sandboxes
+will be around forever; instead, build log saving or other
+checkpointing mechanisms directly into your application or into your
+`Job` description.
+
+Job Lifecycle
+-------------
+
+When Aurora reads a configuration file and finds a `Job` definition, it:
+
+1.  Evaluates the `Job` definition.
+2.  Splits the `Job` into its constituent `Task`s.
+3.  Sends those `Task`s to the scheduler.
+4.  The scheduler puts the `Task`s into `PENDING` state, starting each
+    `Task`'s life cycle.
+
+### Life Of A Task
+
+![Life of a task](images/lifeofatask.png)
+
+### PENDING to RUNNING states
+
+When a `Task` is in the `PENDING` state, the scheduler constantly
+searches for machines satisfying that `Task`'s resource request
+requirements (RAM, disk space, CPU time) while maintaining configuration
+constraints such as "a `Task` must run on machines dedicated to a
+particular role" or attribute limit constraints such as "at most 2
+`Task`s from the same `Job` may run on each rack". When the scheduler
+finds a suitable match, it assigns the `Task` to a machine and puts the
+`Task` into the `ASSIGNED` state.
+
+From the `ASSIGNED` state, the scheduler sends an RPC to the slave
+machine containing `Task` configuration, which the slave uses to spawn
+an executor responsible for the `Task`'s lifecycle. When the scheduler
+receives an acknowledgement that the machine has accepted the `Task`,
+the `Task` goes into `STARTING` state.
+
+`STARTING` state initializes a `Task` sandbox. When the sandbox is fully
+initialized, Thermos begins to invoke `Process`es. Also, the slave
+machine sends an update to the scheduler that the `Task` is
+in `RUNNING` state.
+
+If a `Task` stays in `ASSIGNED` or `STARTING` for too long, the
+scheduler forces it into `LOST` state, creating a new `Task` in its
+place that's sent into `PENDING` state. This is technically true of any
+active state: if the Mesos core tells the scheduler that a slave has
+become unhealthy (or outright disappeared), the `Task`s assigned to that
+slave go into `LOST` state and new `Task`s are created in their place.
+From `PENDING` state, there is no guarantee a `Task` will be reassigned
+to the same machine unless job constraints explicitly force it there.
+
+If there is a state mismatch (e.g. a machine returns from a `netsplit`
+and the scheduler has marked all its `Task`s `LOST` and rescheduled
+them), a state reconciliation process kills the errant `RUNNING` tasks,
+which may take up to an hour. But to emphasize this point: there is no
+uniqueness guarantee for a single instance of a job in the presence of
+network partitions. If the Task requires that, it should be baked in at
+the application level using a distributed coordination service such as
+Zookeeper.
+
+### Task Updates
+
+`Job` configurations can be updated at any point in their lifecycle.
+Usually updates are done incrementally using a process called a *rolling
+upgrade*, in which Tasks are upgraded in small groups, one group at a
+time.  Updates are done using various Aurora Client commands.
+
+For a configuration update, the Aurora Client calculates required changes
+by examining the current job config state and the new desired job config.
+It then starts a rolling batched update process by going through every batch
+and performing these operations, sketched in the example after this list:
+
+- If an instance is present in the scheduler but isn't in the new config,
+  then that instance is killed.
+- If an instance is not present in the scheduler but is present in
+  the new config, then the instance is created.
+- If an instance is present in both the scheduler and the new config, then
+  the client diffs both task configs. If it detects any changes, it
+  performs an instance update by killing the old config instance and adding
+  the new config instance.
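+
+The following sketch captures that per-batch diff logic (a simplification, not the actual Aurora
+client code; both arguments are assumed to map instance id to task config):
+
+```python
+def plan_batch(scheduler_instances, new_config_instances):
+    ops = []
+    for instance_id in sorted(set(scheduler_instances) | set(new_config_instances)):
+        old = scheduler_instances.get(instance_id)
+        new = new_config_instances.get(instance_id)
+        if new is None:
+            ops.append(('kill', instance_id))      # present only in scheduler
+        elif old is None:
+            ops.append(('create', instance_id))    # present only in new config
+        elif old != new:
+            ops.append(('replace', instance_id))   # kill old config, add new
+    return ops
+```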
+
+The Aurora client continues through the instance list until all tasks are
+updated, in `RUNNING`, and healthy for a configurable amount of time.
+If the client determines the update is not going well (a percentage of health
+checks have failed), it cancels the update.
+
+Update cancellation runs a procedure similar to the described above
+update sequence, but in reverse order. New instance configs are swapped
+with old instance configs and batch updates proceed backwards
+from the point where the update failed. E.g., (0,1,2) (3,4,5) (6,7,8-FAIL)
+results in a rollback in order (8,7,6) (5,4,3) (2,1,0).
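+
+Illustrated with a trivial sketch (instance numbering as in the example above):
+
+```python
+instances = list(range(9))
+batches = [instances[i:i + 3] for i in range(0, len(instances), 3)]
+# update order:   [0, 1, 2], [3, 4, 5], [6, 7, 8]
+rollback = [list(reversed(batch)) for batch in reversed(batches)]
+# rollback order: [8, 7, 6], [5, 4, 3], [2, 1, 0]
+```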
+
+### HTTP Health Checking and Graceful Shutdown
+
+The Executor implements a protocol for rudimentary control of a task via HTTP.  Tasks subscribe to
+this protocol by declaring a port named `health`.  Take for example this configuration snippet:
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[health]}}')
+
+When this Process is included in a job, the job will be allocated a port, and the command line
+will be replaced with something like:
+
+    ./run_nginx.sh -port 42816
+
+Where 42816 happens to be the allocated port.  Typically, the Executor monitors Processes within
+a task only by liveness of the forked process.  However, when a `health` port is allocated, it will
+also send periodic HTTP health checks.  A task requesting a `health` port must handle the following
+requests:
+
+| HTTP request            | Description                             |
+| ------------            | -----------                             |
+| `GET /health`           | Inquires whether the task is healthy.   |
+| `POST /quitquitquit`    | Task should initiate graceful shutdown. |
+| `POST /abortabortabort` | Final warning task is being killed.     |
+
+Please see the
+[configuration reference](/documentation/0.11.0/configuration-reference/#healthcheckconfig-objects) for
+configuration options for this feature.
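+
+A bare-bones task honoring this protocol might look like the following sketch (Python 3's
+`http.server` is used here for brevity; this is not Aurora's own executor code or an official
+example):
+
+```python
+import sys
+import threading
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+class HealthHandler(BaseHTTPRequestHandler):
+    def do_GET(self):
+        if self.path == '/health':
+            self.send_response(200)
+            self.end_headers()
+            self.wfile.write(b'ok')
+        else:
+            self.send_response(404)
+            self.end_headers()
+
+    def do_POST(self):
+        if self.path in ('/quitquitquit', '/abortabortabort'):
+            self.send_response(200)
+            self.end_headers()
+            # Shut down from another thread so this response can flush first.
+            threading.Thread(target=server.shutdown).start()
+
+# The allocated port would be templated in via {{thermos.ports[health]}}.
+server = HTTPServer(('0.0.0.0', int(sys.argv[1])), HealthHandler)
+server.serve_forever()
+```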
+
+#### Snoozing Health Checks
+
+If you need to pause your health check, you can do so by touching a file inside of your sandbox,
+named `.healthchecksnooze`
+
+As long as that file is present, health checks will be disabled, enabling users to gather core dumps
+or other performance measurements without worrying about Aurora's health check killing their
+process.
+
+WARNING: Remember to remove this when you are done, otherwise your instance will have permanently
+disabled health checks.
+
+#### Tearing a task down
+
+The Executor follows an escalation sequence when killing a running task:
+
+  1. If a `health` port is not present, skip to (5)
+  2. POST /quitquitquit
+  3. wait 5 seconds
+  4. POST /abortabortabort
+  5. Send SIGTERM (`kill`)
+  6. Send SIGKILL (`kill -9`)
+
+If the Executor notices that all Processes in a Task have aborted during this sequence, it will
+not proceed with subsequent steps.  Note that graceful shutdown is best-effort, and due to the many
+inevitable realities of distributed systems, it may not be performed.
+
+### Giving Priority to Production Tasks: PREEMPTING
+
+Sometimes a Task needs to be interrupted, such as when a non-production
+Task's resources are needed by a higher priority production Task. This
+type of interruption is called a *pre-emption*. When this happens in
+Aurora, the non-production Task is killed and moved into
+the `PREEMPTING` state when both of the following are true:
+
+- The task being killed is a non-production task.
+- The other task is a `PENDING` production task that hasn't been
+  scheduled due to a lack of resources.
+
+Since production tasks are much more important, Aurora kills off the
+non-production task to free up resources for the production task. The
+scheduler UI shows the non-production task was preempted in favor of the
+production task. At some point, tasks in `PREEMPTING` move to `KILLED`.
+
+Note that non-production tasks consuming many resources are likely to be
+preempted in favor of production tasks.
+
+### Natural Termination: FINISHED, FAILED
+
+A `RUNNING` `Task` can terminate without direct user interaction. For
+example, it may be a finite computation that finishes, even something as
+simple as `echo hello world`. Or it could be an exceptional condition in
+a long-lived service. If the `Task` is successful (its underlying
+processes have succeeded with exit status `0` or finished without
+reaching failure limits) it moves into `FINISHED` state. If it finished
+after reaching a set of failure limits, it goes into `FAILED` state.
+
+### Forceful Termination: KILLING, RESTARTING
+
+You can terminate a `Task` by issuing an `aurora job kill` command, which
+moves it into `KILLING` state. The scheduler then sends the slave a
+request to terminate the `Task`. If the scheduler receives a successful
+response, it moves the Task into `KILLED` state and never restarts it.
+
+The scheduler has access to a non-public `RESTARTING` state. If a `Task`
+is forced into the `RESTARTING` state, the scheduler kills the
+underlying task but in parallel schedules an identical replacement for
+it.
+
+Configuration
+-------------
+
+You define and configure your Jobs (and their Tasks and Processes) in
+Aurora configuration files. Their filenames end with the `.aurora`
+suffix, and you write them in Python making use of the Pystachio
+templating language, along
+with specific Aurora, Mesos, and Thermos commands and methods. See the
+[Configuration Guide and Reference](/documentation/0.11.0/configuration-reference/) and
+[Configuration Tutorial](/documentation/0.11.0/configuration-tutorial/).
+
+Service Discovery
+-----------------
+
+It is possible for the Aurora executor to announce tasks into ServerSets for
+the purpose of service discovery.  ServerSets use the Zookeeper [group membership pattern](http://zookeeper.apache.org/doc/trunk/recipes.html#sc_outOfTheBox)
+of which there are several reference implementations:
+
+  - [C++](https://github.com/apache/mesos/blob/master/src/zookeeper/group.cpp)
+  - [Java](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/zookeeper/ServerSetImpl.java#L221)
+  - [Python](https://github.com/twitter/commons/blob/master/src/python/twitter/common/zookeeper/serverset/serverset.py#L51)
+
+These can also be used natively in Finagle using the [ZookeeperServerSetCluster](https://github.com/twitter/finagle/blob/master/finagle-serversets/src/main/scala/com/twitter/finagle/zookeeper/ZookeeperServerSetCluster.scala).
+
+For more information about how to configure announcing, see the [Configuration Reference](/documentation/0.11.0/configuration-reference/).
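+
+As a hedged sketch of consuming such an announcement (using the third-party
+[kazoo](https://kazoo.readthedocs.io/) client, which is an assumption here; the reference
+implementations above are the canonical clients), listing the members of a ServerSet might look
+like:
+
+```python
+from kazoo.client import KazooClient
+
+client = KazooClient(hosts='192.168.33.7:2181')
+client.start()
+# The path layout below is illustrative, not a fixed Aurora convention.
+path = '/aurora/www-data/prod/hello'
+for member in client.get_children(path):
+    data, _stat = client.get(path + '/' + member)
+    print(member, data)  # serialized endpoint information
+client.stop()
+```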
+
+Creating Jobs
+-------------
+
+You create and manipulate Aurora Jobs with the Aurora client, which starts all its
+command line commands with
+`aurora`. See [Aurora Client Commands](/documentation/0.11.0/client-commands/) for details
+about the Aurora Client.
+
+Interacting With Jobs
+---------------------
+
+You interact with Aurora jobs either via:
+
+- Read-only Web UIs
+
+  Part of the output from creating a new Job is a URL for the Job's scheduler UI page.
+
+  For example:
+
+      vagrant@precise64:~$ aurora job create devcluster/www-data/prod/hello \
+      /vagrant/examples/jobs/hello_world.aurora
+      INFO] Creating job hello
+      INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+      INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+  The "Job url" goes to the Job's scheduler UI page. To go to the overall scheduler UI page,
+  stop at the "scheduler" part of the URL, in this case, `http://precise64:8081/scheduler`
+
+  You can also reach the scheduler UI page via the Client command `aurora job open`:
+
+      aurora job open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+  If only the cluster is specified, it goes directly to that cluster's scheduler main page.
+  If the role is specified, it goes to the top-level role page. If the full job key is specified,
+  it goes directly to the job page where you can inspect individual tasks.
+
+  Once you click through to a role page, you see Jobs arranged separately by pending jobs, active
+  jobs, and finished jobs. Jobs are arranged by role, typically a service account for production
+  jobs and user accounts for test or development jobs.
+
+- The Aurora client
+
+  See [client commands](/documentation/0.11.0/client-commands/).
diff --git a/source/documentation/0.11.0/vagrant.md b/source/documentation/0.11.0/vagrant.md
new file mode 100644
index 0000000..838b7a9
--- /dev/null
+++ b/source/documentation/0.11.0/vagrant.md
@@ -0,0 +1,137 @@
+Getting Started
+===============
+
+This document shows you how to configure a complete cluster using a virtual machine. This setup
+replicates a real cluster in your development machine as closely as possible. After you complete
+the steps outlined here, you will be ready to create and run your first Aurora job.
+
+The following sections describe these steps in detail:
+
+1. [Overview](#overview)
+1. [Install VirtualBox and Vagrant](#install-virtualbox-and-vagrant)
+1. [Clone the Aurora repository](#clone-the-aurora-repository)
+1. [Start the local cluster](#start-the-local-cluster)
+1. [Log onto the VM](#log-onto-the-vm)
+1. [Run your first job](#run-your-first-job)
+1. [Rebuild components](#rebuild-components)
+1. [Shut down or delete your local cluster](#shut-down-or-delete-your-local-cluster)
+1. [Troubleshooting](#troubleshooting)
+
+
+Overview
+--------
+
+The Aurora distribution includes a set of scripts that enable you to create a local cluster in
+your development machine. These scripts use [Vagrant](https://www.vagrantup.com/) and
+[VirtualBox](https://www.virtualbox.org/) to run and configure a virtual machine. Once the
+virtual machine is running, the scripts install and initialize Aurora and any required components
+to create the local cluster.
+
+
+Install VirtualBox and Vagrant
+------------------------------
+
+First, download and install [VirtualBox](https://www.virtualbox.org/) on your development machine.
+
+Then download and install [Vagrant](https://www.vagrantup.com/). To verify that the installation
+was successful, open a terminal window and type the `vagrant` command. You should see a list of
+common commands for this tool.
+
+
+Clone the Aurora repository
+---------------------------
+
+To obtain the Aurora source distribution, clone its Git repository using the following command:
+
+     git clone git://git.apache.org/aurora.git
+
+
+Start the local cluster
+-----------------------
+
+Now change into the `aurora/` directory, which contains the Aurora source code and
+other scripts and tools:
+
+     cd aurora/
+
+To start the local cluster, type the following command:
+
+     vagrant up
+
+This command uses the configuration scripts in the Aurora distribution to:
+
+* Download a Linux system image.
+* Start a virtual machine (VM) and configure it.
+* Install the required build tools on the VM.
+* Install Aurora's requirements (like [Mesos](http://mesos.apache.org/) and
+[Zookeeper](http://zookeeper.apache.org/)) on the VM.
+* Build and install Aurora from source on the VM.
+* Start Aurora's services on the VM.
+
+This process takes several minutes to complete.
+
+To verify that Aurora is running on the cluster, visit the following URLs:
+
+* Scheduler - http://192.168.33.7:8081
+* Observer - http://192.168.33.7:1338
+* Mesos Master - http://192.168.33.7:5050
+* Mesos Slave - http://192.168.33.7:5051
+
+
+Log onto the VM
+---------------
+
+To SSH into the VM, run the following command in your development machine:
+
+     vagrant ssh
+
+To verify that Aurora is installed in the VM, type the `aurora` command. You should see a list
+of arguments and possible commands.
+
+The `/vagrant` directory on the VM is mapped to the `aurora/` local directory
+from which you started the cluster. You can edit files inside this directory in your development
+machine and access them from the VM under `/vagrant`.
+
+A pre-installed `clusters.json` file refers to your local cluster as `devcluster`, which you
+will use in client commands.
+
+
+Run your first job
+------------------
+
+Now that your cluster is up and running, you are ready to define and run your first job in Aurora.
+For more information, see the [Aurora Tutorial](/documentation/0.11.0/tutorial/).
+
+
+Rebuild components
+------------------
+
+If you are changing Aurora code and would like to rebuild a component, you can use the `aurorabuild`
+command on the VM to build and restart a component.  This is considerably faster than destroying
+and rebuilding your VM.
+
+`aurorabuild` accepts a list of components to build and update. To get a list of supported
+components, invoke the `aurorabuild` command with no arguments. For example, to rebuild and
+restart the client:
+
+     vagrant ssh -c 'aurorabuild client'
+
+
+Shut down or delete your local cluster
+--------------------------------------
+
+To shut down your local cluster, run the `vagrant halt` command in your development machine. To
+start it again, run the `vagrant up` command.
+
+Once you are finished with your local cluster, or if you would otherwise like to start from scratch,
+you can use the command `vagrant destroy` to turn off and delete the virtual file system.
+
+
+Troubleshooting
+---------------
+
+Most vagrant-related problems can be fixed with the following steps:
+
+* Destroying the vagrant environment with `vagrant destroy`
+* Killing any orphaned VMs (see AURORA-499) with `virtualbox` UI or `VBoxManage` command line tool
+* Cleaning the repository of build artifacts and other intermediate output with `git clean -fdx`
+* Bringing up the vagrant environment with `vagrant up`
diff --git a/source/documentation/0.12.0/build-system.md b/source/documentation/0.12.0/build-system.md
new file mode 100644
index 0000000..39c231d
--- /dev/null
+++ b/source/documentation/0.12.0/build-system.md
@@ -0,0 +1,100 @@
+The Python components of Aurora are built using [Pants](https://pantsbuild.github.io).
+
+Python Build Conventions
+========================
+The Python code is laid out according to the following conventions: 
+
+1. 1 `BUILD` per 3rd level directory. For a list of current top-level packages run:
+
+        % find src/main/python -maxdepth 3 -mindepth 3 -type d |\
+        while read dname; do echo $dname |\
+            sed 's@src/main/python/\(.*\)/\(.*\)/\(.*\).*@\1.\2.\3@'; done
+
+2.  Each `BUILD` file exports 1 
+    [`python_library`](https://pantsbuild.github.io/build_dictionary.html#bdict_python_library)
+    that provides a
+    [`setup_py`](https://pantsbuild.github.io/build_dictionary.html#setup_py)
+    containing each
+    [`python_binary`](https://pantsbuild.github.io/build_dictionary.html#python_binary)
+    in the `BUILD` file, named the same as the directory it's in so that it can be referenced
+    without a ':' character. The `sources` field in the `python_library` will almost always be
+    `rglobs('*.py')`.
+
+3.  Other BUILD files may only depend on this single public `python_library`
+    target. Any other target is considered a private implementation detail and
+    should be prefixed with an `_`.
+
+4.  `python_binary` targets are always named the same as the exported console script.
+
+5.  `python_binary` targets must have identical `dependencies` to the `python_library` exported
+    by the package and must use `entry_point`.
+
+    This means a PEX file generated by pants will contain exactly the same files that will be
+    available on the `PYTHONPATH` in the case of `pip install` of the corresponding library
+    target. This will help our migration off of Pants in the future.
+
+Annotated example - apache.thermos.runner
+-----------------------------------------
+```
+% find src/main/python/apache/thermos/runner
+src/main/python/apache/thermos/runner
+src/main/python/apache/thermos/runner/__init__.py
+src/main/python/apache/thermos/runner/thermos_runner.py
+src/main/python/apache/thermos/runner/BUILD
+% cat src/main/python/apache/thermos/runner/BUILD
+# License boilerplate omitted
+import os
+
+
+# Private target so that a setup_py can exist without a circular dependency. Only targets within
+# this file should depend on this.
+python_library(
+  name = '_runner',
+  # The target covers every python file under this directory and subdirectories.
+  sources = rglobs('*.py'),
+  dependencies = [
+    '3rdparty/python:twitter.common.app',
+    '3rdparty/python:twitter.common.log',
+    # Source dependencies are always referenced without a ':'.
+    'src/main/python/apache/thermos/common',
+    'src/main/python/apache/thermos/config',
+    'src/main/python/apache/thermos/core',
+  ],
+)
+
+# Binary target for thermos_runner.pex. Nothing should depend on this - it's only used as an
+# argument to ./pants binary.
+python_binary(
+  name = 'thermos_runner',
+  # Use entry_point, not source so the files used here are the same ones tests see.
+  entry_point = 'apache.thermos.bin.thermos_runner',
+  dependencies = [
+    # Notice that we depend only on the single private target from this BUILD file here.
+    ':_runner',
+  ],
+)
+
+# The public library that everyone importing the runner symbols uses.
+# The test targets and any other dependent source code should depend on this.
+python_library(
+  name = 'runner',
+  dependencies = [
+    # Again, notice that we depend only on the single private target from this BUILD file here.
+    ':_runner',
+  ],
+  # We always provide a setup_py. This will cause any dependee libraries to automatically
+  # reference this library in their requirements.txt rather than copy the source files into their
+  # sdist.
+  provides = setup_py(
+    # Conventionally named and versioned.
+    name = 'apache.thermos.runner',
+    version = open(os.path.join(get_buildroot(), '.auroraversion')).read().strip().upper(),
+  ).with_binaries({
+    # Every binary in this file should also be repeated here.
+    # Always use the dict-form of .with_binaries so that commands with dashes in their names are
+    # supported.
+    # The console script name is always the same as the PEX with .pex stripped.
+    'thermos_runner': ':thermos_runner',
+  }),
+)
+```
diff --git a/source/documentation/0.12.0/client-cluster-configuration.md b/source/documentation/0.12.0/client-cluster-configuration.md
new file mode 100644
index 0000000..3d363c3
--- /dev/null
+++ b/source/documentation/0.12.0/client-cluster-configuration.md
@@ -0,0 +1,70 @@
+# Client Cluster Configuration
+
+A cluster configuration file is used by the Aurora client to describe the Aurora clusters with
+which it can communicate. Ultimately this allows client users to reference clusters with short names
+like us-east and eu. The following properties may be set:
+
+  **Property**             | **Type** | **Description**
+  :------------------------| :------- | :--------------
+   **name**                | String   | Cluster name (Required)
+   **slave_root**          | String   | Path to mesos slave work dir (Required)
+   **slave_run_directory** | String   | Name of mesos slave run dir (Required)
+   **zk**                  | String   | Hostname of ZooKeeper instance used to resolve Aurora schedulers.
+   **zk_port**             | Integer  | Port of ZooKeeper instance used to locate Aurora schedulers (Default: 2181)
+   **scheduler_zk_path**   | String   | ZooKeeper path under which scheduler instances are registered.
+   **scheduler_uri**       | String   | URI of Aurora scheduler instance.
+   **proxy_url**           | String   | Used by the client to format URLs for display.
+   **auth_mechanism**      | String   | The authentication mechanism to use when communicating with the scheduler. (Default: UNAUTHENTICATED)
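+
+For example, a single cluster entry combining these properties might look like the following
+(all values are illustrative):
+
+```javascript
+[{
+  "name": "us-east",
+  "slave_root": "/var/lib/mesos",
+  "slave_run_directory": "latest",
+  "zk": "zk1.us-east.example.com",
+  "zk_port": 2181,
+  "scheduler_zk_path": "/aurora/scheduler",
+  "proxy_url": "http://aurora.us-east.example.com:8081",
+  "auth_mechanism": "UNAUTHENTICATED"
+}]
+```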
+
+#### name
+
+The name of the Aurora cluster represented by this entry. This name will be the `cluster` portion of
+any job keys identifying jobs running within the cluster.
+
+#### slave_root
+
+The path on the mesos slaves where executing tasks can be found. It is used in combination with the
+`slave_run_directory` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This value should match the value passed to `mesos-slave`
+as `-work_dir`.
+
+#### slave_run_directory
+
+The name of the directory where the task run can be found. This is used in combination with the
+`slave_root` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This should almost always be set to `latest`.
+
+#### zk
+
+The hostname of the ZooKeeper instance used to resolve the Aurora scheduler. Aurora uses ZooKeeper
+to elect a leader. The client will connect to this ZooKeeper instance to determine the current
+leader. This host should match the host passed to the scheduler as `-zk_endpoints`.
+
+#### zk_port
+
+The port on which the ZooKeeper instance is running. If not set this will default to the standard
+ZooKeeper port of 2181. This port should match the port in the host passed to the scheduler as
+`-zk_endpoints`.
+
+#### scheduler_zk_path
+
+The path on the ZooKeeper instance under which the Aurora serverset is registered. This value should
+match the value passed to the scheduler as `-serverset_path`.
+
+#### scheduler_uri
+
+The URI of the scheduler. This would be used in place of the ZooKeeper related configuration above
+in circumstances where direct communication with a single scheduler is needed (e.g. testing
+environments). It is strongly advised to **never** use this property for production deploys.
+
+#### proxy_url
+
+Instead of using the hostname of the leading scheduler as the base url, if `proxy_url` is set, its
+value will be used instead. In that scenario the value for `proxy_url` would be, for example, the
+URL of your VIP in a loadbalancer or a roundrobin DNS name.
+
+#### auth_mechanism
+
+The identifier of an authentication mechanism that the client should use when communicating with the
+scheduler. Support for values other than `UNAUTHENTICATED` requires a matching scheduler-side
+[security configuration](/documentation/0.12.0/security/).
diff --git a/source/documentation/0.12.0/client-commands.md b/source/documentation/0.12.0/client-commands.md
new file mode 100644
index 0000000..6dc5c8d
--- /dev/null
+++ b/source/documentation/0.12.0/client-commands.md
@@ -0,0 +1,379 @@
+Aurora Client Commands
+======================
+
+- [Introduction](#introduction)
+- [Cluster Configuration](#cluster-configuration)
+- [Job Keys](#job-keys)
+- [Modifying Aurora Client Commands](#modifying-aurora-client-commands)
+- [Regular Jobs](#regular-jobs)
+    - [Creating and Running a Job](#creating-and-running-a-job)
+    - [Running a Command On a Running Job](#running-a-command-on-a-running-job)
+    - [Killing a Job](#killing-a-job)
+    - [Updating a Job](#updating-a-job)
+        - [Coordinated job updates](#coordinated-job-updates)
+    - [Renaming a Job](#renaming-a-job)
+    - [Restarting Jobs](#restarting-jobs)
+- [Cron Jobs](#cron-jobs)
+- [Comparing Jobs](#comparing-jobs)
+- [Viewing/Examining Jobs](#viewingexamining-jobs)
+    - [Listing Jobs](#listing-jobs)
+    - [Inspecting a Job](#inspecting-a-job)
+    - [Versions](#versions)
+    - [Checking Your Quota](#checking-your-quota)
+    - [Finding a Job on Web UI](#finding-a-job-on-web-ui)
+    - [Getting Job Status](#getting-job-status)
+    - [Opening the Web UI](#opening-the-web-ui)
+    - [SSHing to a Specific Task Machine](#sshing-to-a-specific-task-machine)
+    - [Templating Command Arguments](#templating-command-arguments)
+
+Introduction
+------------
+
+Once you have written an `.aurora` configuration file that describes
+your Job and its parameters and functionality, you interact with Aurora
+using Aurora Client commands. This document describes all of these commands
+and how and when to use them. All Aurora Client commands start with
+`aurora`, followed by the name of the specific command and its
+arguments.
+
+*Job keys* are a very common argument to Aurora commands, as well as the
+gateway to useful information about a Job. Before using Aurora, you
+should read the next section which describes them in detail. The section
+after that briefly describes how you can modify the behavior of certain
+Aurora Client commands, linking to a detailed document about how to do
+that.
+
+This is followed by the Regular Jobs section, which describes the basic
+Client commands for creating, running, and manipulating Aurora Jobs.
+After that are sections on Comparing Jobs and Viewing/Examining Jobs. In
+other words, various commands for getting information and metadata about
+Aurora Jobs.
+
+Cluster Configuration
+---------------------
+
+The client must be able to find a configuration file that specifies available clusters. This file
+declares shorthand names for clusters, which are in turn referenced by job configuration files
+and client commands.
+
+The client will load at most two configuration files, making both of their defined clusters
+available. The first is intended to be a system-installed cluster file, using the path specified in
+the environment variable `AURORA_CONFIG_ROOT`, defaulting to `/etc/aurora/clusters.json` if the
+environment variable is not set. The second is a user-installed file, located at
+`~/.aurora/clusters.json`.
+
+A cluster configuration is formatted as JSON.  The simplest cluster configuration is one that
+communicates with a single (non-leader-elected) scheduler.  For example:
+
+```javascript
+[{
+  "name": "example",
+  "scheduler_uri": "localhost:55555",
+}]
+```
+
+A configuration for a leader-elected scheduler would contain something like:
+
+```javascript
+[{
+  "name": "example",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler"
+}]
+```
+
+For more details on cluster configuration see the
+[Client Cluster Configuration](/documentation/0.12.0/client-cluster-configuration/) documentation.
+
+Job Keys
+--------
+
+A job key is a unique system-wide identifier for an Aurora-managed
+Job, for example `cluster1/web-team/test/experiment204`. It is a 4-tuple
+consisting of, in order, *cluster*, *role*, *environment*, and
+*jobname*, separated by slashes. Cluster is the name of an Aurora
+cluster. Role is the Unix service account under which the Job
+runs. Environment is a namespace component like `devel`, `test`,
+`prod`, or `stagingN`. Jobname is the Job's name.
+
+The combination of all four values uniquely specifies the Job. If any
+one value is different from that of another job key, the two job keys
+refer to different Jobs. For example, the job keys
+`cluster1/tyg/prod/workhorse`, `cluster1/tyg/prod/workcamel`,
+`cluster2/tyg/prod/workhorse`, `cluster2/foo/prod/workhorse`, and
+`cluster1/tyg/test/workhorse` all refer to different Jobs.
+
+Role names are user accounts existing on the slave machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
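+For example, a complete job key is passed directly to client commands, such as
+the `status` command described later in this document:
+
+    aurora job status cluster1/web-team/test/experiment204
+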
+Modifying Aurora Client Commands
+--------------------------------
+
+For certain Aurora Client commands, you can define hook methods that run
+either before or after an action that takes place during the command's
+execution, as well as based on whether the action finished successfully or failed
+during execution. Basically, a hook is code that lets you extend the
+command's actions. The hook executes on the client side, specifically on
+the machine executing Aurora commands.
+
+Hooks can be associated with these Aurora Client commands:
+
+  - `job create`
+  - `job kill`
+  - `job restart`
+
+The process for writing and activating them is complex enough
+that we explain it in a devoted document, [Hooks for Aurora Client API](/documentation/0.12.0/hooks/).
+
+Regular Jobs
+------------
+
+This section covers Aurora commands related to running, killing,
+renaming, updating, and restarting a basic Aurora Job.
+
+### Creating and Running a Job
+
+    aurora job create <job key> <configuration file>
+
+Creates and then runs a Job with the specified job key based on a `.aurora` configuration file.
+The configuration file may also contain and activate hook definitions.
+
+### Running a Command On a Running Job
+
+    aurora task run CLUSTER/ROLE/ENV/NAME[/INSTANCES] <cmd>
+
+Runs a shell command on all machines currently hosting shards of a
+single Job.
+
+`run` supports the same command line wildcards used to populate a Job's
+commands; i.e. anything in the `{{mesos.*}}` and `{{thermos.*}}`
+namespaces.
+
+### Killing a Job
+
+    aurora job killall CLUSTER/ROLE/ENV/NAME
+
+Kills all Tasks associated with the specified Job, blocking until all
+are terminated. Defaults to killing all instances in the Job.
+
+The `<configuration file>` argument for `killall` is optional. Use it only
+if the configuration file contains hook definitions and activations that
+affect the kill command.
+
+### Updating a Job
+
+There are several sub-commands to manage job updates:
+
+    aurora update start <job key> <configuration file>
+    aurora update info <job key>
+    aurora update pause <job key>
+    aurora update resume <job key>
+    aurora update abort <job key>
+    aurora update list <cluster>
+
+When you `start` a job update, the command will return once it has sent the
+instructions to the scheduler.  At that point, you may view detailed
+progress for the update with the `info` subcommand, in addition to viewing
+graphical progress in the web browser.  You may also get a full listing of
+in-progress updates in a cluster with `list`.
+
+Once an update has been started, you can `pause` to keep the update but halt
+progress.  This can be useful for tasks like debugging a partially-updated
+job to determine whether you would like to proceed.  You can `resume` to
+proceed.
+
+You may `abort` a job update regardless of the state it is in. This will
+instruct the scheduler to completely abandon the job update and leave the job
+in the current (possibly partially-updated) state.
+
+#### Coordinated job updates
+
+Some Aurora services may benefit from having more control over updates by explicitly
+acknowledging ("heartbeating") job update progress. This may be helpful for mission-critical
+service updates where explicit job health monitoring is vital during the entire job update
+lifecycle. Such job updates would rely on an external service (or a custom client) periodically
+pulsing an active coordinated job update via a
+[pulseJobUpdate RPC](https://github.com/apache/aurora/blob/#{git_tag}/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+A coordinated update is defined by setting a positive
+[pulse_interval_secs](/documentation/0.12.0/configuration-reference/#updateconfig-objects) value in the job
+configuration file. If no pulses are received within the specified interval, the update will be blocked. A blocked
+update is unable to continue rolling forward (or rolling back) but retains its active status.
+It may only be unblocked by a fresh `pulseJobUpdate` call.
+
+NOTE: A coordinated update starts in `ROLL_FORWARD_AWAITING_PULSE` state and will not make any
+progress until the first pulse arrives. However, a paused update (`ROLL_FORWARD_PAUSED` or
+`ROLL_BACK_PAUSED`) is still considered active and upon resuming will immediately make progress
+provided the pulse interval has not expired.
+
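+As a sketch (the attribute is documented in the
+[UpdateConfig reference](/documentation/0.12.0/configuration-reference/#updateconfig-objects);
+the surrounding job definition is assumed), a coordinated update is enabled in
+the configuration file like so:
+
+    update_config = UpdateConfig(
+      # Block the update if no pulseJobUpdate call arrives within 60 seconds.
+      pulse_interval_secs = 60)
+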
+### Renaming a Job
+
+Renaming is a tricky operation as downstream clients must be informed of
+the new name. A conservative approach
+to renaming suitable for production services is:
+
+1.  Modify the Aurora configuration file to change the role,
+    environment, and/or name as appropriate to the standardized naming
+    scheme.
+2.  Check that only these naming components have changed
+    with `aurora diff`.
+
+        aurora job diff CLUSTER/ROLE/ENV/NAME <job_configuration>
+
+3.  Create the (identical) job at the new key. You may need to request a
+    temporary quota increase.
+
+        aurora job create CLUSTER/ROLE/ENV/NEW_NAME <job_configuration>
+
+4.  Migrate all clients over to the new job key. Update all links and
+    dashboards. Ensure that both job keys run identical versions of the
+    code while in this state.
+5.  After verifying that all clients have successfully moved over, kill
+    the old job.
+
+        aurora job killall CLUSTER/ROLE/ENV/NAME
+
+6.  If you received a temporary quota increase, be sure to let the
+    powers that be know you no longer need the additional capacity.
+
+### Restarting Jobs
+
+`restart` restarts all shards of the Job identified by the given job key:
+
+    aurora job restart CLUSTER/ROLE/ENV/NAME[/INSTANCES]
+
+Restarts are controlled on the client side, so aborting
+the `job restart` command halts the restart operation.
+
+**Note**: `job restart` only applies its command line arguments and is
+neither affected by nor makes use of `update.config`. Restarting
+does ***not*** involve a configuration change. To update the
+configuration, use `update.config`.
+
+The `--config` argument for restart is optional. Use it only
+if it contains hook definitions and activations that affect the
+`job restart` command.
+
+Cron Jobs
+---------
+
+You can manage cron jobs using the `aurora cron` command.  Please see
+[cron-jobs.md](/documentation/0.12.0/cron-jobs/) for more details.
+
+You will see various commands and options relating to cron jobs in
+`aurora -h` and similar. Ignore them, as they're not yet implemented.
+
+Comparing Jobs
+--------------
+
+    aurora job diff CLUSTER/ROLE/ENV/NAME <job configuration>
+
+Compares a job configuration against a running job. By default the diff
+is determined using `diff`, though you may choose an alternate
+diff program by specifying the `DIFF_VIEWER` environment variable.
+
+Viewing/Examining Jobs
+----------------------
+
+Above we discussed creating, killing, and updating Jobs. Here we discuss
+how to view and examine Jobs.
+
+### Listing Jobs
+
+    aurora config list <job configuration>
+
+Lists all Jobs defined in the named configuration file.
+
+### Inspecting a Job
+
+    aurora job inspect CLUSTER/ROLE/ENV/NAME <job configuration>
+
+`inspect` verifies that its specified job can be parsed from a
+configuration file, and displays the parsed configuration.
+
+### Checking Your Quota
+
+    aurora quota get CLUSTER/ROLE
+
+Prints the production quota allocated to the role at the given
+cluster. Only non-[dedicated](/documentation/0.12.0/deploying-aurora-scheduler/#dedicated-attribute)
+[production](/documentation/0.12.0/configuration-reference/#job-objects) jobs consume quota.
+
+### Finding a Job on Web UI
+
+When you create a job, part of the output response contains a URL that goes
+to the job's scheduler UI page. For example:
+
+    vagrant@precise64:~$ aurora job create devcluster/www-data/prod/hello /vagrant/examples/jobs/hello_world.aurora
+    INFO] Creating job hello
+    INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+    INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+You can go to the scheduler UI page for this job via `http://precise64:8081/scheduler/www-data/prod/hello`.
+You can go to the overall scheduler UI page by truncating that URL after `scheduler`: `http://precise64:8081/scheduler`.
+
+Once you click through to a role page, you see Jobs grouped
+into pending jobs, active jobs, and finished jobs.
+Jobs are arranged by role, typically a service account for
+production jobs and user accounts for test or development jobs.
+
+### Getting Job Status
+
+    aurora job status <job_key>
+
+Returns the status of recent tasks associated with the
+`job_key` specified Job in its supplied cluster. Typically this includes
+a mix of active tasks (running or assigned) and inactive tasks
+(successful, failed, and lost.)
+
+### Opening the Web UI
+
+Use the Job's web UI scheduler URL or the `aurora status` command to find out on which
+machines individual tasks are scheduled. You can open the web UI via the
+`open` command if invoked from your machine:
+
+    aurora job open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+If only the cluster is specified, it goes directly to that cluster's
+scheduler main page. If the role is specified, it goes to the top-level
+role page. If the full job key is specified, it goes directly to the job
+page where you can inspect individual tasks.
+
+### SSHing to a Specific Task Machine
+
+    aurora task ssh <job_key> <shard number>
+
+You can have the Aurora client ssh directly to the machine that has been
+assigned a particular Job/shard number. This may be useful for quickly
+diagnosing issues such as performance issues or abnormal behavior on a
+particular machine.
+
+### Templating Command Arguments
+
+    aurora task run [-e] [-t THREADS] <job_key> -- <<command-line>>
+
+Given a job specification, run the supplied command on all hosts and
+return the output. You may use the standard Mustache templating rules:
+
+- `{{thermos.ports[name]}}` substitutes the specific named port of the
+  task assigned to this machine
+- `{{mesos.instance}}` substitutes the shard id of the job's task
+  assigned to this machine
+- `{{thermos.task_id}}` substitutes the task id of the job's task
+  assigned to this machine
+
+For example, the following type of pattern can be a powerful diagnostic
+tool:
+
+    aurora task run -t5 cluster1/tyg/devel/seizure -- \
+      'curl -s -m1 localhost:{{thermos.ports[http]}}/vars | grep uptime'
+
+By default, the command runs in the Task's sandbox. The `-e` option can
+run the command in the executor's sandbox. This is mostly useful for
+Aurora administrators.
+
+You can parallelize the runs by using the `-t` option.
diff --git a/source/documentation/0.12.0/committers.md b/source/documentation/0.12.0/committers.md
new file mode 100644
index 0000000..3633548
--- /dev/null
+++ b/source/documentation/0.12.0/committers.md
@@ -0,0 +1,81 @@
+Setting up your email account
+-----------------------------
+Once your Apache ID has been set up, you can configure your account, add ssh keys, and set up an
+email forwarding address at
+
+  http://id.apache.org
+
+Additional instructions for setting up your new committer email can be found at
+
+  http://www.apache.org/dev/user-email.html
+
+The recommended setup is to configure all services (mailing lists, JIRA, ReviewBoard) to send
+emails to your @apache.org email address.
+
+
+Creating a gpg key for releases
+-------------------------------
+In order to create a release candidate you will need a gpg key published to an external key server
+and that key will need to be added to our KEYS file as well.
+
+1. Create a key:
+
+               gpg --gen-key
+
+2. Add your gpg key to the Apache Aurora KEYS file:
+
+               git clone https://gitbox.apache.org/repos/asf/aurora.git
+               (gpg --list-sigs <KEY ID> && gpg --armor --export <KEY ID>) >> KEYS
+               git add KEYS && git commit -m "Adding gpg key for <APACHE ID>"
+               ./rbt post -o -g
+
+3. Publish the key to an external key server:
+
+               gpg --keyserver pgp.mit.edu --send-keys <KEY ID>
+
+4. Upload the updated KEYS file to the Apache Aurora svn dist locations listed below:
+
+               https://dist.apache.org/repos/dist/dev/aurora/KEYS
+               https://dist.apache.org/repos/dist/release/aurora/KEYS
+
+5. Add your key to git config for use with the release scripts:
+
+               git config --global user.signingkey <KEY ID>
+
+
+Creating a release
+------------------
+The following will guide you through the steps to create a release candidate, vote, and finally an
+official Apache Aurora release. Before starting, your gpg key should be in the KEYS file and you
+must have access to commit to the dist.a.o repositories.
+
+1. Ensure that all issues resolved for this release candidate are tagged with the correct Fix
+Version in Jira; the changelog script will use this to generate the CHANGELOG in step #2.
+
+2. Create a release candidate. This will automatically update the CHANGELOG and commit it, create a
+branch and update the current version within the trunk. To create a minor version update and publish
+it, run
+
+               ./build-support/release/release-candidate -l m -p
+
+3. Update, if necessary, the draft email created from the `release-candidate` script in step #2 and
+send the [VOTE] email to the dev@ mailing list. You can verify the release signature and checksums
+by running
+
+               ./build-support/release/verify-release-candidate
+
+4. Wait for the vote to complete. If the vote fails, close it by replying to the initial [VOTE]
+email sent in step #3, editing the subject to [RESULT][VOTE] ... and noting the failure reason
+(example [here](http://markmail.org/message/d4d6xtvj7vgwi76f)). Address any issues, then go back to
+step #1 and run again; this time, use the -r flag to increment the release candidate
+version. This will automatically clean up the release candidate rc0 branch and source distribution.
+
+               ./build-support/release/release-candidate -l m -r 1 -p
+
+5. Once the vote has successfully passed, create the release
+
+               ./build-support/release/release
+
+6. Update the draft email created from the `release` script in step #5 to include the Apache ID's for
+all binding votes and send the [RESULT][VOTE] email to the dev@ mailing list.
+
diff --git a/source/documentation/0.12.0/configuration-reference.md b/source/documentation/0.12.0/configuration-reference.md
new file mode 100644
index 0000000..176d1a8
--- /dev/null
+++ b/source/documentation/0.12.0/configuration-reference.md
@@ -0,0 +1,695 @@
+Aurora + Thermos Configuration Reference
+========================================
+
+- [Aurora + Thermos Configuration Reference](#aurora--thermos-configuration-reference)
+- [Introduction](#introduction)
+- [Process Schema](#process-schema)
+    - [Process Objects](#process-objects)
+      - [name](#name)
+      - [cmdline](#cmdline)
+      - [max_failures](#max_failures)
+      - [daemon](#daemon)
+      - [ephemeral](#ephemeral)
+      - [min_duration](#min_duration)
+      - [final](#final)
+      - [logger](#logger)
+- [Task Schema](#task-schema)
+    - [Task Object](#task-object)
+      - [name](#name-1)
+      - [processes](#processes)
+        - [constraints](#constraints)
+      - [resources](#resources)
+      - [max_failures](#max_failures-1)
+      - [max_concurrency](#max_concurrency)
+      - [finalization_wait](#finalization_wait)
+    - [Constraint Object](#constraint-object)
+    - [Resource Object](#resource-object)
+- [Job Schema](#job-schema)
+    - [Job Objects](#job-objects)
+    - [Services](#services)
+    - [Revocable Jobs](#revocable-jobs)
+    - [UpdateConfig Objects](#updateconfig-objects)
+    - [HealthCheckConfig Objects](#healthcheckconfig-objects)
+    - [Announcer Objects](#announcer-objects)
+    - [Container Objects](#container)
+    - [LifecycleConfig Objects](#lifecycleconfig-objects)
+- [Specifying Scheduling Constraints](#specifying-scheduling-constraints)
+- [Template Namespaces](#template-namespaces)
+    - [mesos Namespace](#mesos-namespace)
+    - [thermos Namespace](#thermos-namespace)
+- [Basic Examples](#basic-examples)
+    - [hello_world.aurora](#hello_worldaurora)
+    - [Environment Tailoring](#environment-tailoring)
+      - [hello_world_productionized.aurora](#hello_world_productionizedaurora)
+
+Introduction
+============
+
+Don't know where to start? The Aurora configuration schema is very
+powerful, and configurations can become quite complex for advanced use
+cases.
+
+For examples of simple configurations to get something up and running
+quickly, check out the [Tutorial](/documentation/0.12.0/tutorial/). When you feel comfortable with the basics, move
+on to the [Configuration Tutorial](/documentation/0.12.0/configuration-tutorial/) for more in-depth coverage of
+configuration design.
+
+For additional basic configuration examples, see [the end of this document](#basic-examples).
+
+Process Schema
+==============
+
+Process objects consist of required `name` and `cmdline` attributes. You can customize Process
+behavior with its optional attributes. Remember, Processes are handled by Thermos.
+
+### Process Objects
+
+  **Attribute Name**  | **Type**    | **Description**
+  ------------------- | :---------: | ---------------------------------
+   **name**           | String      | Process name (Required)
+   **cmdline**        | String      | Command line (Required)
+   **max_failures**   | Integer     | Maximum process failures (Default: 1)
+   **daemon**         | Boolean     | When True, this is a daemon process. (Default: False)
+   **ephemeral**      | Boolean     | When True, this is an ephemeral process. (Default: False)
+   **min_duration**   | Integer     | Minimum duration between process restarts in seconds. (Default: 15)
+   **final**          | Boolean     | When True, this process is a finalizing one that should run last. (Default: False)
+   **logger**         | Logger      | Struct defining the log behavior for the process. (Default: Empty)
+
+#### name
+
+The name is any valid UNIX filename string (specifically no
+slashes, NULLs or leading periods). Within a Task object, each Process name
+must be unique.
+
+#### cmdline
+
+The command line run by the process. The command line is invoked in a bash
+subshell, so it can involve full-blown bash scripts. However, nothing is
+supplied for command-line arguments so `$*` is unspecified.
+
+#### max_failures
+
+The maximum number of failures (non-zero exit statuses) this process can
+have before being marked permanently failed and not retried. If a
+process permanently fails, Thermos looks at the failure limit of the task
+containing the process (usually 1) to determine if the task has
+failed as well.
+
+Setting `max_failures` to 0 makes the process retry
+indefinitely until it achieves a successful (zero) exit status.
+It retries at most once every `min_duration` seconds to prevent
+an effective denial of service attack on the coordinating Thermos scheduler.
+
+#### daemon
+
+By default, Thermos processes are non-daemon. If `daemon` is set to True, a
+successful (zero) exit status does not prevent future process runs.
+Instead, the process reinvokes after `min_duration` seconds.
+However, the maximum failure limit still applies. A combination of
+`daemon=True` and `max_failures=0` causes a process to retry
+indefinitely regardless of exit status. This should be avoided
+for very short-lived processes because of the accumulation of
+checkpointed state for each process run. When running in Mesos
+specifically, `max_failures` is capped at 100.
+
+#### ephemeral
+
+By default, Thermos processes are non-ephemeral. If `ephemeral` is set to
+True, the process' status is not used to determine if its containing task
+has completed. For example, consider a task with a non-ephemeral
+webserver process and an ephemeral logsaver process
+that periodically checkpoints its log files to a centralized data store.
+The task is considered finished once the webserver process has
+completed, regardless of the logsaver's current status.
+
+#### min_duration
+
+Processes may succeed or fail multiple times during a single task's
+duration. Each of these is called a *process run*. `min_duration` is
+the minimum number of seconds the scheduler waits before running the
+same process.
+
+#### final
+
+Processes can be grouped into two classes: ordinary processes and
+finalizing processes. By default, Thermos processes are ordinary. They
+run as long as the task is considered healthy (i.e., no failure
+limits have been reached.) But once all regular Thermos processes
+finish or the task reaches a certain failure threshold, it
+moves into a "finalization" stage and runs all finalizing
+processes. These are typically processes necessary for cleaning up the
+task, such as log checkpointers, or perhaps e-mail notifications that
+the task completed.
+
+Finalizing processes may not depend upon ordinary processes or
+vice-versa; however, finalizing processes may depend upon other
+finalizing processes and otherwise run as a typical process
+schedule.
+
+#### logger
+
+The default behavior of Thermos is to store stderr/stdout logs in files which grow unbounded.
+In the event that you have large log volume, you may want to configure Thermos to automatically rotate logs
+after they grow to a certain size, which can prevent your job from using more than its allocated
+disk space.
+
+A Logger union consists of a destination enum, a mode enum, and a rotation policy.
+The `destination` attribute sets where the process logs should be sent. The default
+option is `file`. It is also possible to specify `console` to output logs
+to stdout/stderr, `none` to suppress any log output, or `both` to send logs to files and
+console output. When using `none` or `console`, rotation attributes are ignored.
+Rotation policies only apply to loggers whose mode is `rotate`. The acceptable values
+for the LoggerMode enum are `standard` and `rotate`. The rotation policy applies to both
+stderr and stdout.
+
+By default, all processes use the `standard` LoggerMode.
+
+  **Attribute Name**  | **Type**          | **Description**
+  ------------------- | :---------------: | ---------------------------------
+   **destination**    | LoggerDestination | Destination of logs. (Default: `file`)
+   **mode**           | LoggerMode        | Mode of the logger. (Default: `standard`)
+   **rotate**         | RotatePolicy      | An optional rotation policy.
+
+A RotatePolicy describes log rotation behavior for when `mode` is set to `rotate`. It is ignored
+otherwise.
+
+  **Attribute Name**  | **Type**     | **Description**
+  ------------------- | :----------: | ---------------------------------
+   **log_size**       | Integer      | Maximum size (in bytes) of an individual log file. (Default: 100 MiB)
+   **backups**        | Integer      | The maximum number of backups to retain. (Default: 5)
+
+An example process configuration is as follows:
+
+        process = Process(
+          name='process',
+          logger=Logger(
+            destination=LoggerDestination('both'),
+            mode=LoggerMode('rotate'),
+            rotate=RotatePolicy(log_size=5*MB, backups=5)
+          )
+        )
+
+Task Schema
+===========
+
+Tasks fundamentally consist of a `name` and a list of Process objects stored as the
+value of the `processes` attribute. Processes can be further constrained with
+`constraints`. By default, `name`'s value inherits from the first Process in the
+`processes` list, so for simple `Task` objects with one Process, `name`
+can be omitted. In Mesos, `resources` is also required.
+
+### Task Object
+
+   **param**               | **type**                         | **description**
+   ---------               | :---------:                      | ---------------
+   ```name```              | String                           | Process name (Required) (Default: ```processes[0].name```)
+   ```processes```         | List of ```Process``` objects    | List of ```Process``` objects bound to this task. (Required)
+   ```constraints```       | List of ```Constraint``` objects | List of ```Constraint``` objects constraining processes.
+   ```resources```         | ```Resource``` object            | Resource footprint. (Required)
+   ```max_failures```      | Integer                          | Maximum process failures before being considered failed (Default: 1)
+   ```max_concurrency```   | Integer                          | Maximum number of concurrent processes (Default: 0, unlimited concurrency.)
+   ```finalization_wait``` | Integer                          | Amount of time allocated for finalizing processes, in seconds. (Default: 30)
+
+#### name
+`name` is a string denoting the name of this task. It defaults to the name of the first Process in
+the list of Processes associated with the `processes` attribute.
+
+#### processes
+
+`processes` is an unordered list of `Process` objects. To constrain the order
+in which they run, use `constraints`.
+
+##### constraints
+
+A list of `Constraint` objects. Currently it supports only one type,
+the `order` constraint. `order` is a list of process names
+that should run in the order given. For example,
+
+        process = Process(cmdline = "echo hello {{name}}")
+        task = Task(name = "echoes",
+                    processes = [process(name = "jim"), process(name = "bob")],
+                    constraints = [Constraint(order = ["jim", "bob"])])
+
+Constraints can be supplied ad-hoc and in duplicate. Not all
+Processes need be constrained; however, Tasks with cycles are
+rejected by the Thermos scheduler.
+
+Use the `order` function as shorthand to generate `Constraint` lists.
+The following:
+
+        order(process1, process2)
+
+is shorthand for
+
+        [Constraint(order = [process1.name(), process2.name()])]
+
+The `order` function accepts Process name strings `('foo', 'bar')` or the processes
+themselves, e.g. `foo=Process(name='foo', ...)`, `bar=Process(name='bar', ...)`,
+`constraints=order(foo, bar)`.
+
+
+#### resources
+
+Takes a `Resource` object, which specifies the amounts of CPU, memory, and disk space resources
+to allocate to the Task.
+
+#### max_failures
+
+`max_failures` is the number of failed processes needed for the `Task` to be
+marked as failed.
+
+For example, assume a Task has two Processes and a `max_failures` value of `2`:
+
+        template = Process(max_failures=10)
+        task = Task(
+          name = "fail",
+          processes = [
+             template(name = "failing", cmdline = "exit 1"),
+             template(name = "succeeding", cmdline = "exit 0")
+          ],
+          max_failures=2)
+
+The `failing` Process could fail 10 times before being marked as permanently
+failed, and the `succeeding` Process could succeed on the first run. However,
+the task would succeed despite only allowing for two failed processes. To be more
+specific, there would be 10 failed process runs yet 1 failed process. Both processes
+would have to fail for the Task to fail.
+
+
+
+#### max_concurrency
+
+For Tasks with a number of expensive but otherwise independent
+processes, you may want to limit the amount of concurrency
+the Thermos scheduler provides rather than artificially constraining
+it via `order` constraints. For example, a test framework may
+generate a task with 100 test run processes, but wants to run it on
+a machine with only 4 cores. You can limit the amount of parallelism to
+4 by setting `max_concurrency=4` in your task configuration.
+
+For example, the following task spawns 180 Processes ("mappers")
+to compute individual elements of a 180 degree sine table, all dependent
+upon one final Process ("reducer") to tabulate the results:
+
+    def make_mapper(id):
+      return Process(
+        name = "mapper%03d" % id,
+        cmdline = "echo 'scale=50;s(%d*4*a(1)/180)' | bc -l > temp.sine_table.%03d" % (id, id))
+
+    def make_reducer():
+      return Process(
+        name = "reducer",
+        cmdline = "cat temp.* | nl > sine_table.txt && rm -f temp.*")
+
+    processes = map(make_mapper, range(180))
+
+    task = Task(
+      name = "mapreduce",
+      processes = processes + [make_reducer()],
+      constraints = [Constraint(order = [mapper.name(), 'reducer']) for mapper
+                     in processes],
+      max_concurrency = 8)
+
+#### finalization_wait
+
+Tasks have three active stages: `ACTIVE`, `CLEANING`, and `FINALIZING`. The
+`ACTIVE` stage is when ordinary processes run. This stage lasts as
+long as Processes are running and the Task is healthy. The moment either
+all Processes have finished successfully or the Task has reached a
+maximum Process failure limit, it goes into the `CLEANING` stage and sends
+SIGTERMs to all currently running Processes and their process trees.
+Once all Processes have terminated, the Task goes into `FINALIZING` stage
+and invokes the schedule of all Processes with the "final" attribute set to True.
+
+This whole process, from the end of the `ACTIVE` stage to the end of the `FINALIZING`
+stage, must happen within `finalization_wait` seconds. If it does not
+finish during that time, all remaining Processes are sent SIGKILLs
+(or if they depend upon uncompleted Processes, are
+never invoked.)
+
+Client applications with higher priority may force a shorter
+finalization wait (e.g. through parameters to `thermos kill`), so this
+is mostly a best-effort signal.
+
+
+### Constraint Object
+
+Current constraint objects only support a single ordering constraint, `order`,
+which specifies that its processes run sequentially in the order given. By
+default, all processes run in parallel when bound to a `Task` without
+ordering constraints.
+
+   param | type           | description
+   ----- | :----:         | -----------
+   order | List of String | List of processes by name (String) that should be run serially.
+
+### Resource Object
+
+Specifies the amount of CPU, RAM, and disk resources the task needs. See the
+[Resource Isolation document](/documentation/0.12.0/resources/) for suggested values and to understand how
+resources are allocated.
+
+  param      | type    | description
+  -----      | :----:  | -----------
+  ```cpu```  | Float   | Fractional number of cores required by the task.
+  ```ram```  | Integer | Bytes of RAM required by the task.
+  ```disk``` | Integer | Bytes of disk required by the task.
+
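+For example, a small resource footprint might look like this (the values are
+illustrative, not recommendations):
+
+        resources = Resources(cpu = 0.5, ram = 128 * MB, disk = 512 * MB)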
+
+Job Schema
+==========
+
+### Job Objects
+
+   name | type | description
+   ------ | :-------: | -------
+  ```task``` | Task | The Task object to bind to this job. Required.
+  ```name``` | String | Job name. (Default: inherited from the task attribute's name)
+  ```role``` | String | Job role account. Required.
+  ```cluster``` | String | Cluster in which this job is scheduled. Required.
+   ```environment``` | String | Job environment, default ```devel```. Must be one of ```prod```, ```devel```, ```test``` or ```staging<number>```.
+  ```contact``` | String | Best email address to reach the owner of the job. For production jobs, this is usually a team mailing list.
+  ```instances```| Integer | Number of instances (sometimes referred to as replicas or shards) of the task to create. (Default: 1)
+   ```cron_schedule``` | String | Cron schedule in cron format. May only be used with non-service jobs. See [Cron Jobs](/documentation/0.12.0/cron-jobs/) for more information. Default: None (not a cron job.)
+  ```cron_collision_policy``` | String | Policy to use when a cron job is triggered while a previous run is still active. ```KILL_EXISTING```: kill the previous run and schedule the new run. ```CANCEL_NEW```: let the previous run continue and cancel the new run. (Default: KILL_EXISTING)
+  ```update_config``` | ```UpdateConfig``` object | Parameters for controlling the rate and policy of rolling updates.
+  ```constraints``` | dict | Scheduling constraints for the tasks. See the section on the [constraint specification language](#Specifying-Scheduling-Constraints)
+  ```service``` | Boolean | If True, restart tasks regardless of success or failure. (Default: False)
+  ```max_task_failures``` | Integer | Maximum number of failures after which the task is considered to have failed (Default: 1) Set to -1 to allow for infinite failures
+  ```priority``` | Integer | Preemption priority to give the task (Default 0). Tasks with higher priorities may preempt tasks at lower priorities.
+  ```production``` | Boolean |  Whether or not this is a production task that may [preempt](/documentation/0.12.0/resources/#task-preemption) other tasks (Default: False). Production job role must have the appropriate [quota](/documentation/0.12.0/resources/#resource-quota).
+  ```health_check_config``` | ```HealthCheckConfig``` object | Parameters for controlling a task's health checks. HTTP health check is only used if a health port was assigned with a command line wildcard.
+  ```container``` | ```Container``` object | An optional container to run all processes inside of.
+  ```lifecycle``` | ```LifecycleConfig``` object | An optional task lifecycle configuration that dictates commands to be executed on startup/teardown.  HTTP lifecycle is enabled by default if the "health" port is requested.  See [LifecycleConfig Objects](#lifecycleconfig-objects) for more information.
+  ```tier``` | String | Task tier type. When set to `revocable` requires the task to run with Mesos revocable resources. This is work [in progress](https://issues.apache.org/jira/browse/AURORA-1343) and is currently only supported for the revocable tasks. The ultimate goal is to simplify task configuration by hiding various configuration knobs behind a task tier definition. See AURORA-1343 and AURORA-1443 for more details.
+
+### Services
+
+Jobs with the `service` flag set to True are called Services. The `Service`
+alias can be used as shorthand for `Job` with `service=True`.
+Services are differentiated from non-service Jobs in that tasks
+always restart on completion, whether successful or unsuccessful.
+Jobs without the service bit set only restart up to
+`max_task_failures` times and only if they terminated unsuccessfully
+either due to human error or machine failure.
+
+### Revocable Jobs
+
+**WARNING**: This feature is currently in alpha status. Do not use it in production clusters!
+
+Mesos [supports a concept of revocable tasks](http://mesos.apache.org/documentation/latest/oversubscription/)
+by oversubscribing machine resources by the amount deemed safe to not affect the existing
+non-revocable tasks. Aurora now supports revocable jobs via the `tier` setting, set to the
+`revocable` value.
+
+More implementation details are in this [ticket](https://issues.apache.org/jira/browse/AURORA-1343).
+
+The scheduler must be [configured](/documentation/0.12.0/deploying-aurora-scheduler/#configuring-resource-oversubscription)
+to receive revocable offers from Mesos and accept revocable jobs. If not configured properly,
+revocable tasks will never be assigned to hosts and will stay in `PENDING`.
+
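+As a sketch (the names are illustrative, and the task is assumed to be defined
+as in the [Basic Examples](#basic-examples) section), a revocable job differs
+from an ordinary job only in its `tier` attribute:
+
+    hello_world_revocable = Job(
+      cluster = 'cluster1',
+      role = os.getenv('USER'),    # requires `import os`, as in the basic example
+      environment = 'devel',
+      name = 'hello_world_revocable',
+      tier = 'revocable',          # run on Mesos revocable resources
+      task = hello_world_task)
+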
+### UpdateConfig Objects
+
+Parameters for controlling the rate and policy of rolling updates.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+| ```watch_secs```             | Integer  | Minimum number of seconds a shard must remain in ```RUNNING``` state before considered a success (Default: 45)
+| ```max_per_shard_failures``` | Integer  | Maximum number of restarts per shard during update. Increments total failure count when this limit is exceeded. (Default: 0)
+| ```max_total_failures```     | Integer  | Maximum number of shard failures to be tolerated in total during an update. Cannot be greater than or equal to the total number of tasks in a job. (Default: 0)
+| ```rollback_on_failure```    | boolean  | When False, prevents auto rollback of a failed update (Default: True)
+| ```wait_for_batch_completion```| boolean | When True, all threads from a given batch will be blocked from picking up new instances until the entire batch is updated. This essentially simulates the legacy sequential updater algorithm. (Default: False)
+| ```pulse_interval_secs```    | Integer  |  Indicates a [coordinated update](/documentation/0.12.0/client-commands/#coordinated-job-updates). If no pulses are received within the provided interval the update will be blocked. Beta-updater only. Will fail on submission when used with client updater. (Default: None)
+
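+As an illustrative sketch (the values are examples, not recommendations), an
+`UpdateConfig` is bound to a Job through its `update_config` attribute:
+
+    update_config = UpdateConfig(
+      batch_size = 3,              # update three shards per iteration
+      watch_secs = 60,             # a shard must stay RUNNING 60s to count as updated
+      max_per_shard_failures = 1,
+      max_total_failures = 2)
+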
+### HealthCheckConfig Objects
+
+*Note: ```endpoint```, ```expected_response``` and ```expected_response_code``` are deprecated from ```HealthCheckConfig``` and must be defined in ```HttpHealthChecker```.*
+
+Parameters for controlling a task's health checks via HTTP or a shell command.
+
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```health_checker```           | HealthCheckerConfig | Configure what kind of health check to use.
+| ```initial_interval_secs```    | Integer   | Initial delay for performing a health check. (Default: 15)
+| ```interval_secs```            | Integer   | Interval on which to check the task's health. (Default: 10)
+| ```max_consecutive_failures``` | Integer   | Maximum number of consecutive failures that will be tolerated before considering a task unhealthy (Default: 0)
+| ```timeout_secs```             | Integer   | Health check timeout. (Default: 1)
+
+### HealthCheckerConfig Objects
+| param                          | type                | description
+| -------                        | :-------:           | --------
+| ```http```                     | HttpHealthChecker  | Configure health check to use HTTP. (Default)
+| ```shell```                    | ShellHealthChecker | Configure health check via a shell command.
+
+
+### HttpHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```endpoint```                 | String    | HTTP endpoint to check (Default: /health)
+| ```expected_response```        | String    | If not empty, fail the HTTP health check if the response differs. Case insensitive. (Default: ok)
+| ```expected_response_code```   | Integer   | If not zero, fail the HTTP health check if the response code differs. (Default: 0)
+
+### ShellHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```shell_command```            | String    | An alternative to HTTP health checking. Specifies a shell command that will be executed. Any non-zero exit status will be interpreted as a health check failure.
+
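+For example, a sketch of a shell-based health check built from the objects above
+(the shell command is illustrative):
+
+    health_check_config = HealthCheckConfig(
+      health_checker = HealthCheckerConfig(
+        shell = ShellHealthChecker(
+          # A non-zero exit status is interpreted as a health check failure.
+          shell_command = 'test -f /tmp/healthy')),
+      interval_secs = 10,
+      max_consecutive_failures = 3)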
+
+### Announcer Objects
+
+If the `announce` field in the Job configuration is set, each task will be
+registered in the ServerSet `/aurora/role/environment/jobname` in the
+zookeeper ensemble configured by the executor (which can optionally be overridden by specifying
+the zk_path parameter).  If no Announcer object is specified,
+no announcement will take place.  For more information about ServerSets, see the [User Guide](/documentation/0.12.0/user-guide/).
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```primary_port```             | String    | Which named port to register as the primary endpoint in the ServerSet (Default: `http`)
+| ```portmap```                  | dict      | A mapping of additional endpoints to be announced in the ServerSet (Default: `{ 'aurora': '{{primary_port}}' }`)
+| ```zk_path```                  | String    | Zookeeper serverset path override (executor must be started with the --announcer-allow-custom-serverset-path parameter)
+
+### Port aliasing with the Announcer `portmap`
+
+The primary endpoint registered in the ServerSet is the one allocated to the port
+specified by the `primary_port` in the `Announcer` object, by default
+the `http` port.  This port can be referenced from anywhere within a configuration
+as `{{thermos.ports[http]}}`.
+
+Without the port map, each named port would be allocated a unique port number.
+The `portmap` allows two different named ports to be aliased together.  The default
+`portmap` aliases the `aurora` port (i.e. `{{thermos.ports[aurora]}}`) to
+the `http` port.  Even though the two ports can be referenced independently,
+only one port is allocated by Mesos.  Any port referenced in a `Process` object
+but which is not in the portmap will be allocated dynamically by Mesos and announced as well.
+
+It is possible to use the portmap to alias names to static port numbers, e.g.
+`{'http': 80, 'https': 443, 'aurora': 'http'}`.  In this case, referencing
+`{{thermos.ports[aurora]}}` would look up `{{thermos.ports[http]}}` then
+find a static port 80.  No port would be requested of or allocated by Mesos.
+
+Static ports should be used cautiously as Aurora does nothing to prevent two
+tasks with the same static port allocations from being co-scheduled.
+External constraints such as slave attributes should be used to enforce such
+guarantees should they be needed.
+
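+Putting the above together, a sketch of an `Announcer` that aliases an `admin`
+endpoint (the name is illustrative) to the primary `http` port:
+
+    announce = Announcer(
+      primary_port = 'http',
+      # 'admin' is announced on the same allocated port as 'http'.
+      portmap = {'admin': 'http'})
+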
+### Container Objects
+
+*Note: The only container type currently supported is "docker".  Docker support is currently EXPERIMENTAL.*
+*Note: In order to correctly execute processes inside a job, the Docker container must have python 2.7 installed.*
+
+Describes the container the job's processes will run inside.
+
+  param          | type           | description
+  -----          | :----:         | -----------
+  ```docker```   | Docker         | A docker container to use.
+
+### Docker Object
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```image```      | String          | The name of the docker image to execute.  If the image does not exist locally it will be pulled with ```docker pull```.
+  ```parameters``` | List(Parameter) | Additional parameters to pass to the docker containerizer.
+
+### Docker Parameter Object
+
+Docker CLI parameters. This needs to be enabled via the scheduler's `enable_docker_parameters` option.
+See [Docker Command Line Reference](https://docs.docker.com/reference/commandline/run/) for valid parameters. 
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```name```       | String          | The name of the docker parameter. E.g. volume
+  ```value```      | String          | The value of the parameter. E.g. /usr/local/bin:/usr/bin:rw
+
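+A sketch combining the three objects above (the image name and parameter are
+illustrative):
+
+    container = Container(
+      docker = Docker(
+        image = 'python:2.7',
+        # Passed through to `docker run`; requires the scheduler's
+        # enable_docker_parameters option.
+        parameters = [Parameter(name = 'label', value = 'role=web')]))
+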
+### LifecycleConfig Objects
+
+*Note: The only lifecycle configuration supported is the HTTP lifecycle via the HTTPLifecycleConfig.*
+
+  param          | type                | description
+  -----          | :----:              | -----------
+  ```http```     | HTTPLifecycleConfig | Configure the lifecycle manager to send lifecycle commands to the task via HTTP.
+
+### HTTPLifecycleConfig Objects
+
+  param          | type            | description
+  -----          | :----:          | -----------
+  ```port```     | String          | The named port to send POST commands (Default: health)
+  ```graceful_shutdown_endpoint``` | String | Endpoint to hit to indicate that a task should gracefully shutdown. (Default: /quitquitquit)
+  ```shutdown_endpoint``` | String | Endpoint to hit to give a task its final warning before being killed. (Default: /abortabortabort)
+
+#### graceful_shutdown_endpoint
+
+If the Job is listening on the port as specified by the HTTPLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint to request that the task gracefully shut itself down.  This is a
+courtesy call before the `shutdown_endpoint` is invoked a fixed amount of
+time later.
+
+#### shutdown_endpoint
+
+If the Job is listening on the port as specified by the HTTPLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint as a final warning before the task is shut down.  If the task
+does not shut down on its own after this, it will be forcefully killed.
+
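+As a sketch, a lifecycle configuration with the defaults made explicit:
+
+    lifecycle = LifecycleConfig(
+      http = HTTPLifecycleConfig(
+        port = 'health',
+        graceful_shutdown_endpoint = '/quitquitquit',
+        shutdown_endpoint = '/abortabortabort'))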
+
+Specifying Scheduling Constraints
+=================================
+
+In the `Job` object there is a map `constraints` from String to String
+allowing the user to tailor the schedulability of tasks within the job.
+
+Each slave in the cluster is assigned a set of string-valued
+key/value pairs called attributes. For example, consider the host
+`cluster1-aaa-03-sr2` and its following attributes (given in key:value
+format): `host:cluster1-aaa-03-sr2` and `rack:aaa`.
+
+The constraint map's key is the attribute name on which we
+constrain Tasks within our Job. The value is how we constrain them.
+There are two types of constraints: *limit constraints* and *value
+constraints*.
+
+| constraint    | description
+| ------------- | --------------
+| Limit         | A string that specifies a limit for a constraint. Starts with <code>'limit:</code> followed by an Integer and closing single quote, such as ```'limit:1'```.
+| Value         | A string that specifies a value for a constraint. To include a list of values, separate the values using commas. To negate the values of a constraint, start the string with a ```!```.
+
+You can also control machine diversity using constraints. The below
+constraint ensures that no more than two instances of your job may run
+on a single host. Think of this as a "group by" limit.
+
+    constraints = {
+      'host': 'limit:2',
+    }
+
+Likewise, you can use constraints to control rack diversity, e.g. at
+most one task per rack:
+
+    constraints = {
+      'rack': 'limit:1',
+    }
+
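+A value constraint, by contrast, matches attribute values directly. For example,
+to keep tasks off one specific rack (the attribute value is illustrative):
+
+    constraints = {
+      'rack': '!aaa',
+    }
+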
+Use these constraints sparingly as they can dramatically reduce Tasks' schedulability.
+
+Template Namespaces
+===================
+
+Currently, a few Pystachio namespaces have special semantics. Using them
+in your configuration allow you to tailor application behavior
+through environment introspection or interact in special ways with the
+Aurora client or Aurora-provided services.
+
+### mesos Namespace
+
+The `mesos` namespace contains variables which relate to the `mesos` slave
+which launched the task. The `instance` variable can be used
+to distinguish between Task replicas.
+
+| variable name     | type       | description
+| --------------- | :--------: | -------------
+| ```instance```    | Integer    | The instance number of the created task. A job with 5 replicas has instance numbers 0, 1, 2, 3, and 4.
+| ```hostname``` | String | The instance hostname that the task was launched on.
+
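+For instance, a Process can use the instance number to produce per-shard output
+(a sketch; the command is illustrative):
+
+    process = Process(
+      name = 'backup',
+      # Each replica writes its own archive, keyed by shard id.
+      cmdline = 'tar czf backup-{{mesos.instance}}.tar.gz /data')
+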
+### thermos Namespace
+
+The `thermos` namespace contains variables that work directly on the
+Thermos platform in addition to Aurora. This namespace is fully
+compatible with Tasks invoked via the `thermos` CLI.
+
+| variable      | type                     | description                        |
+| :----------:  | ---------                | ------------                       |
+| ```ports```   | map of string to Integer | A map of names to port numbers     |
+| ```task_id``` | string                   | The task ID assigned to this task. |
+
+The `thermos.ports` namespace is automatically populated by Aurora when
+invoking tasks on Mesos. When running the `thermos` command directly,
+these ports must be explicitly mapped with the `-P` option.
+
+For example, if `{{thermos.ports[http]}}` is specified in a `Process`
+configuration, it is automatically extracted and auto-populated by
+Aurora, but must be specified with, for example, `thermos -P http:12345`
+to map `http` to port 12345 when running via the CLI.
+
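+As a sketch, a Process that binds a server to its allocated port:
+
+    web = Process(
+      name = 'web',
+      # {{thermos.ports[http]}} expands to the port Aurora allocated for 'http'.
+      cmdline = 'python -m SimpleHTTPServer {{thermos.ports[http]}}')
+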
+Basic Examples
+==============
+
+These are provided to give a basic understanding of simple Aurora jobs.
+
+### hello_world.aurora
+
+Put the following in a file named `hello_world.aurora`, substituting your own values
+for attributes such as `cluster`.
+
+    import os
+    hello_world_process = Process(name = 'hello_world', cmdline = 'echo hello world')
+
+    hello_world_task = Task(
+      resources = Resources(cpu = 0.1, ram = 16 * MB, disk = 16 * MB),
+      processes = [hello_world_process])
+
+    hello_world_job = Job(
+      cluster = 'cluster1',
+      role = os.getenv('USER'),
+      task = hello_world_task)
+
+    jobs = [hello_world_job]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world hello_world.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world
+
+### Environment Tailoring
+
+#### hello_world_productionized.aurora
+
+Put the following in a file named `hello_world_productionized.aurora`, substituting your own
+values for attributes such as `cluster`.
+
+    include('hello_world.aurora')
+
+    production_resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 2 * GB)
+    staging_resources = Resources(cpu = 0.1, ram = 32 * MB, disk = 512 * MB)
+    hello_world_template = hello_world_job(
+        name = "hello_world-{{cluster}}",
+        task = hello_world_task(resources = production_resources))
+
+    jobs = [
+      # production jobs
+      hello_world_template(cluster = 'cluster1', instances = 25),
+      hello_world_template(cluster = 'cluster2', instances = 15),
+
+      # staging jobs
+      hello_world_template(
+        cluster = 'local',
+        instances = 1,
+        task = hello_world_task(resources = staging_resources)),
+    ]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world-cluster1 hello_world_productionized.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world-cluster1
diff --git a/source/documentation/0.12.0/configuration-tutorial.md b/source/documentation/0.12.0/configuration-tutorial.md
new file mode 100644
index 0000000..edd99b1
--- /dev/null
+++ b/source/documentation/0.12.0/configuration-tutorial.md
@@ -0,0 +1,954 @@
+Aurora Configuration Tutorial
+=============================
+
+How to write Aurora configuration files, including feature descriptions
+and best practices. When writing a configuration file, make use of
+`aurora job inspect`. It takes the same job key and configuration file
+arguments as `aurora job create` or `aurora update start`. It first ensures the
+configuration parses, then outputs it in human-readable form.
+
+You should read this after going through the general [Aurora Tutorial](/documentation/0.12.0/tutorial/).
+
+- [Aurora Configuration Tutorial](#aurora-configuration-tutorial)
+	- [The Basics](#the-basics)
+		- [Use Bottom-To-Top Object Ordering](#use-bottom-to-top-object-ordering)
+	- [An Example Configuration File](#an-example-configuration-file)
+	- [Defining Process Objects](#defining-process-objects)
+	- [Getting Your Code Into The Sandbox](#getting-your-code-into-the-sandbox)
+	- [Defining Task Objects](#defining-task-objects)
+		- [SequentialTask: Running Processes in Parallel or Sequentially](#sequentialtask-running-processes-in-parallel-or-sequentially)
+		- [SimpleTask](#simpletask)
+		- [Combining tasks](#combining-tasks)
+	- [Defining Job Objects](#defining-job-objects)
+	- [The jobs List](#the-jobs-list)
+	- [Templating](#templating)
+		- [Templating 1: Binding in Pystachio](#templating-1-binding-in-pystachio)
+		- [Structurals in Pystachio / Aurora](#structurals-in-pystachio--aurora)
+			- [Mustaches Within Structurals](#mustaches-within-structurals)
+		- [Templating 2: Structurals Are Factories](#templating-2-structurals-are-factories)
+			- [A Second Way of Templating](#a-second-way-of-templating)
+		- [Advanced Binding](#advanced-binding)
+			- [Bind Syntax](#bind-syntax)
+			- [Binding Complex Objects](#binding-complex-objects)
+				- [Lists](#lists)
+				- [Maps](#maps)
+				- [Structurals](#structurals)
+		- [Structural Binding](#structural-binding)
+	- [Configuration File Writing Tips And Best Practices](#configuration-file-writing-tips-and-best-practices)
+		- [Use As Few .aurora Files As Possible](#use-as-few-aurora-files-as-possible)
+		- [Avoid Boilerplate](#avoid-boilerplate)
+		- [Thermos Uses bash, But Thermos Is Not bash](#thermos-uses-bash-but-thermos-is-not-bash)
+			- [Bad](#bad)
+			- [Good](#good)
+		- [Rarely Use Functions In Your Configurations](#rarely-use-functions-in-your-configurations)
+			- [Bad](#bad-1)
+			- [Good](#good-1)
+
+The Basics
+----------
+
+To run a job on Aurora, you must specify a configuration file that tells
+Aurora what it needs to know to schedule the job, what Mesos needs to
+run the tasks the job is made up of, and what Thermos needs to run the
+processes that make up the tasks. This file must have
+a `.aurora` suffix.
+
+A configuration file defines a collection of objects, along with parameter
+values for their attributes. An Aurora configuration file contains the
+following three types of objects:
+
+- Job
+- Task
+- Process
+
+A configuration also specifies a list of `Job` objects assigned
+to the variable `jobs`.
+
+- jobs (list of defined Jobs to run)
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a type-checked dictionary
+templating library called *Pystachio*, a powerful tool for
+configuration specification and reuse. Pystachio objects are tailored
+via `{{}}`-surrounded templates.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the [*Aurora+Thermos Configuration
+Reference*](/documentation/0.12.0/configuration-reference/), without `import` statements - the
+Aurora config loader injects them automatically. Other than that, an `.aurora`
+file works like any other Python script.
+
+[*Aurora+Thermos Configuration Reference*](/documentation/0.12.0/configuration-reference/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+### Use Bottom-To-Top Object Ordering
+
+A well-structured configuration starts with structural templates (if
+any). Structural templates encapsulate in their attributes all the
+differences between Jobs in the configuration that are not directly
+manipulated at the `Job` level, but typically at the `Process` or `Task`
+level. For example, if certain processes are invoked with slightly
+different settings or input.
+
+After structural templates, define, in order, `Process`es, `Task`s, and
+`Job`s.
+
+Structural template names should be *UpperCamelCased* and their
+instantiations are typically *UPPER\_SNAKE\_CASED*. `Process`, `Task`,
+and `Job` names are typically *lower\_snake\_cased*. Indentation is typically 2
+spaces.
+
+An Example Configuration File
+-----------------------------
+
+The following is a typical configuration file. Don't worry if there are
+parts you don't understand yet, but you may want to refer back to this
+as you read about its individual parts. Note that names surrounded by
+curly braces {{}} are template variables, which the system replaces with
+bound values for the variables.
+
+    # --- templates here ---
+    class Profile(Struct):
+      package_version = Default(String, 'live')
+      java_binary = Default(String, '/usr/lib/jvm/java-1.7.0-openjdk/bin/java')
+      extra_jvm_options = Default(String, '')
+      parent_environment = Default(String, 'prod')
+      parent_serverset = Default(String,
+                                 '/foocorp/service/bird/{{parent_environment}}/bird')
+
+    # --- processes here ---
+    main = Process(
+      name = 'application',
+      cmdline = '{{profile.java_binary}} -server -Xmx1792m '
+                '{{profile.extra_jvm_options}} '
+                '-jar application.jar '
+                '-upstreamService {{profile.parent_serverset}}'
+    )
+
+    # --- tasks ---
+    base_task = SequentialTask(
+      name = 'application',
+      processes = [
+        Process(
+          name = 'fetch',
+          cmdline = 'curl -O '
+                    'https://packages.foocorp.com/{{profile.package_version}}/application.jar'),
+      ]
+    )
+
+    # not always necessary but often useful to have separate task
+    # resource classes
+    staging_task = base_task(resources =
+                     Resources(cpu = 1.0,
+                               ram = 2048*MB,
+                               disk = 1*GB))
+    production_task = base_task(resources =
+                        Resources(cpu = 4.0,
+                                  ram = 2560*MB,
+                                  disk = 10*GB))
+
+    # --- job template ---
+    job_template = Job(
+      name = 'application',
+      role = 'myteam',
+      contact = 'myteam-team@foocorp.com',
+      instances = 20,
+      service = True,
+      task = production_task
+    )
+
+    # -- profile instantiations (if any) ---
+    PRODUCTION = Profile()
+    STAGING = Profile(
+      extra_jvm_options = '-Xloggc:gc.log',
+      parent_environment = 'staging'
+    )
+
+    # -- job instantiations --
+    jobs = [
+      job_template(cluster = 'cluster1', environment = 'prod')
+        .bind(profile = PRODUCTION),
+
+      job_template(cluster = 'cluster2', environment = 'prod')
+        .bind(profile = PRODUCTION),
+
+      job_template(cluster = 'cluster1',
+                   environment = 'staging',
+                   service = False,
+                   task = staging_task,
+                   instances = 2)
+        .bind(profile = STAGING),
+    ]
+
+## Defining Process Objects
+
+Processes are handled by the Thermos system. A process is a single
+executable step run as a part of an Aurora task, which consists of a
+bash-executable statement.
+
+The key (and required) `Process` attributes are:
+
+-   `name`: Any string which is a valid Unix filename (no slashes,
+    NULLs, or leading periods). The `name` value must be unique relative
+    to other Processes in a `Task`.
+-   `cmdline`: A command line run in a bash subshell, so you can use
+    bash scripts. Nothing is supplied for command-line arguments,
+    so `$*` is unspecified.
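+
+A minimal `Process` sets just these two attributes. For example:
+
+    hello = Process(name = 'hello', cmdline = 'echo hello world')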
+
+Many tiny processes make managing configurations more difficult. For
+example, the following is a bad way to define processes.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip'
+    )
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+Since `cmdline` runs in a bash subshell, you can chain commands
+with `&&` or `||`.
+
+When defining a `Task` that is just a list of Processes run in a
+particular order, use `SequentialTask`, as described in the
+[*Defining Task Objects*](#defining-task-objects) section. The following simplifies and combines the
+above multiple `Process` definitions into just two.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip && '
+                'unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+`Process` also has optional attributes to customize its behaviour. Details can be found in the [*Aurora+Thermos Configuration Reference*](/documentation/0.12.0/configuration-reference/#process-objects).
+
+
+## Getting Your Code Into The Sandbox
+
+When using Aurora, you need to get your executable code into its "sandbox", specifically
+the Task sandbox where the code executes for the Processes that make up that Task.
+
+Each Task has a sandbox created when the Task starts and garbage
+collected when it finishes. All of a Task's processes run in its
+sandbox, so processes can share state by using a shared current
+working directory.
+
+Typically, you save this code somewhere. You then need to define a Process
+in your `.aurora` configuration file that fetches the code from that somewhere
+to where the slave can see it. For a public cloud, that can be anywhere public on
+the Internet, such as S3. For a private cloud, put it on an accessible HDFS
+cluster or similar internal storage.
+
+The template for this Process is:
+
+    <name> = Process(
+      name = '<name>',
+      cmdline = '<command to copy and extract code archive into current working directory>'
+    )
+
+Note: Be sure the extracted code archive has an executable.
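+
+As a concrete sketch, assuming a hypothetical public package URL, such a
+Process might look like:
+
+    fetch_app = Process(
+      name = 'fetch_app',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip && unzip app.zip'
+    )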
+
+## Defining Task Objects
+
+Tasks are handled by Mesos. A task is a collection of processes that
+runs in a shared sandbox. It's the fundamental unit Aurora uses to
+schedule the datacenter; essentially what Aurora does is find places
+in the cluster to run tasks.
+
+The key (and required) parts of a Task are:
+
+-   `name`: A string giving the Task's name. By default, if a Task is
+    not given a name, it inherits the first name in its Process list.
+
+-   `processes`: An unordered list of Process objects bound to the Task.
+    The optional `constraints` attribute applies to this list as a
+    whole: currently the only constraint, `order`, determines whether
+    the processes run in parallel or sequentially.
+
+-   `resources`: A `Resource` object defining the Task's resource
+    footprint. A `Resource` object has three attributes:
+    -   `cpu`: A Float, the fractional number of cores the Task
+        requires.
+    -   `ram`: An Integer, RAM bytes the Task requires.
+    -   `disk`: An Integer, disk bytes the Task requires.
+
+A basic Task definition looks like:
+
+    Task(
+        name="hello_world",
+        processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+        resources=Resources(cpu = 1.0,
+                            ram = 1*GB,
+                            disk = 1*GB))
+
+A Task has optional attributes to customize its behaviour. Details can be found in the [*Aurora+Thermos Configuration Reference*](/documentation/0.12.0/configuration-reference/#task-object)
+
+
+### SequentialTask: Running Processes in Parallel or Sequentially
+
+By default, a Task with several Processes runs them in parallel. There
+are two ways to run Processes sequentially:
+
+-   Include an `order` constraint in the Task definition's `constraints`
+    attribute whose arguments specify the processes' run order:
+
+        Task( ... processes=[process1, process2, process3],
+              constraints = order(process1, process2, process3), ...)
+
+-   Use `SequentialTask` instead of `Task`; it automatically runs
+    processes in the order specified in the `processes` attribute. No
+    `constraint` parameter is needed:
+
+        SequentialTask( ... processes=[process1, process2, process3] ...)
+
+### SimpleTask
+
+For quickly creating simple tasks, use the `SimpleTask` helper. It
+creates a basic task from a provided name and command line using a
+default set of resources. For example, in a `.aurora` configuration
+file:
+
+    SimpleTask(name="hello_world", command="echo hello world")
+
+is equivalent to
+
+    Task(name="hello_world",
+         processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+         resources=Resources(cpu = 1.0,
+                             ram = 1*GB,
+                             disk = 1*GB))
+
+The simplest idiomatic Job configuration thus becomes:
+
+    import os
+    hello_world_job = Job(
+      task=SimpleTask(name="hello_world", command="echo hello world"),
+      role=os.getenv('USER'),
+      cluster="cluster1")
+
+When written to `hello_world.aurora`, you invoke it with a simple
+`aurora job create cluster1/$USER/test/hello_world hello_world.aurora`.
+
+### Combining tasks
+
+`Tasks.concat` (synonym: `concat_tasks`) and
+`Tasks.combine` (synonym: `combine_tasks`) merge multiple Task definitions
+into a single Task. It may be easier to define complex Jobs
+as smaller constituent Tasks. But since a Job only includes a single
+Task, the subtasks must be combined before using them in a Job.
+Smaller Tasks can also be reused between Jobs, instead of having to
+repeat their definition for multiple Jobs.
+
+With both methods, the merged Task takes the first Task's name. The
+difference between the two is the result Task's process ordering.
+
+-   `Tasks.combine` runs its subtasks' processes in no particular order.
+    The new Task's resource consumption is the sum of all its subtasks'
+    consumption.
+
+-   `Tasks.concat` runs its subtasks in the order supplied, with each
+    subtask's processes run serially between tasks. It is analogous to
+    the `order` constraint helper, except at the Task level instead of
+    the Process level. The new Task's resource consumption is the
+    maximum value specified by any subtask for each Resource attribute
+    (cpu, ram and disk).
+
+For example, given the following:
+
+    setup_task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper],
+      # It is important to note that {{Tasks.concat}} has
+      # no effect on the ordering of the processes within a task;
+      # hence the necessity of the {{order}} statement below
+      # (otherwise, the order in which {{download_interpreter}}
+      # and {{update_zookeeper}} run will be non-deterministic)
+      constraints=order(download_interpreter, update_zookeeper),
+      ...
+    )
+
+    run_task = SequentialTask(
+      ...
+      processes=[download_application, start_application],
+      ...
+    )
+
+    combined_task = Tasks.concat(setup_task, run_task)
+
+The `Tasks.concat` command merges the two Tasks into a single Task and
+ensures all processes in `setup_task` run before the processes
+in `run_task`. Conceptually, the task is reduced to:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper,
+                        download_application, start_application),
+      ...
+    )
+
+In the case of `Tasks.combine`, the two schedules run in parallel:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper) +
+                  order(download_application, start_application),
+      ...
+    )
+
+In the latter case, each of the two sequences may operate in parallel.
+Of course, this may not be the intended behavior (for example, if
+the `start_application` Process implicitly relies
+upon `download_interpreter`). Make sure you understand the difference
+between using one or the other.
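+
+For completeness, the parallel variant is invoked the same way. A short
+sketch reusing the two tasks defined above:
+
+    # No ordering is imposed between the two subtasks' processes.
+    parallel_task = Tasks.combine(setup_task, run_task)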
+
+## Defining Job Objects
+
+A job is a group of identical tasks that Aurora can run in a Mesos cluster.
+
+A `Job` object is defined by the values of several attributes, some
+required and some optional. The required attributes are:
+
+-   `task`: Task object to bind to this job. Note that a Job can
+    only take a single Task.
+
+-   `role`: Job's role account; in other words, the user account to run
+    the job as on a Mesos cluster machine. A common value is
+    `os.getenv('USER')`, a Python expression that evaluates to the user
+    submitting the job request. The other common value is the service
+    account that runs the job, e.g. `www-data`.
+
+-   `environment`: Job's environment, typical values
+    are `devel`, `test`, or `prod`.
+
+-   `cluster`: Aurora cluster to schedule the job in, defined in
+    `/etc/aurora/clusters.json` or `~/.clusters.json`. You can specify
+    jobs where the only difference is the `cluster`, then at run time
+    only run the Job whose job key includes your desired cluster's name.
+
+You usually see a `name` parameter. By default, `name` inherits its
+value from the Job's associated Task object, but you can override this
+default. For these four parameters, a Job definition might look like:
+
+    foo_job = Job( name = 'foo', cluster = 'cluster1',
+              role = os.getenv('USER'), environment = 'prod',
+              task = foo_task)
+
+In addition to the required attributes, there are several optional
+attributes. Details can be found in the [Aurora+Thermos Configuration Reference](/documentation/0.12.0/configuration-reference/#job-objects).
+
+
+## The jobs List
+
+At the end of your `.aurora` file, you need to specify a list of the
+file's defined Jobs. For example, the following exports the jobs `job1`,
+`job2`, and `job3`.
+
+    jobs = [job1, job2, job3]
+
+This allows the Aurora client to invoke commands on those jobs, such as
+starting, updating, or killing them.
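+
+For example, with the list above exported, the client can act on any of
+those jobs by job key (the key below is illustrative):
+
+    aurora job create cluster1/$USER/test/job1 myapp.aurora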
+
+Templating
+----------
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a templating library called
+*Pystachio*, a powerful tool for configuration specification and reuse.
+
+[Aurora+Thermos Configuration Reference](/documentation/0.12.0/configuration-reference/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the *Aurora+Thermos Configuration
+Reference* without `import` statements - the Aurora config loader
+injects them automatically. Other than that the `.aurora` format
+works like any other Python script.
+
+### Templating 1: Binding in Pystachio
+
+Pystachio uses the visually distinctive {{}} to indicate template
+variables. These are often called "mustache variables" after the
+similarly appearing variables in the Mustache templating system and
+because the curly braces resemble mustaches.
+
+If you are familiar with the Mustache system, note that Pystachio
+templates differ significantly: they have no nesting, joining, or
+inheritance semantics. On the other hand, templates are evaluated
+iteratively, which affords some level of indirection.
+
+Let's start with the simplest template: text with one
+variable, in this case `name`:
+
+    Hello {{name}}
+
+If we evaluate this as is, we'd get back:
+
+    Hello
+
+If a template variable doesn't have a value, when evaluated it's
+replaced with nothing. If we add a binding to give it a value:
+
+    { "name" : "Tom" }
+
+We'd get back:
+
+    Hello Tom
+
+Every Pystachio object has an associated `.bind` method that can bind
+values to {{}} variables. Bindings are not immediately evaluated.
+Instead, they are evaluated only when the interpolated value of the
+object is necessary, e.g. for performing equality or serializing a
+message over the wire.
+
+Objects with and without mustache templated variables behave
+differently:
+
+    >>> Float(1.5)
+    Float(1.5)
+
+    >>> Float('{{x}}.5')
+    Float({{x}}.5)
+
+    >>> Float('{{x}}.5').bind(x = 1)
+    Float(1.5)
+
+    >>> Float('{{x}}.5').bind(x = 1) == Float(1.5)
+    True
+
+    >>> contextual_object = String('{{metavar{{number}}}}').bind(
+    ... metavar1 = "first", metavar2 = "second")
+
+    >>> contextual_object
+    String({{metavar{{number}}}})
+
+    >>> contextual_object.bind(number = 1)
+    String(first)
+
+    >>> contextual_object.bind(number = 2)
+    String(second)
+
+You usually bind simple key to value pairs, but you can also bind three
+other objects: lists, dictionaries, and structurals. These will be
+described in detail later.
+
+### Structurals in Pystachio / Aurora
+
+Most Aurora/Thermos users don't ever (knowingly) interact with `String`,
+`Float`, or `Integer` Pystachio objects directly. Instead they interact
+with derived structural (`Struct`) objects that are collections of
+fundamental and structural objects. The structural object components are
+called *attributes*. Aurora's most used structural objects are `Job`,
+`Task`, and `Process`:
+
+    class Process(Struct):
+      cmdline = Required(String)
+      name = Required(String)
+      max_failures = Default(Integer, 1)
+      daemon = Default(Boolean, False)
+      ephemeral = Default(Boolean, False)
+      min_duration = Default(Integer, 5)
+      final = Default(Boolean, False)
+
+Construct default objects by following the object's type with `()`. If you
+want an attribute to have a value different from its default, include
+the attribute name and value inside the parentheses.
+
+    >>> Process()
+    Process(daemon=False, max_failures=1, ephemeral=False,
+      min_duration=5, final=False)
+
+Attribute values can be template variables, which then receive specific
+values when creating the object.
+
+    >>> Process(cmdline = 'echo {{message}}')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo {{message}}, final=False)
+
+    >>> Process(cmdline = 'echo {{message}}').bind(message = 'hello world')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+A powerful binding property is that all of an object's children inherit its
+bindings:
+
+    >>> List(Process)([
+    ... Process(name = '{{prefix}}_one'),
+    ... Process(name = '{{prefix}}_two')
+    ... ]).bind(prefix = 'hello')
+    ProcessList(
+      Process(daemon=False, name=hello_one, max_failures=1, ephemeral=False, min_duration=5, final=False),
+      Process(daemon=False, name=hello_two, max_failures=1, ephemeral=False, min_duration=5, final=False)
+      )
+
+Remember that an Aurora Job contains Tasks which contain Processes. A
+Job level binding is inherited by its Tasks and all their Processes.
+Similarly a Task level binding is available to that Task and its
+Processes but is *not* visible at the Job level (inheritance is a
+one-way street).
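+
+A small sketch of this inheritance (the names are hypothetical):
+
+    greet = Process(name = 'greet', cmdline = 'echo {{greeting}}')
+    greet_task = SequentialTask(name = 'greet', processes = [greet])
+    # The Job-level binding below resolves {{greeting}} inside the
+    # Process cmdline, two levels down.
+    greet_job = Job(
+      name = 'greet',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = 'devel',
+      task = greet_task
+    ).bind(greeting = 'hello world')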
+
+#### Mustaches Within Structurals
+
+When you define a `Struct` schema, one powerful, but confusing, feature
+is that all of that structure's attributes are Mustache variables within
+the enclosing scope *once they have been populated*.
+
+For example, in the `Process` schema defined above, attributes such as
+`{{name}}`, `{{cmdline}}`, and `{{max_failures}}` immediately become
+Mustache variables, implicitly bound into the `Process` and inherited by
+all child objects once they are defined.
+
+Thus, you can do the following:
+
+    >>> Process(name = "installer", cmdline = "echo {{name}} is running")
+    Process(daemon=False, name=installer, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo installer is running, final=False)
+
+WARNING: This binding only takes place in one direction. For example,
+the following does NOT work and does not set the `Process` `name`
+attribute's value.
+
+    >>> Process().bind(name = "installer")
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5, final=False)
+
+The following is also not possible and results in an infinite loop that
+attempts to resolve `Process.name`.
+
+    >>> Process(name = '{{name}}').bind(name = 'installer')
+
+Do not confuse Structural attributes with bound Mustache variables.
+Attributes are implicitly converted to Mustache variables but not vice
+versa.
+
+### Templating 2: Structurals Are Factories
+
+#### A Second Way of Templating
+
+A second templating method is as powerful as the first and often
+confused with it. This method relies on the automatic conversion of
+Struct attributes to Mustache variables described above.
+
+Suppose you create a Process object:
+
+    >>> p = Process(name = "process_one", cmdline = "echo hello world")
+
+    >>> p
+    Process(daemon=False, name=process_one, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+This `Process` object, "`p`", can be used wherever a `Process` object is
+needed. It can also be reused by changing the value(s) of its
+attribute(s). Here we change its `name` attribute from `process_one` to
+`process_two`.
+
+    >>> p(name = "process_two")
+    Process(daemon=False, name=process_two, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+Template creation is a common use for this technique:
+
+    >>> Daemon = Process(daemon = True)
+    >>> logrotate = Daemon(name = 'logrotate', cmdline = './logrotate conf/logrotate.conf')
+    >>> mysql = Daemon(name = 'mysql', cmdline = 'bin/mysqld --safe-mode')
+
+### Advanced Binding
+
+As described above, `.bind()` binds simple strings or numbers to
+Mustache variables. In addition to Structural types formed by combining
+atomic types, Pystachio has two container types: `List` and `Map`, which
+can also be bound via `.bind()`.
+
+#### Bind Syntax
+
+The `bind()` function can take Python dictionaries or `kwargs`
+interchangeably (when "`kwargs`" is in a function definition, `kwargs`
+receives a Python dictionary containing all keyword arguments after the
+formal parameter list).
+
+    >>> String('{{foo}}').bind(foo = 'bar') == String('{{foo}}').bind({'foo': 'bar'})
+    True
+
+Bindings done "closer" to the object in question take precedence:
+
+    >>> p = Process(name = '{{context}}_process')
+    >>> t = Task().bind(context = 'global')
+    >>> t(processes = [p, p.bind(context = 'local')])
+    Task(processes=ProcessList(
+      Process(daemon=False, name=global_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5),
+      Process(daemon=False, name=local_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5)
+    ))
+
+#### Binding Complex Objects
+
+##### Lists
+
+    >>> fibonacci = List(Integer)([1, 1, 2, 3, 5, 8, 13])
+    >>> String('{{fib[4]}}').bind(fib = fibonacci)
+    String(5)
+
+##### Maps
+
+    >>> first_names = Map(String, String)({'Kent': 'Clark', 'Wayne': 'Bruce', 'Prince': 'Diana'})
+    >>> String('{{first[Kent]}}').bind(first = first_names)
+    String(Clark)
+
+##### Structurals
+
+    >>> String('{{p.cmdline}}').bind(p = Process(cmdline = "echo hello world"))
+    String(echo hello world)
+
+### Structural Binding
+
+Use structural templates when binding more than two or three individual
+values at the Job or Task level. For fewer values, standard
+key-to-value binding is sufficient.
+
+Structural binding is a very powerful pattern and is most useful in
+Aurora/Thermos for doing Structural configuration. For example, you can
+define a job profile. The following profile uses `HDFS`, the Hadoop
+Distributed File System, to designate a file's location. `HDFS` does
+not come with Aurora, so you'll need to either install it separately
+or change the way the dataset is designated.
+
+    class Profile(Struct):
+      version = Required(String)
+      environment = Required(String)
+      dataset = Default(String, 'hdfs://home/aurora/data/{{environment}}')
+
+    PRODUCTION = Profile(version = 'live', environment = 'prod')
+    DEVEL = Profile(version = 'latest',
+                    environment = 'devel',
+                    dataset = 'hdfs://home/aurora/data/test')
+    TEST = Profile(version = 'latest', environment = 'test')
+
+    JOB_TEMPLATE = Job(
+      name = 'application',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = '{{profile.environment}}',
+      task = SequentialTask(
+        name = 'task',
+        resources = Resources(cpu = 2, ram = 4*GB, disk = 8*GB),
+        processes = [
+          Process(name = 'main', cmdline = 'java -jar application.jar '
+                                           '-hdfsPath {{profile.dataset}}')
+        ]
+      )
+    )
+
+    jobs = [
+      JOB_TEMPLATE(instances = 100).bind(profile = PRODUCTION),
+      JOB_TEMPLATE.bind(profile = DEVEL),
+      JOB_TEMPLATE.bind(profile = TEST),
+    ]
+
+In this case, a custom structural "Profile" is created to self-document
+the configuration to some degree. This also allows some schema
+"type-checking", and for default self-substitution, e.g. in
+`Profile.dataset` above.
+
+So rather than a `.bind()` with a half-dozen substituted variables, you
+can bind a single object that has sensible defaults stored in a single
+place.
+
+Configuration File Writing Tips And Best Practices
+--------------------------------------------------
+
+### Use As Few .aurora Files As Possible
+
+When creating your `.aurora` configuration, try to keep all versions of
+a particular job within the same `.aurora` file. For example, if you
+have separate jobs for `cluster1`, `cluster1` staging, `cluster1`
+testing, and `cluster2`, keep them as close together as possible.
+
+Constructs shared across multiple jobs owned by your team (e.g.
+team-level defaults or structural templates) can be split into separate
+`.aurora` files and included via the `include` directive.
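+
+As a minimal sketch (the file name is hypothetical):
+
+    include('team_defaults.aurora')  # brings that file's definitions into scope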
+
+### Avoid Boilerplate
+
+If you see repetition or find yourself copy and pasting any parts of
+your configuration, it's likely an opportunity for templating. Take the
+example below:
+
+`redundant.aurora` contains:
+
+    download = Process(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'rm -rf Python-2.7.3 && tar xjf Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    build = Process(
+      name = 'build',
+      cmdline = 'pushd Python-2.7.3 && ./configure && make && popd',
+      max_failures = 1)
+
+    email = Process(
+      name = 'email',
+      cmdline = 'echo Success | mail feynman@tmc.com',
+      max_failures = 5,
+      min_duration = 1)
+
+    build_python = Task(
+      name = 'build_python',
+      processes = [download, unpack, build, email],
+      constraints = [Constraint(order = ['download', 'unpack', 'build', 'email'])])
+
+As you'll notice, there's a lot of repetition in the `Process`
+definitions. For example, almost every process sets a `max_failures`
+limit to 5 and a `min_duration` to 1. This is an opportunity for factoring
+into a common process template.
+
+Furthermore, the Python version is repeated everywhere. This can be
+bound via structural templating as described in the [Advanced Binding](#advanced-binding)
+section.
+
+`less_redundant.aurora` contains:
+
+    class Python(Struct):
+      version = Required(String)
+      base = Default(String, 'Python-{{version}}')
+      package = Default(String, '{{base}}.tar.bz2')
+
+    ReliableProcess = Process(
+      max_failures = 5,
+      min_duration = 1)
+
+    download = ReliableProcess(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/{{python.version}}/{{python.package}}')
+
+    unpack = ReliableProcess(
+      name = 'unpack',
+      cmdline = 'rm -rf {{python.base}} && tar xjf {{python.package}}')
+
+    build = ReliableProcess(
+      name = 'build',
+      cmdline = 'pushd {{python.base}} && ./configure && make && popd',
+      max_failures = 1)
+
+    email = ReliableProcess(
+      name = 'email',
+      cmdline = 'echo Success | mail {{role}}@foocorp.com')
+
+    build_python = SequentialTask(
+      name = 'build_python',
+      processes = [download, unpack, build, email]).bind(python = Python(version = "2.7.3"))
+
+### Thermos Uses bash, But Thermos Is Not bash
+
+#### Bad
+
+Many tiny Processes make for harder-to-manage configurations.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'rcp user@my_machine:my_application .'
+    )
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+#### Good
+
+Each `cmdline` runs in a bash subshell, so you have the full power of
+bash. Chaining commands with `&&` or `||` is almost always the right
+thing to do.
+
+Also for Tasks that are simply a list of processes that run one after
+another, consider using the `SequentialTask` helper which applies a
+linear ordering constraint for you.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'rcp user@my_machine:my_application . && unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+### Rarely Use Functions In Your Configurations
+
+90% of the time you define a function in a `.aurora` file, you're
+probably Doing It Wrong(TM).
+
+#### Bad
+
+    def get_my_task(name, user, cpu, ram, disk):
+      return Task(
+        name = name,
+        user = user,
+        processes = [STAGE_PROCESS, RUN_PROCESS],
+        constraints = order(STAGE_PROCESS, RUN_PROCESS),
+        resources = Resources(cpu = cpu, ram = ram, disk = disk)
+      )
+
+    task_one = get_my_task('task_one', 'feynman', 1.0, 32*MB, 1*GB)
+    task_two = get_my_task('task_two', 'feynman', 2.0, 64*MB, 1*GB)
+
+#### Good
+
+This one is more idiomatic. Forced keyword arguments prevent accidents,
+e.g. constructing a task with `32*MB` of disk when you mean 32 MB of RAM.
+Less proliferation of task-construction techniques means an
+easier-to-read, quicker-to-understand, and more composable
+configuration.
+
+    TASK_TEMPLATE = SequentialTask(
+      user = 'wickman',
+      processes = [STAGE_PROCESS, RUN_PROCESS],
+    )
+
+    task_one = TASK_TEMPLATE(
+      name = 'task_one',
+      resources = Resources(cpu = 1.0, ram = 32*MB, disk = 1*GB)
+    )
+
+    task_two = TASK_TEMPLATE(
+      name = 'task_two',
+      resources = Resources(cpu = 2.0, ram = 64*MB, disk = 1*GB)
+    )
diff --git a/source/documentation/0.12.0/cron-jobs.md b/source/documentation/0.12.0/cron-jobs.md
new file mode 100644
index 0000000..802bb39
--- /dev/null
+++ b/source/documentation/0.12.0/cron-jobs.md
@@ -0,0 +1,131 @@
+# Cron Jobs
+
+Aurora supports execution of scheduled jobs on a Mesos cluster using cron-style syntax.
+
+- [Overview](#overview)
+- [Collision Policies](#collision-policies)
+	- [KILL_EXISTING](#kill_existing)
+	- [CANCEL_NEW](#cancel_new)
+- [Failure recovery](#failure-recovery)
+- [Interacting with cron jobs via the Aurora CLI](#interacting-with-cron-jobs-via-the-aurora-cli)
+	- [cron schedule](#cron-schedule)
+	- [cron deschedule](#cron-deschedule)
+	- [cron start](#cron-start)
+	- [job killall, job restart, job kill](#job-killall-job-restart-job-kill)
+- [Technical Note About Syntax](#technical-note-about-syntax)
+- [Caveats](#caveats)
+	- [Failovers](#failovers)
+	- [Collision policy is best-effort](#collision-policy-is-best-effort)
+	- [Timezone Configuration](#timezone-configuration)
+
+## Overview
+
+A job is identified as a cron job by the presence of a
+`cron_schedule` attribute containing a cron-style schedule in the
+[`Job`](/documentation/0.12.0/configuration-reference/#job-objects) object. Examples of cron schedules
+include "every 5 minutes" (`*/5 * * * *`), "Fridays at 17:00" (`* 17 * * FRI`), and
+"the 1st and 15th day of the month at 03:00" (`0 3 1,15 *`).
+
+Example (available in the [Vagrant environment](/documentation/0.12.0/vagrant/)):
+
+    $ cat /vagrant/examples/job/cron_hello_world.aurora
+    # cron_hello_world.aurora
+    # A cron job that runs every 5 minutes.
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]
+
+## Collision Policies
+
+The `cron_collision_policy` field specifies the scheduler's behavior when a new cron job is
+triggered while an older run hasn't finished. The scheduler has two policies available,
+[KILL_EXISTING](#kill_existing) and [CANCEL_NEW](#cancel_new).
+
+### KILL_EXISTING
+
+The default policy - on a collision the old instances are killed and instances with the current
+configuration are started.
+
+### CANCEL_NEW
+
+On a collision the new run is cancelled.
+
+Note that the use of this policy is likely a code smell - interrupted cron jobs should be able
+to recover their progress on a subsequent invocation, otherwise they risk having their work queue
+grow faster than they can process it.
+
+## Failure recovery
+
+Unlike services, which Aurora always re-executes regardless of exit status, instances of
+cron jobs retry according to the `max_task_failures` attribute of the
+[Task](/documentation/0.12.0/configuration-reference/#task-objects) object. To get "run-until-success" semantics,
+set `max_task_failures` to `-1`.
+
+## Interacting with cron jobs via the Aurora CLI
+
+Most interaction with cron jobs takes place using the `cron` subcommand. See `aurora cron -h`
+for up-to-date usage instructions.
+
+### cron schedule
+Schedules a new cron job on the Aurora cluster for later runs or replaces the existing cron template
+with a new one. Only future runs will be affected; any existing active tasks are left intact.
+
+    $ aurora cron schedule devcluster/www-data/test/cron_hello_world /vagrant/examples/jobs/cron_hello_world.aurora
+
+### cron deschedule
+Deschedules a cron job, preventing future runs but allowing current runs to complete.
+
+    $ aurora cron deschedule devcluster/www-data/test/cron_hello_world
+
+### cron start
+Start a cron job immediately, outside of its normal cron schedule.
+
+    $ aurora cron start devcluster/www-data/test/cron_hello_world
+
+### job killall, job restart, job kill
+Cron jobs create instances running on the cluster that you can interact with like normal Aurora
+tasks with `job kill` and `job restart`.
+
+## Technical Note About Syntax
+
+`cron_schedule` uses a restricted subset of BSD crontab syntax. While the
+execution engine currently uses Quartz, the schedule parsing is custom and implements a subset of FreeBSD
+[crontab(5)](http://www.freebsd.org/cgi/man.cgi?crontab(5)) syntax. See
+[the source](https://github.com/apache/aurora/blob/master/src/main/java/org/apache/aurora/scheduler/cron/CrontabEntry.java#L106-L124)
+for details.
+
+## Caveats
+
+### Failovers
+No failover recovery. Aurora does not record the latest minute it fired
+triggers for across failovers. Therefore it's possible to miss triggers
+on failover. Note that this behavior may change in the future.
+
+It's necessary to sync time between schedulers with something like `ntpd`.
+Clock skew could cause double or missed triggers in the case of a failover.
+
+### Collision policy is best-effort
+Aurora aims to always have *at least one copy* of a given instance running at a time - it's
+an AP system, meaning it chooses Availability and Partition Tolerance at the expense of
+Consistency.
+
+If your collision policy was `CANCEL_NEW` and a task has terminated, but
+Aurora has not yet noticed, Aurora will go ahead and create your new
+task.
+
+If your collision policy was `KILL_EXISTING` and a task was marked `LOST`
+but not yet garbage collected, Aurora will go ahead and create your new
+task without attempting to kill the old one (outside the GC interval).
+
+### Timezone Configuration
+Cron timezone is configured independently of the JVM timezone with the `-cron_timezone` flag and
+defaults to UTC.
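+
+For example (the timezone value is illustrative):
+
+    -cron_timezone=America/Los_Angeles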
diff --git a/source/documentation/0.12.0/deploying-aurora-scheduler.md b/source/documentation/0.12.0/deploying-aurora-scheduler.md
new file mode 100644
index 0000000..2cf5492
--- /dev/null
+++ b/source/documentation/0.12.0/deploying-aurora-scheduler.md
@@ -0,0 +1,372 @@
+# Deploying the Aurora Scheduler
+
+When setting up your cluster, you will install the scheduler on a small number (usually 3 or 5) of
+machines.  This guide helps you get the scheduler set up and troubleshoot some common hurdles.
+
+- [Installing Aurora](#installing-aurora)
+  - [Creating the Distribution .zip File (Optional)](#creating-the-distribution-zip-file-optional)
+  - [Installing Aurora](#installing-aurora-1)
+- [Configuring Aurora](#configuring-aurora)
+  - [A Note on Configuration](#a-note-on-configuration)
+  - [Replicated Log Configuration](#replicated-log-configuration)
+  - [Initializing the Replicated Log](#initializing-the-replicated-log)
+  - [Storage Performance Considerations](#storage-performance-considerations)
+  - [Network considerations](#network-considerations)
+  - [Considerations for running jobs in docker](#considerations-for-running-jobs-in-docker)
+  - [Security Considerations](#security-considerations)
+  - [Configuring Resource Oversubscription](#configuring-resource-oversubscription)
+  - [Process Logs](#process-logs)
+- [Running Aurora](#running-aurora)
+  - [Maintaining an Aurora Installation](#maintaining-an-aurora-installation)
+  - [Monitoring](#monitoring)
+  - [Running stateful services](#running-stateful-services)
+    - [Dedicated attribute](#dedicated-attribute)
+      - [Syntax](#syntax)
+      - [Example](#example)
+- [Best practices](#best-practices)
+  - [Diversity](#diversity)
+- [Common problems](#common-problems)
+  - [Replicated log not initialized](#replicated-log-not-initialized)
+    - [Symptoms](#symptoms)
+    - [Solution](#solution)
+  - [Scheduler not registered](#scheduler-not-registered)
+    - [Symptoms](#symptoms-1)
+    - [Solution](#solution-1)
+- [Changing Scheduler Quorum Size](#changing-scheduler-quorum-size)
+    - [Preparation](#preparation)
+    - [Adding New Schedulers](#adding-new-schedulers)
+
+## Installing Aurora
+The Aurora scheduler is a standalone Java server. As part of the build process it creates a bundle
+of all its dependencies, with the notable exceptions of the JVM and libmesos. Each target server
+should have a JVM (Java 8 or higher) and libmesos (0.25.0) installed.
+
+### Creating the Distribution .zip File (Optional)
+To create a distribution for installation you will need build tools installed. On Ubuntu this can be
+done with `sudo apt-get install build-essential default-jdk`.
+
+    git clone http://gitbox.apache.org/repos/asf/aurora.git
+    cd aurora
+    ./gradlew distZip
+
+Copy the generated `dist/distributions/aurora-scheduler-*.zip` to each node that will run a scheduler.
+
+### Installing Aurora
+Extract the aurora-scheduler zip file. The example configurations assume it is extracted to
+`/usr/local/aurora-scheduler`.
+
+    sudo unzip dist/distributions/aurora-scheduler-*.zip -d /usr/local
+    sudo ln -nfs "$(ls -dt /usr/local/aurora-scheduler-* | head -1)" /usr/local/aurora-scheduler
+
+## Configuring Aurora
+
+### A Note on Configuration
+Like Mesos, Aurora uses command-line flags for runtime configuration. As such the Aurora
+"configuration file" is typically a `scheduler.sh` shell script of the form.
+
+    #!/bin/bash
+    AURORA_HOME=/usr/local/aurora-scheduler
+
+    # Flags controlling the JVM.
+    JAVA_OPTS=(
+      -Xmx2g
+      -Xms2g
+      # GC tuning, etc.
+    )
+
+    # Flags controlling the scheduler.
+    AURORA_FLAGS=(
+      -http_port=8081
+      # Log configuration, etc.
+    )
+
+    # Environment variables controlling libmesos
+    export JAVA_HOME=...
+    export GLOG_v=1
+    export LIBPROCESS_PORT=8083
+
+    JAVA_OPTS="${JAVA_OPTS[*]}" exec "$AURORA_HOME/bin/aurora-scheduler" "${AURORA_FLAGS[@]}"
+
+That way Aurora's current flags are visible in `ps` and in the `/vars` admin endpoint.
+
+Examples are available under `examples/scheduler/`. For a list of available Aurora flags and their
+documentation run
+
+    /usr/local/aurora-scheduler/bin/aurora-scheduler -help
+
+### Replicated Log Configuration
+All Aurora state is persisted to a replicated log. This includes all jobs Aurora is running
+including where in the cluster they are being run and the configuration for running them, as
+well as other information such as metadata needed to reconnect to the Mesos master, resource
+quotas, and any other locks in place.
+
+Aurora schedulers use ZooKeeper to discover log replicas and elect a leader. Only one scheduler is
+leader at a given time - the other schedulers follow log writes and prepare to take over as leader
+but do not communicate with the Mesos master. Either 3 or 5 schedulers are recommended in a
+production deployment depending on failure tolerance and they must have persistent storage.
+
+In a cluster with `N` schedulers, the flag `-native_log_quorum_size` should be set to
+`floor(N/2) + 1`. So in a cluster with 1 scheduler it should be set to `1`, in a cluster with 3 it
+should be set to `2`, and in a cluster of 5 it should be set to `3`.
+
+  Number of schedulers (N) | `-native_log_quorum_size` setting (`floor(N/2) + 1`)
+  ------------------------ | -----------------------------------------------------
+  1                        | 1
+  3                        | 2
+  5                        | 3
+  7                        | 4
+
+*Incorrectly setting this flag will cause data corruption to occur!*
+
+See [this document](/documentation/0.12.0/storage-config/#scheduler-storage-configuration-flags) for more replicated
+log and storage configuration options.
+
+### Initializing the Replicated Log
+Before you start Aurora you will also need to initialize the log on a majority of the schedulers.
+
+    mesos-log initialize --path="/path/to/native/log"
+
+The `--path` flag should match the `--native_log_file_path` flag to the scheduler.
+Failing to do this will result in the following message when you try to start the scheduler.
+
+    Replica in EMPTY status received a broadcasted recover request
+
+### Storage Performance Considerations
+
+See [this document](/documentation/0.12.0/scheduler-storage/) for scheduler storage performance considerations.
+
+### Network considerations
+The Aurora scheduler listens on 2 ports - an HTTP port used for client RPCs and a web UI,
+and a libprocess (HTTP+Protobuf) port used to communicate with the Mesos master and for the log
+replication protocol. These can be left unconfigured (the scheduler publishes all selected ports
+to ZooKeeper) or explicitly set in the startup script as follows:
+
+    # ...
+    AURORA_FLAGS=(
+      # ...
+      -http_port=8081
+      # ...
+    )
+    # ...
+    export LIBPROCESS_PORT=8083
+    # ...
+
+### Considerations for running jobs in docker containers
+In order for Aurora to launch jobs using docker containers, a few extra configuration options
+must be set.  The [docker containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/)
+must be enabled on the mesos slaves by launching them with the `--containerizers=docker,mesos` option.
+
+By default, Aurora will configure Mesos to copy the file specified in `-thermos_executor_path`
+into the container's sandbox.  If using a wrapper script to launch the thermos executor,
+specify the path to the wrapper in that argument. In addition, the path to the executor pex itself
+must be included in the `-thermos_executor_resources` option. Doing so will ensure that both the
+wrapper script and executor are correctly copied into the sandbox. Finally, ensure the wrapper
+script does not access resources outside of the sandbox, as when the script is run from within a
+docker container those resources will not exist.
+
+In order to correctly execute processes inside a job, the docker container must have python 2.7
+installed.
+
+A scheduler flag, `-global_container_mounts` allows mounting paths from the host (i.e., the slave)
+into all containers on that host. The format is a comma-separated list of `host_path:container_path[:mode]`
+tuples. For example `-global_container_mounts=/opt/secret_keys_dir:/mnt/secret_keys_dir:ro` mounts
+`/opt/secret_keys_dir` from the slaves into all launched containers. Valid modes are `ro` and `rw`.
+
+If you would like to supply your own parameters to `docker run` when launching jobs in docker
+containers, you may use the following flags:
+
+    -allow_docker_parameters
+    -default_docker_parameters
+
+`-allow_docker_parameters` controls whether or not users may pass their own configuration parameters
+through the job configuration files. If set to `false` (the default), the scheduler will reject
+jobs with custom parameters. *NOTE*: this setting should be used with caution as it allows any job
+owner to specify any parameters they wish, including those that may introduce security concerns
+(`privileged=true`, for example).
+
+`-default_docker_parameters` allows a cluster operator to specify a universal set of parameters that
+should be used for every container that does not have parameters explicitly configured at the job
+level. The argument accepts a multimap format:
+
+    -default_docker_parameters="read-only=true,tmpfs=/tmp,tmpfs=/run"
+
+### Process Logs
+
+#### Log destination
+By default, Thermos will write process stdout/stderr to log files in the sandbox. Process object configuration
+allows specifying alternate log file destinations like streamed stdout/stderr or suppression of all log output.
+Default behavior can be configured for the entire cluster with the following flag (through the `-thermos_executor_flags`
+argument to the Aurora scheduler):
+
+    --runner-logger-destination=both
+
+`both` configuration will send logs to files and stream to parent stdout/stderr outputs.
+
+See [this document](/documentation/0.12.0/configuration-reference/#logger) for all destination options.
+
+#### Log rotation
+By default, Thermos will not rotate the stdout/stderr logs from child processes and they will grow
+without bound. An individual user may change this behavior via configuration on the Process object,
+but it may also be desirable to change the default configuration for the entire cluster.
+In order to enable rotation by default, the following flags can be applied to Thermos (through the
+-thermos_executor_flags argument to the Aurora scheduler):
+
+    --runner-logger-mode=rotate
+    --runner-rotate-log-size-mb=100
+    --runner-rotate-log-backups=10
+
+In the above example, each instance of the Thermos runner will rotate stderr/stdout logs once they
+reach 100 MiB in size and keep a maximum of 10 backups. If a user has provided a custom setting for
+their process, it will override these default settings.
+
+## Running Aurora
+Configure a supervisor like [Monit](http://mmonit.com/monit/) or
+[supervisord](http://supervisord.org/) to run the created `scheduler.sh` file and restart it
+whenever it fails. Aurora expects to be restarted by an external process when it fails. Aurora
+supports an active health checking protocol on its admin HTTP interface - if a `GET /health` times
+out or returns anything other than `200 OK` the scheduler process is unhealthy and should be
+restarted.
+
+For example, monit can be configured with
+
+    if failed port 8081 send "GET /health HTTP/1.0\r\n" expect "OK\n" with timeout 2 seconds for 10 cycles then restart
+
+assuming you set `-http_port=8081`.
+
+### Security Considerations
+
+See [security.md](/documentation/0.12.0/security/).
+
+### Configuring Resource Oversubscription
+
+**WARNING**: This feature is currently in alpha status. Do not use it in production clusters!
+See [this document](/documentation/0.12.0/configuration-reference/#revocable-jobs) for more feature details.
+
+Set these scheduler flag to allow receiving revocable Mesos offers:
+
+    -receive_revocable_resources=true
+
+Specify a tier configuration file path:
+
+    -tier_config=path/to/tiers/config.json
+
+Example [tier configuration file](https://github.com/apache/aurora/blob/#{git_tag}/src/test/resources/org/apache/aurora/scheduler/tiers-example.json).
+
+### Maintaining an Aurora Installation
+
+### Monitoring
+Please see our dedicated [monitoring guide](/documentation/0.12.0/monitoring/) for in-depth discussion on monitoring.
+
+### Running stateful services
+Aurora is best suited to run stateless applications, but it also accommodates stateful services
+like databases, or services that otherwise need to always run on the same machines.
+
+#### Dedicated attribute
+The Mesos slave has the `--attributes` command line argument which can be used to mark a slave with
+static attributes (not to be confused with `--resources`, which are dynamic and accounted).
+
+Aurora makes these attributes available for matching with scheduling
+[constraints](/documentation/0.12.0/configuration-reference/#specifying-scheduling-constraints).  Most of these
+constraints are arbitrary and available for custom use.  There is one exception, though: the
+`dedicated` attribute.  Aurora treats this specially, and only allows matching jobs to run on these
+machines, and will only schedule matching jobs on these machines.
+
+See the [section](/documentation/0.12.0/resources/#resource-quota) about resource quotas to learn how quotas apply to
+dedicated jobs.
+
+##### Syntax
+The dedicated attribute has semantic meaning. The format is `$role(/.*)?`. When a job is created,
+the scheduler requires that the `$role` component matches the `role` field in the job
+configuration, and will reject the job creation otherwise.  The remainder of the attribute is
+free-form. We've developed the idiom of formatting this attribute as `$role/$job`, but do not
+enforce this.
+
+##### Example
+Consider the following slave command line:
+
+    mesos-slave --attributes="dedicated:db_team/redis" ...
+
+And this job configuration:
+
+    Service(
+      name = 'redis',
+      role = 'db_team',
+      constraints = {
+        'dedicated': 'db_team/redis'
+      }
+      ...
+    )
+
+The job configuration is indicating that it should only be scheduled on slaves with the attribute
+`dedicated:db_team/redis`.  Additionally, Aurora will prevent any tasks that do _not_ have that
+constraint from running on those slaves.
+
+## Best practices
+### Diversity
+Data centers are often organized with hierarchical failure domains.  Common failure domains
+include hosts, racks, rows, and PDUs.  If you have this information available, it is wise to tag
+the mesos-slave with them as
+[attributes](https://mesos.apache.org/documentation/attributes-resources/).
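+
+For example, a slave might be tagged like this (the attribute names and
+values are illustrative):
+
+    mesos-slave --attributes="host:web042;rack:r17;row:b" ...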
+
+When it comes time to schedule jobs, Aurora will automatically spread them across the failure
+domains as specified in the
+[job configuration](/documentation/0.12.0/configuration-reference/#specifying-scheduling-constraints).
+
+Note: in virtualized environments like EC2, the only attribute that usually makes sense for this
+purpose is `host`.
+
+## Common problems
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions here to help get you moving.
+
+### Replicated log not initialized
+
+#### Symptoms
+- Scheduler RPCs and web interface claim `Storage is not READY`
+- Scheduler log repeatedly prints messages like
+
+  ```
+  I1016 16:12:27.234133 26081 replica.cpp:638] Replica in EMPTY status
+  received a broadcasted recover request
+  I1016 16:12:27.234256 26084 recover.cpp:188] Received a recover response
+  from a replica in EMPTY status
+  ```
+
+#### Solution
+When you create a new cluster, you need to inform a quorum of schedulers that they are safe to
+consider their database to be empty by [initializing](#initializing-the-replicated-log) the
+replicated log. This is done to prevent the scheduler from modifying the cluster state in the event
+of multiple simultaneous disk failures or, more likely, misconfiguration of the replicated log path.
+
+### Scheduler not registered
+
+#### Symptoms
+Scheduler log contains
+
+    Framework has not been registered within the tolerated delay.
+
+#### Solution
+Double-check that the scheduler is configured correctly to reach the master. If you are registering
+the master in ZooKeeper, make sure the command line argument to the master:
+
+    --zk=zk://$ZK_HOST:2181/mesos/master
+
+is the same as the one on the scheduler:
+
+    -mesos_master_address=zk://$ZK_HOST:2181/mesos/master
+
+## Changing Scheduler Quorum Size
+Special care needs to be taken when changing the size of the Aurora scheduler quorum.
+Since Aurora uses a Mesos replicated log, similar steps need to be followed as when
+[changing the mesos quorum size](http://mesos.apache.org/documentation/latest/operational-guide).
+
+### Preparation
+Increase [-native_log_quorum_size](/documentation/0.12.0/storage-config/#-native_log_quorum_size) on each
+existing scheduler and restart them. When updating from 3 to 5 schedulers, the quorum size
+would grow from 2 to 3.
+
+### Adding New Schedulers
+Start the new schedulers with `-native_log_quorum_size` set to the new value. Failing to
+first increase the quorum size on running schedulers can in some cases result in corruption
+or truncating of the replicated log used by Aurora. In that case, see the documentation on
+[recovering from backup](/documentation/0.12.0/storage-config/#recovering-from-a-scheduler-backup).
diff --git a/source/documentation/0.12.0/design-documents.md b/source/documentation/0.12.0/design-documents.md
new file mode 100644
index 0000000..71d6c4c
--- /dev/null
+++ b/source/documentation/0.12.0/design-documents.md
@@ -0,0 +1,17 @@
+# Design Documents
+
+Since its inception as an Apache project, larger feature additions to the
+Aurora code base are discussed in form of design documents. Design documents
+are living documents until a consensus has been reached to implement a feature
+in the proposed form.
+
+Current and past documents:
+
+* [Command Hooks for the Aurora Client](design/command-hooks.md)
+* [Health Checks for Updates](https://docs.google.com/document/d/1ZdgW8S4xMhvKW7iQUX99xZm10NXSxEWR0a-21FP5d94/edit)
+* [JobUpdateDiff thrift API](https://docs.google.com/document/d/1Fc_YhhV7fc4D9Xv6gJzpfooxbK4YWZcvzw6Bd3qVTL8/edit)
+* [REST API RFC](https://docs.google.com/document/d/11_lAsYIRlD5ETRzF2eSd3oa8LXAHYFD8rSetspYXaf4/edit)
+* [Revocable Mesos offers in Aurora](https://docs.google.com/document/d/1r1WCHgmPJp5wbrqSZLsgtxPNj3sULfHrSFmxp2GyPTo/edit)
+* [Ubiquitous Jobs](https://docs.google.com/document/d/12hr6GnUZU3mc7xsWRzMi3nQILGB-3vyUxvbG-6YmvdE/edit)
+
+Design documents can be found in the Aurora issue tracker via the query [`project = AURORA AND text ~ "docs.google.com" ORDER BY created`](https://issues.apache.org/jira/browse/AURORA-1528?jql=project%20%3D%20AURORA%20AND%20text%20~%20%22docs.google.com%22%20ORDER%20BY%20created).
diff --git a/source/documentation/0.12.0/design/command-hooks.md b/source/documentation/0.12.0/design/command-hooks.md
new file mode 100644
index 0000000..3f3f70f
--- /dev/null
+++ b/source/documentation/0.12.0/design/command-hooks.md
@@ -0,0 +1,102 @@
+# Command Hooks for the Aurora Client
+
+## Introduction/Motivation
+
+We've got hooks in the client that surround API calls. These are
+pretty awkward, because they don't correlate with user actions. For
+example, suppose we wanted a policy that said users weren't allowed to
+kill all instances of a production job at once.
+
+Right now, all that we could hook would be the "killJob" api call. But
+kill (at least in newer versions of the client) normally runs in
+batches. If a user called killall, what we would see on the API level
+is a series of "killJob" calls, each of which specified a batch of
+instances. We woudn't be able to distinguish between really killing
+all instances of a job (which is forbidden under this policy), and
+carefully killing in batches (which is permitted.) In each case, the
+hook would just see a series of API calls, and couldn't find out what
+the actual command being executed was!
+
+For most policy enforcement, what we really want to be able to do is
+look at and vet the commands that a user is performing, not the API
+calls that the client uses to implement those commands.
+
+So I propose that we add a new kind of hooks, which surround noun/verb
+commands. A hook will register itself to handle a collection of (noun,
+verb) pairs. Whenever any of those noun/verb commands are invoked, the
+hook's methods will be called around the execution of the verb. A
+pre-hook will have the ability to reject a command, preventing the
+verb from being executed.
+
+## Registering Hooks
+
+These hooks will be registered via configuration plugins. A configuration plugin
+can register hooks using an API. Hooks registered this way are, effectively,
+hardwired into the client executable.
+
+The order of execution of hooks is unspecified: they may be called in
+any order. There is no way to guarantee that one hook will execute
+before some other hook.
+
+
+### Global Hooks
+
+Hooks registered by the python call are called _global_ hooks,
+because they will run for all configurations, whether or not they
+specify any hooks in the configuration file.
+
+In the implementation, hooks are registered in the module
+`apache.aurora.client.cli.command_hooks`, using the class
+`GlobalCommandHookRegistry`. A global hook can be registered by calling
+`GlobalCommandHookRegistry.register_command_hook` in a configuration plugin.
+
+### The API
+
+    class CommandHook(object):
+      @property
+      def name(self):
+        """Returns a name for the hook."""
+
+      def get_nouns(self):
+        """Return the nouns that have verbs that should invoke this hook."""
+
+      def get_verbs(self, noun):
+        """Return the verbs for a particular noun that should invoke his hook."""
+
+      @abstractmethod
+      def pre_command(self, noun, verb, context, commandline):
+        """Execute a hook before invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook.
+        * commandline: the original argv collection used to invoke the client.
+        Returns: True if the command should be allowed to proceed; False if the command
+        should be rejected.
+        """
+
+      def post_command(self, noun, verb, context, commandline, result):
+        """Execute a hook after invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook.
+        * commandline: the original argv collection used to invoke the client.
+        * result: the result code returned by the verb.
+        Returns: nothing
+        """
+
+    class GlobalCommandHookRegistry(object):
+      @classmethod
+      def register_command_hook(cls, hook):
+        pass
+
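+As an illustration, a configuration plugin could use this API to register a hook that
+rejects `job killall` against production environments. The `ProdKillallGuard` class and
+its policy are hypothetical, and importing `CommandHook` from the registry module is an
+assumption:
+
+    from apache.aurora.client.cli.command_hooks import (
+        CommandHook,
+        GlobalCommandHookRegistry)
+
+    class ProdKillallGuard(CommandHook):
+      @property
+      def name(self):
+        return 'prod_killall_guard'
+
+      def get_nouns(self):
+        return ['job']
+
+      def get_verbs(self, noun):
+        return ['killall'] if noun == 'job' else []
+
+      def pre_command(self, noun, verb, context, commandline):
+        # Returning False rejects the command before the verb executes.
+        return not any('/prod/' in arg for arg in commandline)
+
+      def post_command(self, noun, verb, context, commandline, result):
+        pass
+
+    GlobalCommandHookRegistry.register_command_hook(ProdKillallGuard())
+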
+### Skipping Hooks
+
+To skip a hook, a user uses a command-line option, `--skip-hooks`. The option can either
+specify specific hooks to skip, or "all":
+
+* `aurora --skip-hooks=all job create east/bozo/devel/myjob` will create a job
+  without running any hooks.
+* `aurora --skip-hooks=test,iq job create east/bozo/devel/myjob` will create a job,
+  and will skip only the hooks named "test" and "iq".
diff --git a/source/documentation/0.12.0/developing-aurora-client.md b/source/documentation/0.12.0/developing-aurora-client.md
new file mode 100644
index 0000000..07f96f5
--- /dev/null
+++ b/source/documentation/0.12.0/developing-aurora-client.md
@@ -0,0 +1,93 @@
+Getting Started
+===============
+
+The client is written in Python, and uses the
+[Pants](http://pantsbuild.github.io/python-readme.html) build tool.
+
+Client Configuration
+====================
+
+The client uses a configuration file that specifies available clusters. More information about the
+contents of this file can be found in the
+[Client Cluster Configuration](/documentation/0.12.0/client-cluster-configuration/) documentation. Information about
+how the client locates this file can be found in the
+[Client Commands](/documentation/0.12.0/client-commands/#cluster-configuration) documentation.
+
+Building and Testing the Client
+===============================
+
+Building and testing the client code are both done using Pants. The relevant targets to know about
+are:
+
+   * Build a client executable: `./pants binary src/main/python/apache/aurora/client:aurora`
+   * Test client code: `./pants test src/test/python/apache/aurora/client/cli:all`
+
+If you want to build a source distribution of the client, you need to run `./build-support/release/make-python-sdists`.
+
+Running/Debugging the Client
+============================
+
+For manually testing client changes against a cluster, we use [Vagrant](https://www.vagrantup.com/).
+To start a virtual cluster, you need to install Vagrant, and then run `vagrant up` from the root of
+the aurora workspace. This will create a vagrant host named "devcluster", with a mesos master, a set
+of mesos slaves, and an aurora scheduler.
+
+If you have a change you would like to test in your local cluster, rebuild the client:
+
+    vagrant ssh -c 'aurorabuild client'
+
+Once this completes, the `aurora` command will reflect your changes.
+
+Running/Debugging the Client in PyCharm
+=======================================
+
+It's possible to use PyCharm to run and debug both the client and client tests in an IDE. In order
+to do this, first run:
+
+    build-support/python/make-pycharm-virtualenv
+
+This script will configure a virtualenv with all of our Python requirements. Once the script
+completes it will emit instructions for configuring PyCharm:
+
+    Your PyCharm environment is now set up.  You can open the project root
+    directory with PyCharm.
+
+    Once the project is loaded:
+      - open project settings
+      - click 'Project Interpreter'
+      - click the cog in the upper-right corner
+      - click 'Add Local'
+      - select 'build-support/python/pycharm.venv/bin/python'
+      - click 'OK'
+
+### Running/Debugging Tests
+
+After following these instructions, you should now be able to run/debug tests directly from the IDE
+by right-clicking on a test (or test class) and choosing to run or debug:
+
+[![Debug Client Test](images/debug-client-test.png)](images/debug-client-test.png)
+
+If you've set a breakpoint, you can see the run will now stop and let you debug:
+
+[![Debugging Client Test](images/debugging-client-test.png)](images/debugging-client-test.png)
+
+### Running/Debugging the Client
+
+Actually running and debugging the client is unfortunately a bit more complex. You'll need to create
+a Run configuration:
+
+* Go to Run → Edit Configurations
+* Click the + icon to add a new configuration.
+* Choose Python and name the configuration 'client'.
+* Set the script path to `/your/path/to/aurora/src/main/python/apache/aurora/client/cli/client.py`
+* Set the script parameters to the command you want to run (e.g. `job status <job key>`)
+* Expand the Environment section and click the ellipsis to add a new environment variable
+* Click the + at the bottom to add a new variable named AURORA_CONFIG_ROOT whose value is the
+  path where your cluster configuration can be found. For example, to talk to the scheduler
+  running in the vagrant image, it would be set to `/your/path/to/aurora/examples/vagrant` (this
+  is the directory where our example clusters.json is found).
+* You should now be able to run and debug this configuration!
+
+Making thrift schema changes
+============================
+See [this document](/documentation/0.12.0/thrift-deprecation/) for any thrift related changes.
diff --git a/source/documentation/0.12.0/developing-aurora-scheduler.md b/source/documentation/0.12.0/developing-aurora-scheduler.md
new file mode 100644
index 0000000..cfab2aa
--- /dev/null
+++ b/source/documentation/0.12.0/developing-aurora-scheduler.md
@@ -0,0 +1,163 @@
+Java code in the aurora repo is built with [Gradle](http://gradle.org).
+
+
+Prerequisite
+============
+
+When using Apache Aurora checked out from the source repository or the binary
+distribution, the Gradle wrapper and JavaScript dependencies are provided.
+However, you need to manually install them when using the source release
+downloads:
+
+1. Install Gradle following the instructions on the [Gradle web site](http://gradle.org)
+2. From the root directory of the Apache Aurora project generate the gradle
+wrapper by running:
+
+    gradle wrapper
+
+
+Getting Started
+===============
+
+You will need Java 8 installed and on your `PATH` or unzipped somewhere with `JAVA_HOME` set. Then
+
+    ./gradlew tasks
+
+will bootstrap the build system and show available tasks. This can take a while the first time you
+run it but subsequent runs will be much faster due to cached artifacts.
+
+Running the Tests
+-----------------
+Aurora has a comprehensive unit test suite. To run the tests use
+
+    ./gradlew build
+
+Gradle will only re-run tests whose dependencies have changed. To force a re-run of all
+tests use
+
+    ./gradlew clean build
+
+Running the build with code quality checks
+------------------------------------------
+To speed up development iteration, the plain gradle commands will not run static analysis tools.
+However, you should run these checks before posting a review diff, and **always** run them before pushing a
+commit to origin/master.
+
+    ./gradlew build -Pq
+
+Running integration tests
+-------------------------
+To run the same tests that are run in the Apache Aurora continuous integration
+environment:
+
+    ./build-support/jenkins/build.sh
+
+
+In addition, there is an end-to-end test that runs a suite of aurora commands
+using a virtual cluster:
+
+    ./src/test/sh/org/apache/aurora/e2e/test_end_to_end.sh
+
+
+
+Creating a bundle for deployment
+--------------------------------
+Gradle can create a zip file containing Aurora, all of its dependencies, and a launch script with
+
+    ./gradlew distZip
+
+or a tar file containing the same files with
+
+    ./gradlew distTar
+
+The output file will be written to `dist/distributions/aurora-scheduler.zip` or
+`dist/distributions/aurora-scheduler.tar`.
+
+Developing Aurora Java code
+===========================
+
+Setting up an IDE
+-----------------
+Gradle can generate project files for your IDE. To generate an IntelliJ IDEA project run
+
+    ./gradlew idea
+
+and import the generated `aurora.ipr` file.
+
+Adding or Upgrading a Dependency
+--------------------------------
+New dependencies can be added from Maven central by adding a `compile` dependency to `build.gradle`.
+For example, to add a dependency on `com.example`'s `example-lib` 1.0 add this block:
+
+    compile 'com.example:example-lib:1.0'
+
+NOTE: Anyone thinking about adding a new dependency should first familiarize themselves with the
+Apache Foundation's third-party licensing
+[policy](http://www.apache.org/legal/resolved.html#category-x).
+
+Developing Aurora UI
+======================
+
+Installing bower (optional)
+----------------------------
+Third party JS libraries used in Aurora (located at 3rdparty/javascript/bower_components) are
+managed by bower, a JS dependency manager. Bower is only required if you plan to add, remove or
+update JS libraries. Bower can be installed using the following command:
+
+    npm install -g bower
+
+Bower depends on node.js and npm. The easiest way to install node on a Mac is via brew:
+
+    brew install node
+
+For more node.js installation options refer to https://github.com/joyent/node/wiki/Installation.
+
+More info on installing and using bower can be found at: http://bower.io/. Once installed, you can
+use the following commands to view and modify the bower repo at
+3rdparty/javascript/bower_components
+
+    bower list
+    bower install <library name>
+    bower remove <library name>
+    bower update <library name>
+    bower help
+
+Faster Iteration in Vagrant
+---------------------------
+The scheduler serves UI assets from the classpath. For production deployments this means the assets
+are served from within a jar. However, for faster development iteration, the vagrant image is
+configured to add `/vagrant/dist/resources/main` to the head of CLASSPATH. This path is shared
+with the host filesystem, mapped to where your Aurora repository lives. This means
+that any updates to dist/resources/main in your checkout will be reflected immediately in the UI
+served from within the vagrant image.
+
+The one caveat is that this path is under `dist`, not `src`. This is because the assets must
+be processed by gradle before they can be served. So, unfortunately, you cannot just save your local
+changes and see them reflected in the UI; you must first run `./gradlew processResources`. This is
+less than ideal, but better than having to restart the scheduler after every change. Additionally,
+gradle makes this process somewhat easier with the `--continuous` flag. If you run
+`./gradlew processResources --continuous`, gradle will monitor the filesystem for changes and run the
+task automatically as necessary. This doesn't quite provide hot-reload capabilities, but it does
+allow for <5s from save to changes being visible in the UI with no further action required on the
+part of the developer.
+
+Developing the Aurora Build System
+==================================
+
+Bootstrapping Gradle
+--------------------
+The following files were autogenerated by `gradle wrapper` using gradle 1.8's
+[Wrapper](http://www.gradle.org/docs/1.8/dsl/org.gradle.api.tasks.wrapper.Wrapper.html) plugin and
+should not be modified directly:
+
+    ./gradlew
+    ./gradlew.bat
+    ./gradle/wrapper/gradle-wrapper.jar
+    ./gradle/wrapper/gradle-wrapper.properties
+
+To upgrade Gradle, unpack the new version somewhere, run `/path/to/new/gradle wrapper` in the
+repository root and commit the changed files.
+
+Making thrift schema changes
+============================
+See [this document](/documentation/0.12.0/thrift-deprecation/) for any thrift related changes.
diff --git a/source/documentation/0.12.0/hooks.md b/source/documentation/0.12.0/hooks.md
new file mode 100644
index 0000000..28307ab
--- /dev/null
+++ b/source/documentation/0.12.0/hooks.md
@@ -0,0 +1,244 @@
+# Hooks for Aurora Client API
+
+- [Introduction](#introduction)
+- [Hook Types](#hook-types)
+- [Execution Order](#execution-order)
+- [Hookable Methods](#hookable-methods)
+- [Activating and Using Hooks](#activating-and-using-hooks)
+- [.aurora Config File Settings](#aurora-config-file-settings)
+- [Command Line](#command-line)
+- [Hooks Protocol](#hooks-protocol)
+  - [pre_ Methods](#pre_-methods)
+  - [err_ Methods](#err_-methods)
+  - [post_ Methods](#post_-methods)
+- [Generic Hooks](#generic-hooks)
+- [Hooks Process Checklist](#hooks-process-checklist)
+
+## Introduction
+
+You can execute hook methods around Aurora Client API methods when they are called by Aurora Command Line commands.
+
+Explaining how hooks work is a bit tricky because of some indirection about what they apply to. Basically, a hook is code that executes when a particular Aurora Client API method runs, letting you extend the method's actions. The hook executes on the client side, specifically on the machine executing Aurora commands.
+
+The catch is that hooks are associated with Aurora Client API methods, which users don't directly call. Instead, users call Aurora Command Line commands, which call Client API methods during their execution. Since which hooks run depends on which Client API methods get called, you will need to know which Command Line commands call which API methods. Later on, there is a table showing the various associations.
+
+**Terminology Note**: From now on, "method(s)" refer to Client API methods, and "command(s)" refer to Command Line commands.
+
+## Hook Types
+
+Hooks come in three basic types, differing by when they run with respect to their associated method.
+
+`pre_<method_name>`: When its associated method is called, the `pre_` hook executes first, then the called method. If the `pre_` hook fails, the method never runs. Later code that expected the method to succeed may be affected by this, which can result in the Aurora client terminating.
+
+Note that a `pre_` hook can error-trap internally so it does not
+return `False`. Designers/contributors of new `pre_` hooks should
+consider whether or not to error-trap them. You can error trap at the
+highest level very generally and always pass the `pre_` hook by
+returning `True`. For example:
+
+```python
+def pre_create_job(*args, **kw):
+  do_something()  # if do_something fails with an exception, the create_job is not attempted!
+  return True
+
+# However...
+def pre_create_job(*args, **kw):
+  try:
+    do_something()  # may cause exception
+  except Exception:  # generic error trap will catch it
+    pass  # and ignore the exception
+  return True  # create_job will run in any case!
+```
+
+`post_<method_name>`: A `post_` hook executes after its associated method successfully finishes running. If it fails, the already executed method is unaffected. A `post_` hook's error is trapped, and any later operations are unaffected.
+
+`err_<method_name>`: Executes only when its associated method returns a status other than OK or throws an exception. If an `err_` hook fails, the already executed method is unaffected. An `err_` hook's error is trapped, and any later operations are unaffected.
+
+## Execution Order
+
+A command with `pre_`, `post_`, and `err_` hooks defined and activated for its called method executes in the following order when the method successfully executes:
+
+1. Command called
+2. Command code executes
+3. Method called
+4. `pre_` method hook runs
+5. Method runs and successfully finishes
+6. `post_` method hook runs
+7. Command code executes
+8. Command execution ends
+
+The following is what happens when, for the same command and hooks, the method associated with the command suffers an error and does not successfully finish executing:
+
+1. Command called
+2. Command code executes
+3. Method called
+4. `pre_` method hook runs
+5. Method runs and fails
+6. `err_` method hook runs
+7. Command code executes (if the `err_` hook does not end the command execution)
+8. Command execution ends
+
+Note that the `post_` and `err_` hooks for the same method can never both run for a single execution of that method.
+
+## Hookable Methods
+
+You can associate `pre_`, `post_`, and `err_` hooks with the following methods. Since you do not directly interact with the methods, but rather the Aurora Command Line commands that call them, for each method we also list the command(s) that can call the method. Note that a different method or methods may be called by a command depending on how the command's other code executes. Similarly, multiple commands can call the same method. We also list the methods' argument signatures, which are used by their associated hooks. <a name="Chart"></a>
+
+  Aurora Client API Method | Client API Method Argument Signature | Aurora Command Line Command
+  -------------------------| ------------------------------------- | ---------------------------
+  ```create_job``` | ```self```, ```config``` | ```job create```, ```runtask```
+  ```restart``` | ```self```, ```job_key```, ```shards```, ```update_config```, ```health_check_interval_seconds``` | ```job restart```
+  ```kill_job``` | ```self```, ```job_key```, ```shards=None``` |  ```job kill```
+  ```start_cronjob``` | ```self```, ```job_key``` | ```cron start```
+  ```start_job_update``` | ```self```, ```config```, ```instances=None``` | ```update start```
+
+Some specific examples:
+
+* `pre_create_job` executes when a `create_job` method is called, and before the `create_job` method itself executes.
+
+* `post_cancel_update` executes after a `cancel_update` method has successfully finished running.
+
+* `err_kill_job` executes when the `kill_job` method is called, but doesn't successfully finish running.
+
+## Activating and Using Hooks
+
+By default, hooks are inactive. If you do not want to use hooks, you do not need to make any changes to your code. If you do want to use hooks, you will need to alter your `.aurora` config file to activate them both for the configuration as a whole and for individual `Job`s. And, of course, you will need to define in your config file what happens when a particular hook executes.
+
+## .aurora Config File Settings
+
+You can define a top-level `hooks` variable in any `.aurora` config file. `hooks` is a list of all objects that define hooks used by `Job`s defined in that config file. If you do not want to define any hooks for a configuration, `hooks` is optional.
+
+    hooks = [Object_with_defined_hooks1, Object_with_defined_hooks2]
+
+Be careful when assembling a config file using `include` on multiple smaller config files. If there are multiple files that assign a value to `hooks`, only the last assignment made will stick. For example, if `x.aurora` has `hooks = [a, b, c]` and `y.aurora` has `hooks = [d, e, f]` and `z.aurora` has, in this order, `include x.aurora` and `include y.aurora`, the `hooks` value will be `[d, e, f]`.
+
+Also, for any `Job` that you want to use hooks with, its `Job` definition in the `.aurora` config file must set an `enable_hooks` flag to `True` (it defaults to `False`). By default, hooks are disabled and you must enable them for `Job`s of your choice.
+
+To summarize, to use hooks for a particular job, you must activate hooks both for your config file as a whole and for that job. Activating hooks only for individual jobs won't work, nor will activating hooks only for your config file as a whole. You must also specify the hooks' defining object in the `hooks` variable.
+
+Recall that `.aurora` config files are written in Pystachio. So the following turns on hooks for production jobs at cluster1 and cluster2, but leaves them off for similar jobs with a defined user role. Of course, you also need to list the objects that define the hooks in your config file's `hooks` variable.
+
+```python
+import getpass
+
+jobs = [
+    Job(enable_hooks = True, cluster = c, env = 'prod') for c in ('cluster1', 'cluster2')
+]
+# Hooks disabled for these jobs:
+jobs.extend(
+    Job(cluster = c, env = 'prod', role = getpass.getuser()) for c in ('cluster1', 'cluster2'))
+```
+
+## Command Line
+
+All Aurora Command Line commands now accept an `.aurora` config file as an optional parameter (some, of course, accept it as a required parameter). Whenever a command has a `.aurora` file parameter, any hooks specified and activated in the `.aurora` file can be used. For example:
+
+    aurora job restart cluster1/role/env/app myapp.aurora
+
+The command activates any hooks specified and activated in `myapp.aurora`. For the `restart` command, that is the only thing the `myapp.aurora` parameter does. So, if the command were the following, no hooks on the `restart` command can run, since there is no `.aurora` config file to specify any.
+
+    aurora job restart cluster1/role/env/app
+
+## Hooks Protocol
+
+Any object defined in the `.aurora` config file can define hook methods. You should define your hook methods within a class, and then use the class name as a value in the `hooks` list in your config file.
+
+Note that you can define other methods in the class for its hook methods to call; not all of a hook's logic has to live in its definition.
+
+The following example defines a class containing a `pre_kill_job` hook definition that calls another method defined in the class.
+
+```python
+# Defines a method pre_kill_job
+class KillConfirmer(object):
+  def confirm(self, msg):
+    return raw_input(msg).lower() == 'yes'
+
+  def pre_kill_job(self, job_key, shards=None):
+    shards = ('shards %s' % shards) if shards is not None else 'all shards'
+    return self.confirm('Are you sure you want to kill %s (%s)? (yes/no): '
+                        % (job_key, shards))
+```
+
+### pre_ Methods
+
+`pre_` methods have the signature:
+
+    pre_<API method name>(self, <associated method's signature>)
+
+`pre_` methods have the same signature as their associated method, with the addition of `self` as the first parameter. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `pre_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+If this method returns False, the API command call aborts.
+
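+For example, a `pre_` hook that only needs the job key can absorb the remaining
+arguments with the `*` and `**` syntax (a hypothetical illustration):
+
+```python
+class RestartAnnouncer(object):
+  def pre_restart(self, job_key, *args, **kw):
+    # Remaining positional and keyword arguments of restart() land in args/kw.
+    print('About to restart %s' % job_key)
+    return True  # allow the restart to proceed
+```
+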
+### err_ Methods
+
+`err_` methods have the signature:
+
+    err_<API method name>(self, exc, <associated method's signature>)
+
+`err_` methods have the same signature as their associated method, with the addition of a first parameter `self` and a second parameter `exc`. `exc` is either a result with responseCode other than `ResponseCode.OK` or an `Exception`. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `err_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+`err_` method return codes are ignored.
+
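+For example, a hypothetical `err_` hook for `kill_job` might simply report the failure:
+
+```python
+class KillFailureLogger(object):
+  def err_kill_job(self, exc, job_key, shards=None):
+    # exc is either a non-OK result or an Exception; the return value is ignored.
+    print('kill_job failed for %s (shards=%s): %s' % (job_key, shards, exc))
+```
+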
+### post_ Methods
+
+`post_` methods have the signature:
+
+    post_<API method name>(self, result, <associated method signature>)
+
+`post_` method parameters are `self`, then `result`, followed by the same parameter signature as their associated method. `result` is the result of the associated method call. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `post_` methods, you can use the `*` and `**` syntax to designate that all unspecified arguments are passed in a list to the `*`ed parameter and all unspecified named arguments with values are passed as name/value pairs to the `**`ed parameter.
+
+`post_` method return codes are ignored.
+
+## Generic Hooks
+
+There are seven Aurora API methods to which any of the three hook types can attach. Thus, there are 21 possible hook/method combinations for a single `.aurora` config file. Say that you define `pre_` and `post_` hooks for the `restart` method. That leaves 19 undefined hook/method combinations: `err_restart` and the 3 `pre_`, `post_`, and `err_` hooks for each of the other 6 hookable methods. You can define what happens when any of these otherwise undefined 19 hooks execute via a generic hook, whose signature is:
+
+```python
+generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw)
+```
+
+where:
+
+* `hook_config` is a named tuple of `config` (the Pystachio `config` object) and `job_key`.
+
+* `event` is one of `pre`, `err`, or `post`, indicating which type of hook the generic hook is standing in for. For example, assume no specific hooks were defined for the `restart` API command. If `generic_hook` is defined and activated, and `restart` is called, `generic_hook` will effectively run as `pre_restart`, `post_restart`, and `err_restart`. You can use a selection statement on this value so that `generic_hook` will act differently based on whether it is standing in for a `pre_`, `post_`, or `err_` hook.
+
+* `method_name` is the Client API method name whose execution is causing this execution of the `generic_hook`.
+
+* `*args`, `**kw` are the API method's positional and keyword arguments respectively.
+* `result_or_err` is a tri-state parameter taking one of these three values:
+  1. `None` for `pre_` hooks
+  2. `result` for `post_` hooks
+  3. `exc` for `err_` hooks
+
+Example:
+
+```python
+# Overrides the standard do-nothing generic_hook by adding a log writing operation.
+from twitter.common import log
+
+class Logger(object):
+  '''Adds to the log every time a hookable API method is called.'''
+  def generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw):
+    log.info('%s: %s_%s of %s'
+             % (self.__class__.__name__, event, method_name, hook_config.job_key))
+```
+
+## Hooks Process Checklist
+
+1. In your `.aurora` config file, add a `hooks` variable. Note that you may want to define a `.aurora` file only for hook definitions and then include this file in multiple other config files where you want to use the same hooks.
+
+```python
+hooks = []
+```
+
+2. In the `hooks` variable, list all objects that define hooks used by `Job`s defined in this config:
+
+```python
+hooks = [Object_hook_definer1, Object_hook_definer2]
+```
+
+3. For each job that uses hooks in this config file, add `enable_hooks = True` to the `Job` definition. Note that this is necessary even if you only want to use the generic hook.
+
+4. Write your `pre_`, `post_`, and `err_` hook definitions as part of an object definition in your `.aurora` config file.
+
+5. If desired, write your `generic_hook` definition as part of an object definition in your `.aurora` config file. Remember, the object must be listed as a member of `hooks`.
+
+6. If your Aurora command line command does not otherwise take an `.aurora` config file argument, add the appropriate `.aurora` file as an argument in order to define and activate the configuration's hooks.
diff --git a/publish/documentation/latest/images/CPUavailability.png b/source/documentation/0.12.0/images/CPUavailability.png
similarity index 100%
copy from publish/documentation/latest/images/CPUavailability.png
copy to source/documentation/0.12.0/images/CPUavailability.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/CompletedTasks.png b/source/documentation/0.12.0/images/CompletedTasks.png
new file mode 100644
index 0000000..343e111
--- /dev/null
+++ b/source/documentation/0.12.0/images/CompletedTasks.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/HelloWorldJob.png b/source/documentation/0.12.0/images/HelloWorldJob.png
new file mode 100644
index 0000000..4977227
--- /dev/null
+++ b/source/documentation/0.12.0/images/HelloWorldJob.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/RoleJobs.png b/source/documentation/0.12.0/images/RoleJobs.png
new file mode 100644
index 0000000..95ffa49
--- /dev/null
+++ b/source/documentation/0.12.0/images/RoleJobs.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/RunningJob.png b/source/documentation/0.12.0/images/RunningJob.png
new file mode 100644
index 0000000..4e7a045
--- /dev/null
+++ b/source/documentation/0.12.0/images/RunningJob.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/ScheduledJobs.png b/source/documentation/0.12.0/images/ScheduledJobs.png
new file mode 100644
index 0000000..0be4731
--- /dev/null
+++ b/source/documentation/0.12.0/images/ScheduledJobs.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/TaskBreakdown.png b/source/documentation/0.12.0/images/TaskBreakdown.png
new file mode 100644
index 0000000..fab702c
--- /dev/null
+++ b/source/documentation/0.12.0/images/TaskBreakdown.png
Binary files differ
diff --git a/publish/documentation/latest/images/aurora_hierarchy.png b/source/documentation/0.12.0/images/aurora_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/aurora_hierarchy.png
copy to source/documentation/0.12.0/images/aurora_hierarchy.png
Binary files differ
diff --git a/publish/assets/img/aurora_logo.png b/source/documentation/0.12.0/images/aurora_logo.png
similarity index 100%
copy from publish/assets/img/aurora_logo.png
copy to source/documentation/0.12.0/images/aurora_logo.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/components.odg b/source/documentation/0.12.0/images/components.odg
new file mode 100644
index 0000000..a0f3b28
--- /dev/null
+++ b/source/documentation/0.12.0/images/components.odg
Binary files differ
diff --git a/source/documentation/0.12.0/images/components.png b/source/documentation/0.12.0/images/components.png
new file mode 100644
index 0000000..e8228c0
--- /dev/null
+++ b/source/documentation/0.12.0/images/components.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/debug-client-test.png b/source/documentation/0.12.0/images/debug-client-test.png
new file mode 100644
index 0000000..8b67a10
--- /dev/null
+++ b/source/documentation/0.12.0/images/debug-client-test.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/debugging-client-test.png b/source/documentation/0.12.0/images/debugging-client-test.png
new file mode 100644
index 0000000..df7b7e3
--- /dev/null
+++ b/source/documentation/0.12.0/images/debugging-client-test.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/killedtask.png b/source/documentation/0.12.0/images/killedtask.png
new file mode 100644
index 0000000..bb1bc92
--- /dev/null
+++ b/source/documentation/0.12.0/images/killedtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/lifeofatask.png b/source/documentation/0.12.0/images/lifeofatask.png
similarity index 100%
copy from publish/documentation/latest/images/lifeofatask.png
copy to source/documentation/0.12.0/images/lifeofatask.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png b/source/documentation/0.12.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
new file mode 100644
index 0000000..ea9866e
--- /dev/null
+++ b/source/documentation/0.12.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png b/source/documentation/0.12.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
new file mode 100644
index 0000000..c9539df
--- /dev/null
+++ b/source/documentation/0.12.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png b/source/documentation/0.12.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
new file mode 100644
index 0000000..99674c7
--- /dev/null
+++ b/source/documentation/0.12.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/presentations/02_28_2015_apache_aurora_thumb.png b/source/documentation/0.12.0/images/presentations/02_28_2015_apache_aurora_thumb.png
new file mode 100644
index 0000000..6e0e1fb
--- /dev/null
+++ b/source/documentation/0.12.0/images/presentations/02_28_2015_apache_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png b/source/documentation/0.12.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
new file mode 100644
index 0000000..f81a128
--- /dev/null
+++ b/source/documentation/0.12.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png b/source/documentation/0.12.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
new file mode 100644
index 0000000..6ebce80
--- /dev/null
+++ b/source/documentation/0.12.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png b/source/documentation/0.12.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
new file mode 100644
index 0000000..f8a3db1
--- /dev/null
+++ b/source/documentation/0.12.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/presentations/08_21_2014_past_present_future_thumb.png b/source/documentation/0.12.0/images/presentations/08_21_2014_past_present_future_thumb.png
new file mode 100644
index 0000000..deaebe7
--- /dev/null
+++ b/source/documentation/0.12.0/images/presentations/08_21_2014_past_present_future_thumb.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png b/source/documentation/0.12.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
new file mode 100644
index 0000000..61ea6d4
--- /dev/null
+++ b/source/documentation/0.12.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png b/source/documentation/0.12.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png
new file mode 100644
index 0000000..9f1f4af
--- /dev/null
+++ b/source/documentation/0.12.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png b/source/documentation/0.12.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
new file mode 100644
index 0000000..a8bb199
--- /dev/null
+++ b/source/documentation/0.12.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png b/source/documentation/0.12.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
new file mode 100644
index 0000000..ac5c6dc
--- /dev/null
+++ b/source/documentation/0.12.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/runningtask.png b/source/documentation/0.12.0/images/runningtask.png
new file mode 100644
index 0000000..9e7e31a
--- /dev/null
+++ b/source/documentation/0.12.0/images/runningtask.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/stderr.png b/source/documentation/0.12.0/images/stderr.png
new file mode 100644
index 0000000..6076a84
--- /dev/null
+++ b/source/documentation/0.12.0/images/stderr.png
Binary files differ
diff --git a/source/documentation/0.12.0/images/stdout.png b/source/documentation/0.12.0/images/stdout.png
new file mode 100644
index 0000000..d614097
--- /dev/null
+++ b/source/documentation/0.12.0/images/stdout.png
Binary files differ
diff --git a/publish/documentation/latest/images/storage_hierarchy.png b/source/documentation/0.12.0/images/storage_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/storage_hierarchy.png
copy to source/documentation/0.12.0/images/storage_hierarchy.png
Binary files differ
diff --git a/source/documentation/0.12.0/index.html.md b/source/documentation/0.12.0/index.html.md
new file mode 100644
index 0000000..470295d
--- /dev/null
+++ b/source/documentation/0.12.0/index.html.md
@@ -0,0 +1,44 @@
+## Introduction
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run long-running services that take advantage of Apache Mesos' scalability, fault-tolerance, and resource isolation. This documentation has been organized into sections with three audiences in mind:
+
+ * Users: General information about the project and how to run an Aurora job.
+ * Operators: For those who wish to manage and fine-tune an Aurora cluster.
+ * Developers: All the information you need to start modifying Aurora and contributing back to the project.
+
+We encourage you to ask questions on the [Aurora user list](http://aurora.apache.org/community/) or the `#aurora` IRC channel on `irc.freenode.net`.
+
+## Users
+ * [Install Aurora on virtual machines on your local machine](/documentation/0.12.0/vagrant/)
+ * [Hello World Tutorial](/documentation/0.12.0/tutorial/)
+ * [User Guide](/documentation/0.12.0/user-guide/)
+ * [Configuration Tutorial](/documentation/0.12.0/configuration-tutorial/)
+ * [Aurora + Thermos Reference](/documentation/0.12.0/configuration-reference/)
+ * [Command Line Client](/documentation/0.12.0/client-commands/)
+ * [Client cluster configuration](/documentation/0.12.0/client-cluster-configuration/)
+ * [Cron Jobs](/documentation/0.12.0/cron-jobs/)
+
+## Operators
+ * [Installation](/documentation/0.12.0/installing/)
+ * [Deployment and cluster configuration](/documentation/0.12.0/deploying-aurora-scheduler/)
+ * [Security](/documentation/0.12.0/security/)
+ * [Monitoring](/documentation/0.12.0/monitoring/)
+ * [Hooks for Aurora Client API](/documentation/0.12.0/hooks/)
+ * [Scheduler Storage](/documentation/0.12.0/storage/)
+ * [Scheduler Storage and Maintenance](/documentation/0.12.0/storage-config/)
+ * [SLA Measurement](/documentation/0.12.0/sla/)
+ * [Resource Isolation and Sizing](/documentation/0.12.0/resources/)
+
+## Developers
+ * [Contributing to the project](contributing/)
+ * [Developing the Aurora Scheduler](/documentation/0.12.0/developing-aurora-scheduler/)
+ * [Developing the Aurora Client](/documentation/0.12.0/developing-aurora-client/)
+ * [Committers Guide](/documentation/0.12.0/committers/)
+ * [Design Documents](/documentation/0.12.0/design-documents/)
+ * [Deprecation Guide](/documentation/0.12.0/thrift-deprecation/)
+ * [Build System](/documentation/0.12.0/build-system/)
+ * [Generating test resources](/documentation/0.12.0/test-resource-generation/)
+
+
+## Additional Resources
+ * [Tools integrating with Aurora](/documentation/0.12.0/tools/)
+ * [Presentation videos and slides](/documentation/0.12.0/presentations/)
diff --git a/source/documentation/0.12.0/installing.md b/source/documentation/0.12.0/installing.md
new file mode 100644
index 0000000..f8a96ee
--- /dev/null
+++ b/source/documentation/0.12.0/installing.md
@@ -0,0 +1,279 @@
+# Installing Aurora
+
+- [Components](#components)
+    - [Machine profiles](#machine-profiles)
+      - [Coordinator](#coordinator)
+      - [Worker](#worker)
+      - [Client](#client)
+- [Getting Aurora](#getting-aurora)
+    - [Building your own binary packages](#building-your-own-binary-packages)
+    - [RPMs](#rpms)
+- [Installing the scheduler](#installing-the-scheduler)
+    - [Ubuntu Trusty](#ubuntu-trusty)
+    - [CentOS 7](#centos-7)
+    - [Finalizing](#finalizing)
+    - [Configuration](#configuration)
+- [Installing worker components](#installing-worker-components)
+    - [Ubuntu Trusty](#ubuntu-trusty-1)
+    - [CentOS 7](#centos-7-1)
+    - [Configuration](#configuration-1)
+- [Installing the client](#installing-the-client)
+    - [Ubuntu Trusty](#ubuntu-trusty-2)
+    - [CentOS 7](#centos-7-2)
+    - [Configuration](#configuration-2)
+- [See also](#see-also)
+- [Installing Mesos](#installing-mesos)
+    - [Mesos on Ubuntu Trusty](#mesos-on-ubuntu-trusty)
+    - [Mesos on CentOS 7](#mesos-on-centos-7)
+
+## Components
+Before installing Aurora, it's important to have an understanding of the components that make up
+a functioning Aurora cluster.
+
+![Aurora Components](images/components.png)
+
+* **Aurora scheduler**  
+  The scheduler will be your primary interface to the work you run in your cluster.  You will
+  instruct it to run jobs, and it will manage them in Mesos for you.  You will also frequently use
+  the scheduler's web interface as a heads-up display for what's running in your cluster.
+
+* **Aurora client**  
+  The client (`aurora` command) is a command line tool that exposes primitives that you can use to
+  interact with the scheduler.
+
+  Aurora also provides an admin client (`aurora_admin` command) that contains commands built for
+  cluster administrators.  You can use this tool to do things like manage user quotas and manage
+  graceful maintenance on machines in cluster.
+
+* **Aurora executor**  
+  The executor (a.k.a. Thermos executor) is responsible for carrying out the workloads described in
+  the Aurora DSL (`.aurora` files).  The executor is what actually executes user processes.  It will
+  also perform health checking of tasks and register tasks in ZooKeeper for the purposes of dynamic
+  service discovery.  You can find lots more detail on the executor and Thermos in the
+  [user guide](/documentation/0.12.0/user-guide/).
+
+* **Aurora observer**  
+  The observer provides browser-based access to the status of individual tasks executing on worker
+  machines.  It gives insight into the processes executing, and facilitates browsing of task sandbox
+  directories.
+
+* **ZooKeeper**  
+  [ZooKeeper](http://zookeeper.apache.org) is a distributed consensus system.  In an Aurora cluster
+  it is used for reliable election of the leading Aurora scheduler and Mesos master.
+
+* **Mesos master**  
+  The master is responsible for tracking worker machines and performing accounting of their
+  resources.  The scheduler interfaces with the master to control the cluster.
+
+* **Mesos agent**  
+  The agent receives work assigned by the scheduler and executes it.  It interfaces with Linux
+  isolation systems like cgroups, namespaces and Docker to manage the resource consumption of tasks.
+  When a user task is launched, the agent will launch the executor (in the context of a Linux cgroup
+  or Docker container depending upon the environment), which will in turn fork user processes.
+
+## Machine profiles
+Given that many of these components communicate over the network, there are numerous ways you could
+assemble them to create an Aurora cluster.  The simplest way is to think in terms of three machine
+profiles:
+
+### Coordinator
+**Components**: ZooKeeper, Aurora scheduler, Mesos master
+
+A small number of machines (typically 3 or 5) responsible for cluster orchestration.  In most cases
+it is fine to co-locate these components in anything but very large clusters (> 1000 machines).
+Beyond that point, operators will likely want to manage these services on separate machines.
+
+In practice, 5 coordinators have been shown to reliably manage clusters with tens of thousands of
+machines.
+
+
+### Worker
+**Components**: Aurora executor, Aurora observer, Mesos agent
+
+The bulk of the cluster, where services will actually run.
+
+### Client
+**Components**: Aurora client, Aurora admin client
+
+Any machines that users submit jobs from.
+
+## Getting Aurora
+Source and binary distributions can be found on our
+[downloads](https://aurora.apache.org/downloads/) page.  Installing from binary packages is
+recommended for most users.
+
+### Building your own binary packages
+Our package build toolchain makes it easy to build your own packages if you would like.  See the
+[instructions](https://github.com/apache/aurora-packaging) to learn how.
+
+### RPMs
+We currently have work in progress to provide official RPMs.  As of this writing, the suggested way
+to get RPMs is to [build them](#building-your-own-binary-packages).
+
+We do have unofficial experimental RPMs available for testing purposes.
+
+**Use these RPMs at your own risk; they are not officially released under the ASF guidelines.**
+
+    echo '[apache-aurora-wfarner]
+    name=Apache Aurora distribution maintained by wfarner
+    baseurl=http://people.apache.org/~wfarner/aurora/distributions/0.9.0/rpm/centos-7/x86_64/
+    gpgcheck = 0' | sudo tee /etc/yum.repos.d/apache-aurora-wfarner.repo > /dev/null
+
+
+## Installing the scheduler
+### Ubuntu Trusty
+
+1. Install Mesos  
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        sudo start mesos-master
+
+2. Install ZooKeeper
+
+        sudo apt-get install -y zookeeperd
+
+3. Install the Aurora scheduler
+
+        wget -c https://apache.bintray.com/aurora/aurora-scheduler_0.10.0-1_amd64.deb
+        sudo dpkg -i aurora-scheduler_0.10.0-1_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos  
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-master
+
+2. Install ZooKeeper
+
+        sudo rpm -Uvh https://archive.cloudera.com/cdh4/one-click-install/redhat/6/x86_64/cloudera-cdh-4-0.x86_64.rpm
+        sudo yum install -y java-1.8.0-openjdk-headless zookeeper-server
+
+        sudo service zookeeper-server init
+        sudo systemctl start zookeeper-server
+
+3. Install the Aurora scheduler  
+   If you haven't already, read the section on [how to get Aurora RPMs](#rpms).
+
+        # Note: for older Aurora RPM versions, this may be called 'aurora'.
+        sudo yum install -y aurora-scheduler
+
+Note: if you are using the unreleased 0.9.0 RPM, you will need to edit `/etc/sysconfig/aurora`:  
+Change  
+`-mesos_master_address='zk://127.0.0.1:2181/mesos/master'`  
+To  
+`-mesos_master_address='zk://127.0.0.1:2181/mesos'`  
+And  
+`-native_log_file_path='/var/lib/aurora/db'`  
+To  
+`-native_log_file_path='/var/lib/aurora/scheduler/db'`
+
+### Finalizing
+By default, the scheduler will start in an uninitialized mode.  This is because external
+coordination is necessary to be certain operator error does not result in a quorum of schedulers
+starting up and believing their databases are empty when in fact they should be re-joining a
+cluster.
+
+Because of this, a fresh install of the scheduler will need intervention to start up.  First,
+stop the scheduler service.  
+Ubuntu: `sudo stop aurora-scheduler`  
+CentOS: `sudo systemctl stop aurora`
+
+Now initialize the database:
+
+    sudo -u aurora mkdir -p /var/lib/aurora/scheduler/db
+    sudo -u aurora mesos-log initialize --path=/var/lib/aurora/scheduler/db
+
+Now you can start the scheduler back up.  
+Ubuntu: `sudo start aurora-scheduler`  
+CentOS: `sudo systemctl start aurora`
+
+### Configuration
+For more detail on this topic, see the dedicated page on
+[deploying the scheduler](/documentation/0.12.0/deploying-aurora-scheduler/)
+
+
+## Installing worker components
+### Ubuntu Trusty
+
+1. Install Mesos  
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        sudo start mesos-slave
+
+2. Install Aurora executor and observer
+
+        wget -c https://apache.bintray.com/aurora/aurora-executor_0.10.0-1_amd64.deb
+        sudo dpkg -i aurora-executor_0.10.0-1_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos  
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-slave
+
+2. Install Aurora executor and observer  
+   If you haven't already, read the section on [how to get Aurora RPMs](#rpms).
+
+        # Note: for older Aurora RPM versions, this may be called 'aurora-thermos'.
+        sudo yum install -y aurora-executor
+
+### Configuration
+The executor and observer typically do not require much configuration.  Command line arguments can
+be passed through to the executor via a command line argument on the scheduler.
+
+## Installing the client
+### Ubuntu Trusty
+
+    sudo apt-get install -y python2.7 wget
+
+    wget https://apache.bintray.com/aurora/aurora-tools_0.10.0-1_amd64.deb
+    sudo dpkg -i aurora-tools_0.10.0-1_amd64.deb
+
+### CentOS 7
+If you haven't already, read the section on [how to get Aurora RPMs](#rpms).
+
+    # Note: for older Aurora RPM versions, this may be called 'aurora-client'.
+    sudo yum install -y aurora-tools
+
+### Configuration
+Client configuration lives in a JSON file that describes the clusters available and how to reach
+them.  By default this file is at `/etc/aurora/clusters.json`.
+
+Jobs may be submitted to the scheduler using the client, and are described with
+[job configurations](/documentation/0.12.0/configuration-reference/) expressed in `.aurora` files.  Typically you will
+maintain a single job configuration file to describe one or more deployment environments (e.g.
+dev, test, prod) for a production job.
+
+## See also
+We have other docs that you will find useful once you have your cluster up and running:
+
+- [Monitor](/documentation/0.12.0/monitoring/) your cluster
+- Enable scheduler [security](/documentation/0.12.0/security/)
+- View job SLA [statistics](/documentation/0.12.0/sla/)
+- Understand the internals of the scheduler's [storage](/documentation/0.12.0/storage/)
+
+## Installing Mesos
+Mesos uses a single package for the Mesos master and slave.  As a result, the package dependencies
+are identical for both.
+
+### Mesos on Ubuntu Trusty
+
+    sudo apt-get update
+    sudo apt-get install -y software-properties-common
+    sudo add-apt-repository ppa:openjdk-r/ppa -y
+    sudo apt-get update
+
+    sudo apt-get install -y wget libsvn1 libcurl3 openjdk-8-jre-headless
+
+    # NOTE: This appears to be a missing dependency of the mesos deb package.
+    sudo apt-get install -y libcurl4-nss-dev
+
+    wget -c http://downloads.mesosphere.io/master/ubuntu/14.04/mesos_0.23.0-1.0.ubuntu1404_amd64.deb
+    sudo dpkg -i mesos_0.23.0-1.0.ubuntu1404_amd64.deb
+
+### Mesos on CentOS 7
+
+    sudo rpm -Uvh http://repos.mesosphere.io/el/7/noarch/RPMS/mesosphere-el-repo-7-1.noarch.rpm
+    sudo yum install -y mesos-0.22.0
diff --git a/source/documentation/0.12.0/monitoring.md b/source/documentation/0.12.0/monitoring.md
new file mode 100644
index 0000000..3cb2a79
--- /dev/null
+++ b/source/documentation/0.12.0/monitoring.md
@@ -0,0 +1,181 @@
+# Monitoring your Aurora cluster
+
+Before you start running important services in your Aurora cluster, you should set up
+monitoring and alerting of Aurora itself.  Most of your monitoring can be against the scheduler,
+since it will give you a global view of what's going on.
+
+## Reading stats
+The scheduler exposes a *lot* of instrumentation data via its HTTP interface. You can get a quick
+peek at the first few of these in our vagrant image:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars | head'
+    async_tasks_completed 1004
+    attribute_store_fetch_all_events 15
+    attribute_store_fetch_all_events_per_sec 0.0
+    attribute_store_fetch_all_nanos_per_event 0.0
+    attribute_store_fetch_all_nanos_total 3048285
+    attribute_store_fetch_all_nanos_total_per_sec 0.0
+    attribute_store_fetch_one_events 3391
+    attribute_store_fetch_one_events_per_sec 0.0
+    attribute_store_fetch_one_nanos_per_event 0.0
+    attribute_store_fetch_one_nanos_total 454690753
+
+These values are served as `Content-Type: text/plain`, with each line containing a space-separated metric
+name and value. Values may be integers, doubles, or strings (note: strings are static, others
+may be dynamic).
+
+If your monitoring infrastructure prefers JSON, the scheduler exports that as well:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars.json | python -mjson.tool | head'
+    {
+        "async_tasks_completed": 1009,
+        "attribute_store_fetch_all_events": 15,
+        "attribute_store_fetch_all_events_per_sec": 0.0,
+        "attribute_store_fetch_all_nanos_per_event": 0.0,
+        "attribute_store_fetch_all_nanos_total": 3048285,
+        "attribute_store_fetch_all_nanos_total_per_sec": 0.0,
+        "attribute_store_fetch_one_events": 3409,
+        "attribute_store_fetch_one_events_per_sec": 0.0,
+        "attribute_store_fetch_one_nanos_per_event": 0.0,
+
+This will be the same data as above, served with `Content-Type: application/json`.
+
+## Viewing live stat samples on the scheduler
+The scheduler uses the Twitter commons stats library, which keeps an internal time-series database
+of exported variables - nearly everything in `/vars` is available for instant graphing.  This is
+useful for debugging, but is not a replacement for an external monitoring system.
+
+You can view these graphs on a scheduler at `/graphview`.  It supports some composition and
+aggregation of values, which can be invaluable when triaging a problem.  For example, if you have
+the scheduler running in vagrant, check out these links:
+[simple graph](http://192.168.33.7:8081/graphview?query=jvm_uptime_secs)
+[complex composition](http://192.168.33.7:8081/graphview?query=rate\(scheduler_log_native_append_nanos_total\)%2Frate\(scheduler_log_native_append_events\)%2F1e6)
+
+### Counters and gauges
+Among numeric stats, there are two fundamental types of stats exported: _counters_ and _gauges_.
+Counters are guaranteed to be monotonically-increasing for the lifetime of a process, while gauges
+may decrease in value.  Aurora uses counters to represent things like the number of times an event
+has occurred, and gauges to capture things like the current length of a queue.  Counters are a
+natural fit for accurate composition into [rate ratios](http://en.wikipedia.org/wiki/Rate_ratio)
+(useful for sample-resistant latency calculation), while gauges are not.
+
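+As a concrete example of composing counters into a rate ratio, dividing the delta of a
+"nanos_total" counter by the delta of its "events" counter over the same window yields an
+average latency that is robust to missed samples. A minimal sketch against the scheduler's
+`/vars.json` endpoint (Python 2 to match the tooling above; host and interval are illustrative):
+
+    import json, time, urllib2
+
+    def sample():
+      return json.load(urllib2.urlopen('http://localhost:8081/vars.json'))
+
+    a = sample(); time.sleep(60); b = sample()
+    nanos = b['scheduler_log_native_append_nanos_total'] - a['scheduler_log_native_append_nanos_total']
+    events = b['scheduler_log_native_append_events'] - a['scheduler_log_native_append_events']
+    if events:
+      print('avg log append latency: %.2f ms' % (float(nanos) / events / 1e6))
+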
+# Alerting
+
+## Quickstart
+If you are looking for just bare-minimum alerting to get something in place quickly, set up alerting
+on `framework_registered` and `task_store_LOST`. These will give you a decent picture of overall
+health.
+
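+A bare-bones check along those lines (the threshold and endpoint here are illustrative, not
+recommendations):
+
+    import json, urllib2
+
+    stats = json.load(urllib2.urlopen('http://localhost:8081/vars.json'))
+    if stats.get('framework_registered') != 1:
+      print('ALERT: scheduler is not registered with the Mesos master')
+    if stats.get('task_store_LOST', 0) > 10:
+      print('ALERT: %d LOST tasks' % stats['task_store_LOST'])
+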
+## A note on thresholds
+One of the most difficult things in monitoring is choosing alert thresholds. With many of these
+stats, there is no value we can offer as a threshold that will be guaranteed to work for you. It
+will depend on the size of your cluster, number of jobs, churn of tasks in the cluster, etc. We
+recommend you start with a strict value after viewing a small amount of collected data, and then
+adjust thresholds as you see fit. Feel free to ask us if you would like to validate that your alerts
+and thresholds make sense.
+
+## Important stats
+
+### `jvm_uptime_secs`
+Type: integer counter
+
+The number of seconds the JVM process has been running. Comes from
+[RuntimeMXBean#getUptime()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/RuntimeMXBean.html#getUptime\(\)).
+
+Detecting resets (decreasing values) on this stat will tell you that the scheduler is failing to
+stay alive.
+
+Look at the scheduler logs to identify the reason the scheduler is exiting.
+
+### `system_load_avg`
+Type: double gauge
+
+The current load average of the system for the last minute. Comes from
+[OperatingSystemMXBean#getSystemLoadAverage()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/OperatingSystemMXBean.html?is-external=true#getSystemLoadAverage\(\)).
+
+A high sustained value suggests that the scheduler machine may be over-utilized.
+
+Use standard unix tools like `top` and `ps` to track down the offending process(es).
+
+### `process_cpu_cores_utilized`
+Type: double gauge
+
+The current number of CPU cores in use by the JVM process. This should not exceed the number of
+logical CPU cores on the machine. Derived from
+[OperatingSystemMXBean#getProcessCpuTime()](http://docs.oracle.com/javase/7/docs/jre/api/management/extension/com/sun/management/OperatingSystemMXBean.html).
+
+A high sustained value indicates that the scheduler is overworked. Due to current internal design
+limitations, if this value is sustained at `1`, there is a good chance the scheduler is under water.
+
+There are two main inputs that tend to drive this figure: task scheduling attempts and status
+updates from Mesos.  You may see activity in the scheduler logs to give an indication of where
+time is being spent.  Beyond that, it really takes good familiarity with the code to effectively
+triage this.  We suggest engaging with an Aurora developer.
+
+### `task_store_LOST`
+Type: integer gauge
+
+The number of tasks stored in the scheduler that are in the `LOST` state, and have been rescheduled.
+
+If this value is increasing at a high rate, it is a sign of trouble.
+
+There are many sources of `LOST` tasks in Mesos: the scheduler, master, slave, and executor can all
+trigger this.  The first step is to look in the scheduler logs for `LOST` to identify where the
+state changes are originating.
+
+### `scheduler_resource_offers`
+Type: integer counter
+
+The number of resource offers that the scheduler has received.
+
+For a healthy scheduler, this value must be increasing over time.
+
+Assuming the scheduler is up and otherwise healthy, you will want to check whether the master
+thinks it is sending offers. You should also look at the master's web interface to see if it has a
+large number of outstanding offers waiting to be returned.
+
+### `framework_registered`
+Type: binary integer counter
+
+Will be `1` for the leading scheduler that is registered with the Mesos master, and `0` for
+passive schedulers.
+
+A sustained period without a `1` (or where `sum() != 1`) warrants investigation.
+
+If there is no leading scheduler, look in the scheduler and master logs for why.  If there are
+multiple schedulers claiming leadership, this suggests a split brain and warrants filing a critical
+bug.
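+
+A sketch of the `sum() != 1` check across the scheduler ensemble (hypothetical alerting
+pseudocode; `registered` holds the `framework_registered` value sampled from each scheduler):
+
+    def check_leadership(registered):
+        total = sum(registered)
+        if total == 0:
+            return 'ALERT: no leading scheduler registered with the master'
+        if total > 1:
+            return 'CRITICAL: multiple schedulers claim leadership (split brain)'
+        return 'OK'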
+
+### `rate(scheduler_log_native_append_nanos_total)/rate(scheduler_log_native_append_events)`
+Type: rate ratio of integer counters
+
+This composes two counters to compute a windowed figure for the latency of replicated log writes.
+
+A hike in this value suggests disk bandwidth contention.
+
+Look in scheduler logs for any reported oddness with saving to the replicated log. Also use
+standard tools like `vmstat` and `iotop` to identify whether the disk has become slow or
+over-utilized. We suggest using a dedicated disk for the replicated log to mitigate this.
+
+### `timed_out_tasks`
+Type: integer counter
+
+Tracks the number of times the scheduler has given up while waiting
+(for `-transient_task_state_timeout`) to hear back about a task in a transient state
+(e.g. `ASSIGNED`, `KILLING`), and has moved it to `LOST` before rescheduling.
+
+This value is currently known to increase occasionally when the scheduler fails over
+([AURORA-740](https://issues.apache.org/jira/browse/AURORA-740)). However, any large spike in this
+value warrants investigation.
+
+The scheduler will log when it times out a task. You should trace the task ID of the timed out
+task into the master, slave, and/or executors to determine where the message was dropped.
+
+### `http_500_responses_events`
+Type: integer counter
+
+The total number of HTTP 500 status responses sent by the scheduler. Includes API and asset serving.
+
+An increase warrants investigation.
+
+Look in the scheduler logs to identify why the scheduler returned a 500; there should be a stack trace.
diff --git a/source/documentation/0.12.0/presentations.md b/source/documentation/0.12.0/presentations.md
new file mode 100644
index 0000000..a6082a8
--- /dev/null
+++ b/source/documentation/0.12.0/presentations.md
@@ -0,0 +1,80 @@
+# Apache Aurora Presentations
+Video and slides from presentations and panel discussions about Apache Aurora.
+
+_(Listed in date descending order)_
+
+<table>
+
+	<tr>
+		<td><img src="/documentation/0.12.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png" alt="Mesos and Aurora on a Small Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=q5iIqhaCJ_o">Mesos &amp; Aurora on a Small Scale (Video)</a></strong>
+		<p>Presented by Florian Pfeiffer</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.12.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png" alt="SLA Aware Maintenance for Operators Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=tZ0-SISvCis">SLA Aware Maintenance for Operators (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.12.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png" alt="Shipping Code with Aurora Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=y1hi7K1lPkk">Shipping Code with Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.12.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png" alt="Twitter Production Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=nNrh-gdu9m4">Twitter’s Production Scale: Mesos and Aurora Operations (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.12.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png" alt="From Monolith to Microservices with Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=yXkOgnyK4Hw">From Monolith to Microservices w/ Aurora (Video)</a></strong>
+		<p>Presented by Thanos Baskous, Tony Dong, Dobromir Montauk</p>
+		<p>April 30, 2015 at <a href="http://www.meetup.com/Bay-Area-Apache-Aurora-Users-Group/events/221219480/">Bay Area Apache Aurora Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.12.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png" alt="Aurora + Mesos in Practice at Twitter Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=1XYJGX_qZVU">Aurora + Mesos in Practice at Twitter (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 07, 2015 at <a href="http://www.bigeng.io/aurora-mesos-in-practice-at-twitter">Bigcommerce TechTalk</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.12.0/images/presentations/02_28_2015_apache_aurora_thumb.png" alt="Apache Auroraの始めかた Slideshow Thumbnail" /></td>
+		<td><strong><a href="http://www.slideshare.net/zembutsu/apache-aurora-introduction-and-tutorial-osc15tk">Apache Auroraの始めかた (Slides)</a></strong>
+		<p>Presented by Masahito Zembutsu</p>
+		<p>February 28, 2015 at <a href="http://www.ospn.jp/osc2015-spring/">Open Source Conference 2015 Tokyo Spring</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.12.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png" alt="Apache Aurora Adopters Panel Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=2Jsj0zFdRlg">Apache Aurora Adopters Panel (Video)</a></strong>
+		<p>Panelists Ben Staffin, Josh Adams, Bill Farner, Berk Demir</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.12.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png" alt="Operating Apache Aurora and Mesos at Twitter Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=E4lxX6epM_U">Operating Apache Aurora and Mesos at Twitter (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.12.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png" alt="Apache Aurora and Mesos at TellApart" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=ZZXtXLvTXAE">Apache Aurora and Mesos at TellApart (Video)</a></strong>
+		<p>Presented by Steve Niemitz</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.12.0/images/presentations/08_21_2014_past_present_future_thumb.png" alt="Past, Present, and Future of the Aurora Scheduler Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=Dsc5CPhKs4o">Past, Present, and Future of the Aurora Scheduler (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 21, 2014 at <a href="http://events.linuxfoundation.org/events/archive/2014/mesoscon">#MesosCon 2014</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.12.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png" alt="Introduction to Apache Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=asd_h6VzaJc">Introduction to Apache Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 25, 2014 at <a href="https://www.eventbrite.com/e/aurora-and-mesosframeworksmeetup-tickets-10850994617">Aurora and Mesos Frameworks Meetup</a></p></td>
+	</tr>
+</table>
diff --git a/source/documentation/0.12.0/resources.md b/source/documentation/0.12.0/resources.md
new file mode 100644
index 0000000..50b8eeb
--- /dev/null
+++ b/source/documentation/0.12.0/resources.md
@@ -0,0 +1,164 @@
+Resources and Sizing
+=============================
+
+- [Introduction](#introduction)
+- [CPU Isolation](#cpu-isolation)
+- [CPU Sizing](#cpu-sizing)
+- [Memory Isolation](#memory-isolation)
+- [Memory Sizing](#memory-sizing)
+- [Disk Space](#disk-space)
+- [Disk Space Sizing](#disk-space-sizing)
+- [Other Resources](#other-resources)
+- [Resource Quota](#resource-quota)
+- [Task Preemption](#task-preemption)
+
+## Introduction
+
+Aurora is a multi-tenant system; a single software instance runs on a
+server, serving multiple clients/tenants. To share resources among
+tenants, it implements isolation of:
+
+* CPU
+* memory
+* disk space
+
+CPU is a soft limit, and handled differently from memory and disk space.
+Too low a CPU value results in throttling your application and
+slowing it down. Memory and disk space are both hard limits; when your
+application goes over these values, it's killed.
+
+Let's look at each resource type in more detail:
+
+## CPU Isolation
+
+Mesos uses a quota-based CPU scheduler (the *Completely Fair Scheduler*)
+to provide consistent and predictable performance.  This is effectively
+a guarantee of resources -- you receive at least what you requested, but
+also no more than you've requested.
+
+The scheduler gives applications a CPU quota for every 100 ms interval.
+When an application uses its quota for an interval, it is throttled for
+the rest of the 100 ms. Usage resets for each interval and unused
+quota does not carry over.
+
+For example, an application specifying 4.0 CPU has access to 400 ms of
+CPU time every 100 ms. This CPU quota can be used in different ways,
+depending on the application and available resources. Consider the
+scenarios shown in this diagram.
+
+![CPU Availability](images/CPUavailability.png)
+
+* *Scenario A*: the application can use up to 4 cores continuously for
+every 100 ms interval. It is never throttled and starts processing
+new requests immediately.
+
+* *Scenario B* : the application uses up to 8 cores (depending on
+availability) but is throttled after 50 ms. The CPU quota resets at the
+start of each new 100 ms interval.
+
+* *Scenario C* : is like Scenario A, but there is a garbage collection
+event in the second interval that consumes all CPU quota. The
+application throttles for the remaining 75 ms of that interval and
+cannot service requests until the next interval. In this example, the
+garbage collection finished in one interval but, depending on how much
+garbage needs collecting, it may take more than one interval and further
+delay service of requests.
+
+*Technical Note*: Mesos considers logical cores, also known as
+hyperthreading or SMT cores, as the unit of CPU.
+
+## CPU Sizing
+
+To correctly size Aurora-run Mesos tasks, specify a per-shard CPU value that lets the task run at
+its desired performance when peak load is distributed across all shards. Include reserve capacity
+of at least 50%, possibly more, depending on how critical your service is (or how confident you
+are in your original estimate :-)), ideally by increasing the number of shards, which also
+improves resiliency. When running your application, observe its CPU stats over time. If it is
+consistently at or near your quota during peak load, consider increasing either the per-shard CPU
+or the number of shards.
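+
+For example, if your service needs roughly 8 cores in total at peak and runs 4 shards, each shard
+needs 2 cores of steady-state capacity; with a 50% reserve, you would specify a per-shard value of
+3.0 CPUs.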
+
+## Memory Isolation
+
+Mesos uses dedicated memory allocation. Your application always has
+access to the amount of memory specified in your configuration. The
+application's memory use is defined as the sum of the resident set size
+(RSS) of all processes in a shard. Each shard is considered
+independently.
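+
+As a sketch of this accounting (hypothetical monitoring code using the third-party `psutil`
+library; this is not anything Aurora ships):
+
+```python
+import psutil
+
+def shard_rss_bytes(root_pid):
+    """Sum the RSS of a shard's root process and all of its descendants."""
+    root = psutil.Process(root_pid)
+    procs = [root] + root.children(recursive=True)
+    return sum(p.memory_info().rss for p in procs)
+```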
+
+In other words, say you specified a memory size of 10GB. Each shard
+would receive 10GB of memory. If an individual shard's memory demands
+exceed 10GB, that shard is killed, but the other shards continue
+working.
+
+*Technical note*: Total memory size is not enforced at allocation time,
+so your application can request more than its allocation without getting
+an ENOMEM. However, it will be killed shortly after.
+
+## Memory Sizing
+
+Size for your application's peak requirement. Observe the per-instance
+memory statistics over time, as memory requirements can vary over
+different periods. Remember that if your application exceeds its memory
+value, it will be killed, so you should also add a safety margin of
+around 10-20%. If you have the ability to do so, you may also want to
+put alerts on the per-instance memory.
+
+## Disk Space
+
+Disk space used by your application is defined as the sum of the files'
+disk space in your application's directory, including the `stdout` and
+`stderr` logged from your application. Each shard is considered
+independently. You should use off-node storage for your application's
+data whenever possible.
+
+In other words, say you specified disk space size of 100MB. Each shard
+would receive 100MB of disk space. If an individual shard's disk space
+demands exceed 100MB, that shard is killed, but the other shards
+continue working.
+
+After your application finishes running, its allocated disk space is
+reclaimed. Thus, your job's final action should move any disk content
+that you want to keep, such as logs, to your home file system or other
+less transitory storage. Disk reclamation takes place an undefined
+period after the application finish time; until then, the disk contents
+are still available but you shouldn't count on them being so.
+
+*Technical note*: Disk space is not enforced at write time, so your
+application can write above its quota without getting an ENOSPC, but it
+will be killed shortly after. This is subject to change.
+
+## Disk Space Sizing
+
+Size for your application's peak requirement. Rotate and discard log
+files as needed to stay within your quota. When running a Java process,
+add the maximum size of the Java heap to your disk space requirement, in
+order to account for an out of memory error dumping the heap
+into the application's sandbox space.
+
+## Other Resources
+
+Other resources, such as network bandwidth, do not have any performance
+guarantees. For some resources, such as memory bandwidth, there are no
+practical sharing methods, so some application combinations collocated on
+the same host may cause contention.
+
+## Resource Quota
+
+Aurora requires resource quotas for
+[production non-dedicated jobs](/documentation/0.12.0/configuration-reference/#job-objects). Quota is enforced at
+the job role level and, when set, defines a non-preemptible pool of compute resources within
+that role.
+
+To grant quota to a particular role in production, use the `aurora_admin set_quota` command.
+
+NOTE: all job types (service, adhoc or cron) require role resource quota unless a job has a
+[dedicated constraint](/documentation/0.12.0/deploying-aurora-scheduler/#dedicated-attribute) set.
+
+## Task Preemption
+
+Under resource shortage pressure, tasks from
+[production](/documentation/0.12.0/configuration-reference/#job-objects) jobs may preempt tasks from any non-production
+job. A production task may only be preempted by tasks from production jobs in the same role with
+higher [priority](/documentation/0.12.0/configuration-reference/#job-objects).
\ No newline at end of file
diff --git a/source/documentation/0.12.0/security.md b/source/documentation/0.12.0/security.md
new file mode 100644
index 0000000..32bea42
--- /dev/null
+++ b/source/documentation/0.12.0/security.md
@@ -0,0 +1,279 @@
+Aurora integrates with [Apache Shiro](http://shiro.apache.org/) to provide security
+controls for its API. In addition to providing some useful features out of the box, Shiro
+also allows Aurora cluster administrators to adapt the security system to their organization’s
+existing infrastructure.
+
+- [Enabling Security](#enabling-security)
+- [Authentication](#authentication)
+	- [HTTP Basic Authentication](#http-basic-authentication)
+		- [Server Configuration](#server-configuration)
+		- [Client Configuration](#client-configuration)
+	- [HTTP SPNEGO Authentication (Kerberos)](#http-spnego-authentication-kerberos)
+		- [Server Configuration](#server-configuration-1)
+		- [Client Configuration](#client-configuration-1)
+- [Authorization](#authorization)
+	- [Using an INI file to define security controls](#using-an-ini-file-to-define-security-controls)
+		- [Caveats](#caveats)
+- [Implementing a Custom Realm](#implementing-a-custom-realm)
+	- [Packaging a realm module](#packaging-a-realm-module)
+- [Known Issues](#known-issues)
+
+# Enabling Security
+
+There are two major components of security:
+[authentication and authorization](http://en.wikipedia.org/wiki/Authentication#Authorization).  A
+cluster administrator may choose the approach used for each, and may also implement custom
+mechanisms for either.  Later sections describe the options available.
+
+# Authentication
+
+The scheduler must be configured with instructions for how to process authentication
+credentials at a minimum.  There are currently two built-in authentication schemes -
+[HTTP Basic Authentication](http://en.wikipedia.org/wiki/Basic_access_authentication), and
+[SPNEGO](http://en.wikipedia.org/wiki/SPNEGO) (Kerberos).
+
+## HTTP Basic Authentication
+
+Basic Authentication is a very quick way to add *some* security.  It is supported
+by all major browsers and HTTP client libraries with minimal work.  However,
+before relying on Basic Authentication you should be aware of the [security
+considerations](http://tools.ietf.org/html/rfc2617#section-4).
+
+### Server Configuration
+
+At a minimum you need to set the following command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_realm_modules=INI_AUTHNZ
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+[users]
+sally = apple, admin
+
+[roles]
+admin = *
+```
+
+The details of the security.ini file are explained below. Note that this file contains plaintext,
+unhashed passwords.
+
+### Client Configuration
+
+To configure the client for HTTP Basic authentication, add an entry to `~/.netrc` with your credentials:
+
+```
+% cat ~/.netrc
+# ...
+
+machine aurora.example.com
+login sally
+password apple
+
+# ...
+```
+
+No changes are required to `clusters.json`.
+
+## HTTP SPNEGO Authentication (Kerberos)
+
+### Server Configuration
+At a minimum you need to set the following command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=NEGOTIATE
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+% cat path/to/security.ini
+[users]
+sally = _, admin
+
+[roles]
+admin = *
+```
+
+What's going on here? First, Aurora must be configured to request Kerberos credentials when presented with an
+unauthenticated request. This is achieved by setting
+
+```
+-http_authentication_mechanism=NEGOTIATE
+```
+
+Next, a Realm module must be configured to **authenticate** the current request using the Kerberos
+credentials that were requested. Aurora ships with a realm module that can do this:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN[,...]
+```
+
+The Kerberos5Realm requires a keytab file and a server principal name. The principal name will usually
+be in the form `HTTP/aurora.example.com@EXAMPLE.COM`.
+
+```
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+```
+
+The Kerberos5 realm module is authentication-only. For scheduler security to work you must also
+enable a realm module that provides an Authorizer implementation. For example, to do this using the
+IniShiroRealmModule:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+```
+
+You can then configure authorization using a security.ini file as described below
+(the password field is ignored). You must configure the realm module with the path to this file:
+
+```
+-shiro_ini_path=path/to/security.ini
+```
+
+### Client Configuration
+To use Kerberos on the client-side you must build Kerberos-enabled client binaries. Do this with
+
+```
+./pants binary src/main/python/apache/aurora/kerberos:kaurora
+./pants binary src/main/python/apache/aurora/kerberos:kaurora_admin
+```
+
+You must also configure each cluster where you've enabled Kerberos on the scheduler
+to use Kerberos authentication. Do this by setting `auth_mechanism` to `KERBEROS`
+in `clusters.json`.
+
+```
+% cat ~/.aurora/clusters.json
+{
+    "devcluser": {
+        "auth_mechanism": "KERBEROS",
+        ...
+    },
+    ...
+}
+```
+
+# Authorization
+Given a means to authenticate the entity a client claims to be, we need to define what privileges they have.
+
+## Using an INI file to define security controls
+
+The simplest security configuration for Aurora is an INI file on the scheduler.  For small
+clusters, or clusters where the users and access controls change relatively infrequently, this is
+likely the preferred approach.  However you may want to avoid this approach if access permissions
+are rapidly changing, or if your access control information already exists in another system.
+
+You can enable INI-based configuration with the following scheduler command line arguments:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_ini_path=path/to/security.ini
+```
+
+*Note:* As the argument name reveals, this is using Shiro’s
+[IniRealm](http://shiro.apache.org/configuration.html#Configuration-INIConfiguration) behind
+the scenes.
+
+The INI file will contain two sections - users and roles.  Here’s an example of what might
+be in security.ini:
+
+```
+[users]
+sally = apple, admin
+jim = 123456, accounting
+becky = letmein, webapp
+larry = 654321,accounting
+steve = password
+
+[roles]
+admin = *
+accounting = thrift.AuroraAdmin:setQuota
+webapp = thrift.AuroraSchedulerManager:*:webapp
+```
+
+The users section defines user credentials and the role(s) they are members of.  These lines
+are of the format `<user> = <password>[, <role>...]`.  As you probably noticed, the passwords are
+in plaintext and as a result read access to this file should be restricted.
+
+In this configuration, each user has different privileges for actions in the cluster because
+of the roles they are a part of:
+
+* admin is granted all privileges
+* accounting may adjust the amount of resource quota for any role
+* webapp represents a collection of jobs that make up a service, and its members may create and modify any jobs owned by it
+
+### Caveats
+You might find documentation on the Internet suggesting there are additional sections in `shiro.ini`,
+like `[main]` and `[urls]`. These are not supported by Aurora as it uses a different mechanism to configure
+those parts of Shiro. Think of Aurora's `security.ini` as a subset with only `[users]` and `[roles]` sections.
+
+## Implementing Delegated Authorization
+
+It is possible to leverage Shiro's `runAs` feature by implementing a custom Servlet Filter that
+provides the capability and passing its fully qualified class name to the command line argument
+`-shiro_after_auth_filter`. The filter is registered in the same filter chain as the Shiro auth
+filters and is placed after them, which ensures that it is only invoked after the Shiro filters
+have had a chance to authenticate the request.
+
+# Implementing a Custom Realm
+
+Since Aurora’s security is backed by [Apache Shiro](https://shiro.apache.org), you can implement a
+custom [Realm](http://shiro.apache.org/realm.html) to define organization-specific security behavior.
+
+In addition to using Shiro's standard APIs to implement a Realm you can link against Aurora to
+access the type-safe Permissions Aurora uses. See the Javadoc for `org.apache.aurora.scheduler.spi`
+for more information.
+
+## Packaging a realm module
+Package your custom Realm(s) with a Guice module that exposes a `Set<Realm>` multibinding.
+
+```java
+package com.example;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.multibindings.Multibinder;
+import org.apache.shiro.realm.Realm;
+
+public class MyRealmModule extends AbstractModule {
+  @Override
+  public void configure() {
+    Realm myRealm = new MyRealm();
+
+    Multibinder.newSetBinder(binder(), Realm.class).addBinding().toInstance(myRealm);
+  }
+
+  static class MyRealm implements Realm {
+    // Realm implementation.
+  }
+}
+```
+
+To use your module in the scheduler, include it as a realm module based on its fully-qualified
+class name:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ,com.example.MyRealmModule
+```
+
+# Known Issues
+
+While the APIs and SPIs we ship with are stable as of 0.8.0, we are aware of several possible
+incremental improvements. Please follow, vote, or send patches.
+
+Relevant tickets:
+* [AURORA-343](https://issues.apache.org/jira/browse/AURORA-343): HTTPS support
+* [AURORA-1248](https://issues.apache.org/jira/browse/AURORA-1248): Client retries 4xx errors
+* [AURORA-1279](https://issues.apache.org/jira/browse/AURORA-1279): Remove kerberos-specific build targets
+* [AURORA-1291](https://issues.apache.org/jira/browse/AURORA-1291): Consider defining a JSON format in place of INI
+* [AURORA-1179](https://issues.apache.org/jira/browse/AURORA-1179): Support hashed passwords in security.ini
+* [AURORA-1295](https://issues.apache.org/jira/browse/AURORA-1295): Support security for the ReadOnlyScheduler service
diff --git a/source/documentation/0.12.0/sla.md b/source/documentation/0.12.0/sla.md
new file mode 100644
index 0000000..ac3af09
--- /dev/null
+++ b/source/documentation/0.12.0/sla.md
@@ -0,0 +1,177 @@
+Aurora SLA Measurement
+--------------
+
+- [Overview](#overview)
+- [Metric Details](#metric-details)
+  - [Platform Uptime](#platform-uptime)
+  - [Job Uptime](#job-uptime)
+  - [Median Time To Assigned (MTTA)](#median-time-to-assigned-\(mtta\))
+  - [Median Time To Running (MTTR)](#median-time-to-running-\(mttr\))
+- [Limitations](#limitations)
+
+## Overview
+
+The primary goal of the feature is collection and monitoring of Aurora job SLA (Service Level
+Agreement) metrics that define a contractual relationship between the Aurora/Mesos platform
+and hosted services.
+
+The Aurora SLA feature is by default only enabled for service (non-cron)
+production jobs (`production = True` in your `.aurora` config). It can be enabled for
+non-production services via the scheduler command line flag `-sla_non_prod_metrics`.
+
+Counters that track SLA measurements are computed periodically within the scheduler.
+The individual instance metrics are refreshed every minute (configurable via
+`sla_stat_refresh_interval`). The instance counters are subsequently aggregated by
+relevant grouping types before being exported to the scheduler's `/vars` endpoint (when using
+`vagrant` that would be `http://192.168.33.7:8081/vars`).
+
+## Metric Details
+
+### Platform Uptime
+
+*Aggregate amount of time a job spends in a non-runnable state due to platform unavailability
+or scheduling delays. This metric tracks Aurora/Mesos uptime performance and reflects any
+system-caused downtime events (tasks LOST or DRAINED). Any user-initiated task kills/restarts
+will not degrade this metric.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_platform_uptime_percent`
+* Per cluster - `sla_cluster_platform_uptime_percent`
+
+**Units:** percent
+
+A fault in the task environment may cause Aurora and Mesos to have different views of the task
+state, or to lose track of the task's existence. In such cases, the service task is marked as LOST
+and rescheduled by Aurora. For example, this may happen when the task stays in ASSIGNED or STARTING
+for too long or the Mesos slave becomes unhealthy (or disappears completely). The time between the
+task entering LOST and its replacement reaching RUNNING state is counted towards platform downtime.
+
+Another example of a platform downtime event is the administrator-requested task rescheduling. This
+happens during planned Mesos slave maintenance when all slave tasks are marked as DRAINED and
+rescheduled elsewhere.
+
+To accurately calculate Platform Uptime, we must separate platform incurred downtime from user
+actions that put a service instance in a non-operational state. It is simpler to isolate
+user-incurred downtime and treat all other downtime as platform incurred.
+
+Currently, a user can cause a healthy service (task) downtime in only two ways: via `killTasks`
+or `restartShards` RPCs. For both, their affected tasks leave an audit state transition trail
+relevant to uptime calculations. By applying a special "SLA meaning" to exposed task state
+transition records, we can build a deterministic downtime trace for every given service instance.
+
+A task going through a state transition carries one of three possible SLA meanings
+(see [SlaAlgorithm.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/sla/SlaAlgorithm.java)
+for the SLA-to-task-state mapping):
+
+* Task is UP: starts a period where the task is considered to be up and running from the Aurora
+  platform standpoint.
+
+* Task is DOWN: starts a period where the task cannot reach the UP state for some
+  non-user-related reason. Counts towards instance downtime.
+
+* Task is REMOVED from SLA: starts a period where the task is not expected to be UP due to
+  user initiated action or failure. We ignore this period for the uptime calculation purposes.
+
+This metric is recalculated over the last sampling period (last minute) to account for
+any UP/DOWN/REMOVED events. It ignores any UP/DOWN events not immediately adjacent to the
+sampling interval as well as adjacent REMOVED events.
+
+### Job Uptime
+
+*Percentage of the job instances considered to be in RUNNING state for the specified duration
+relative to request time. This is a purely application-side metric that considers the aggregate
+uptime of all RUNNING instances. Any user- or platform-initiated restarts directly affect
+this metric.*
+
+**Collection scope:** We currently expose job uptime values at 5 pre-defined
+percentiles (50th, 75th, 90th, 95th and 99th):
+
+* `sla_<job_key>_job_uptime_50_00_sec`
+* `sla_<job_key>_job_uptime_75_00_sec`
+* `sla_<job_key>_job_uptime_90_00_sec`
+* `sla_<job_key>_job_uptime_95_00_sec`
+* `sla_<job_key>_job_uptime_99_00_sec`
+
+**Units:** seconds
+
+You can also get customized real-time stats from the Aurora client. See `aurora sla -h` for
+more details.
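+
+As a sketch of what these percentile values represent (illustrative only, not Aurora's exact
+algorithm): the `N`th percentile is the largest duration that at least `N` percent of the job's
+RUNNING instances have been up for.
+
+```python
+import math
+
+def job_uptime_sec(uptimes, percentile):
+    """uptimes: per-instance uptime in seconds for each RUNNING instance of the job."""
+    ordered = sorted(uptimes, reverse=True)
+    k = int(math.ceil(len(ordered) * percentile / 100.0))
+    return ordered[k - 1] if k else 0
+```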
+
+### Median Time To Assigned (MTTA)
+
+*Median time a job spends waiting for its tasks to be assigned to a host. This is a combined
+metric that helps track the dependency of scheduling performance on the requested resources
+(user scope) as well as the internal scheduler bin-packing algorithm efficiency (platform scope).*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtta_ms`
+* Per cluster - `sla_cluster_mtta_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in
+[ResourceAggregates.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java):
+  * By CPU:
+    * `sla_cpu_small_mtta_ms`
+    * `sla_cpu_medium_mtta_ms`
+    * `sla_cpu_large_mtta_ms`
+    * `sla_cpu_xlarge_mtta_ms`
+    * `sla_cpu_xxlarge_mtta_ms`
+  * By RAM:
+    * `sla_ram_small_mtta_ms`
+    * `sla_ram_medium_mtta_ms`
+    * `sla_ram_large_mtta_ms`
+    * `sla_ram_xlarge_mtta_ms`
+    * `sla_ram_xxlarge_mtta_ms`
+  * By DISK:
+    * `sla_disk_small_mtta_ms`
+    * `sla_disk_medium_mtta_ms`
+    * `sla_disk_large_mtta_ms`
+    * `sla_disk_xlarge_mtta_ms`
+    * `sla_disk_xxlarge_mtta_ms`
+
+**Units:** milliseconds
+
+MTTA only considers instances that have already reached ASSIGNED state and ignores those
+that are still PENDING. This ensures straggler instances (e.g. with unreasonable resource
+constraints) do not affect metric curves.
+
+### Median Time To Running (MTTR)
+
+*Median time a job waits for its tasks to reach RUNNING state. This is a comprehensive metric
+reflecting the overall time it takes for Aurora/Mesos to start executing user content.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mttr_ms`
+* Per cluster - `sla_cluster_mttr_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in
+[ResourceAggregates.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java):
+  * By CPU:
+    * `sla_cpu_small_mttr_ms`
+    * `sla_cpu_medium_mttr_ms`
+    * `sla_cpu_large_mttr_ms`
+    * `sla_cpu_xlarge_mttr_ms`
+    * `sla_cpu_xxlarge_mttr_ms`
+  * By RAM:
+    * `sla_ram_small_mttr_ms`
+    * `sla_ram_medium_mttr_ms`
+    * `sla_ram_large_mttr_ms`
+    * `sla_ram_xlarge_mttr_ms`
+    * `sla_ram_xxlarge_mttr_ms`
+  * By DISK:
+    * `sla_disk_small_mttr_ms`
+    * `sla_disk_medium_mttr_ms`
+    * `sla_disk_large_mttr_ms`
+    * `sla_disk_xlarge_mttr_ms`
+    * `sla_disk_xxlarge_mttr_ms`
+
+**Units:** milliseconds
+
+MTTR only considers instances in RUNNING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+## Limitations
+
+* The availability of Aurora SLA metrics is bound by the scheduler availability.
+
+* All metrics are calculated at a pre-defined interval (currently set at 1 minute).
+  Scheduler restarts may result in missed collections.
diff --git a/source/documentation/0.12.0/storage-config.md b/source/documentation/0.12.0/storage-config.md
new file mode 100644
index 0000000..2a7623e
--- /dev/null
+++ b/source/documentation/0.12.0/storage-config.md
@@ -0,0 +1,142 @@
+# Storage Configuration And Maintenance
+
+- [Overview](#overview)
+- [Scheduler storage configuration flags](#scheduler-storage-configuration-flags)
+  - [Mesos replicated log configuration flags](#mesos-replicated-log-configuration-flags)
+    - [-native_log_quorum_size](#-native_log_quorum_size)
+    - [-native_log_file_path](#-native_log_file_path)
+    - [-native_log_zk_group_path](#-native_log_zk_group_path)
+  - [Backup configuration flags](#backup-configuration-flags)
+    - [-backup_interval](#-backup_interval)
+    - [-backup_dir](#-backup_dir)
+    - [-max_saved_backups](#-max_saved_backups)
+- [Recovering from a scheduler backup](#recovering-from-a-scheduler-backup)
+  - [Summary](#summary)
+  - [Preparation](#preparation)
+  - [Cleanup and re-initialize Mesos replicated log](#cleanup-and-re-initialize-mesos-replicated-log)
+  - [Restore from backup](#restore-from-backup)
+  - [Cleanup](#cleanup)
+
+## Overview
+
+This document summarizes Aurora storage configuration and maintenance details and is
+intended for use by anyone deploying and/or maintaining Aurora.
+
+For a high level overview of the Aurora storage architecture refer to [this document](/documentation/0.12.0/storage/).
+
+## Scheduler storage configuration flags
+
+Below is a summary of scheduler storage configuration flags that either don't have default values
+or require attention before deploying in a production environment.
+
+### Mesos replicated log configuration flags
+
+#### -native_log_quorum_size
+Defines the Mesos replicated log quorum size. See
+[the replicated log configuration document](/documentation/0.12.0/deploying-aurora-scheduler/#replicated-log-configuration)
+on how to choose the right value.
+
+#### -native_log_file_path
+Location of the Mesos replicated log files. Consider allocating a dedicated disk (preferably SSD)
+for Mesos replicated log files to ensure optimal storage performance.
+
+#### -native_log_zk_group_path
+ZooKeeper path used for Mesos replicated log quorum discovery.
+
+See [code](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/log/mesos/MesosLogStreamModule.java) for
+other available Mesos replicated log configuration options and default values.
+
+### Backup configuration flags
+
+Configuration options for the Aurora scheduler backup manager.
+
+#### -backup_interval
+The interval on which the scheduler writes local storage backups.  The default is every hour.
+
+#### -backup_dir
+Directory to write backups to.
+
+#### -max_saved_backups
+Maximum number of backups to retain before deleting the oldest backup(s).
+
+## Recovering from a scheduler backup
+
+- [Summary](#summary)
+- [Preparation](#preparation)
+- [Cleanup and re-initialize Mesos replicated log](#cleanup-and-re-initialize-mesos-replicated-log)
+- [Restore from backup](#restore-from-backup)
+- [Cleanup](#cleanup)
+
+**Be sure to read the entire page before attempting to restore from a backup, as it may have
+unintended consequences.**
+
+### Summary
+
+The restoration procedure replaces the existing (possibly corrupted) Mesos replicated log with an
+earlier, backed-up version, and requires all schedulers to be taken down temporarily while
+restoring. Once completed, the scheduler state resets to what it was when the backup was created.
+This means any jobs/tasks created or updated after the backup are unknown to the scheduler and will
+be killed shortly after the cluster restarts. All other tasks continue operating as normal.
+
+Usually, it is a bad idea to restore a backup that is not extremely recent (i.e. older than a few
+hours). This is because the scheduler will expect the cluster to look exactly as the backup does,
+so any tasks that have been rescheduled since the backup was taken will be killed.
+
+### Preparation
+
+Follow these steps to prepare the cluster for restoring from a backup:
+
+* Stop all scheduler instances
+
+* Consider blocking external traffic on the port defined in `-http_port` for all schedulers to
+prevent users from interacting with the scheduler during the restoration process. This will help
+troubleshooting by reducing scheduler log noise, and prevent users from making changes that will
+be erased after the backup snapshot is restored.
+
+* The next steps are required to put the scheduler into a partially disabled state, where it is
+still able to accept storage recovery requests but unable to schedule or change task states. This
+may be accomplished by updating the following scheduler configuration options:
+  * Set `-mesos_master_address` to a non-existent zk address. This will prevent the scheduler from
+    registering with Mesos. E.g.: `-mesos_master_address=zk://localhost:2181`
+  * `-max_registration_delay` - set to a sufficiently long interval to prevent registration timeout
+    and the resulting scheduler suicide. E.g.: `-max_registration_delay=360mins`
+  * Make sure the `-reconciliation_initial_delay` option is set high enough (e.g.: `365days`) to
+    prevent accidental task GC. This is important as the scheduler will attempt to reconcile the
+    cluster state and will kill all tasks when restarted with an empty Mesos replicated log.
+
+* Restart all schedulers
+
+### Cleanup and re-initialize Mesos replicated log
+
+Get rid of the corrupted files and re-initialize the Mesos replicated log:
+
+* Stop schedulers
+* Delete all files under `-native_log_file_path` on all schedulers
+* Initialize Mesos replica's log file: `mesos-log initialize --path=<-native_log_file_path>`
+* Restart schedulers
+
+### Restore from backup
+
+At this point the scheduler is ready to rehydrate from the backup:
+
+* Identify the leading scheduler by:
+  * running `aurora_admin get_scheduler <cluster>` - if the scheduler is responsive
+  * examining scheduler logs
+  * or examining Zookeeper registration under the path defined by `-zk_endpoints`
+    and `-serverset_path`
+
+* Locate the desired backup file, copy it to the leading scheduler, and stage recovery by running
+the following command on the leader:
+`aurora_admin scheduler_stage_recovery <cluster> scheduler-backup-<yyyy-MM-dd-HH-mm>`
+
+* At this point, the recovery snapshot is staged and available for manual verification/modification
+via `aurora_admin scheduler_print_recovery_tasks` and `scheduler_delete_recovery_tasks` commands.
+See `aurora_admin help <command>` for usage details.
+
+* Commit recovery. This instructs the scheduler to overwrite the existing Mesos replicated log with
+the provided backup snapshot and initiate a mandatory failover:
+`aurora_admin scheduler_commit_recovery <cluster>`
+
+### Cleanup
+Undo any modification done during [Preparation](#preparation) sequence.
+
diff --git a/source/documentation/0.12.0/storage.md b/source/documentation/0.12.0/storage.md
new file mode 100644
index 0000000..80a95eb
--- /dev/null
+++ b/source/documentation/0.12.0/storage.md
@@ -0,0 +1,88 @@
+# Aurora Scheduler Storage
+
+- [Overview](#overview)
+- [Reads, writes, modifications](#reads-writes-modifications)
+  - [Read lifecycle](#read-lifecycle)
+  - [Write lifecycle](#write-lifecycle)
+- [Atomicity, consistency and isolation](#atomicity-consistency-and-isolation)
+- [Population on restart](#population-on-restart)
+
+## Overview
+
+The Aurora scheduler maintains data that needs to be persisted to survive failovers and restarts.
+For example:
+
+* Task configurations and scheduled task instances
+* Job update configurations and update progress
+* Production resource quotas
+* Mesos resource offer host attributes
+
+Aurora solves its persistence needs by leveraging the Mesos implementation of a Paxos replicated
+log [[1]](https://ramcloud.stanford.edu/~ongaro/userstudy/paxos.pdf)
+[[2]](http://en.wikipedia.org/wiki/State_machine_replication) with key-value
+[LevelDB](https://github.com/google/leveldb) storage as the persistence medium.
+
+Conceptually, it can be represented by the following major components:
+
+* Volatile storage: in-memory cache of all available data. Implemented via in-memory
+[H2 Database](http://www.h2database.com/html/main.html) and accessed via
+[MyBatis](http://mybatis.github.io/mybatis-3/).
+* Log manager: interface between Aurora storage and Mesos replicated log. The default schema format
+is [thrift](https://github.com/apache/thrift). Data is stored in serialized binary form.
+* Snapshot manager: all data is periodically persisted to the Mesos replicated log in a single
+snapshot. This helps establish periodic recovery checkpoints and speeds up volatile storage
+recovery on restart.
+* Backup manager: as a precaution, snapshots are periodically written out into backup files.
+This solves a [disaster recovery problem](/documentation/0.12.0/storage-config/#recovering-from-a-scheduler-backup)
+in case of a complete loss or corruption of Mesos log files.
+
+![Storage hierarchy](images/storage_hierarchy.png)
+
+## Reads, writes, modifications
+
+All services in Aurora access data via a set of predefined store interfaces (aka stores) logically
+grouped by the type of data they serve. Every interface defines a specific set of operations
+allowed on the data, thus abstracting out the storage access and the actual persistence
+implementation. The latter is especially important given the general immutability of persisted
+data. With the Mesos replicated log as the underlying persistence solution, data can be read and
+written easily, but not modified. All modifications are simulated by saving new versions of
+modified objects. This feature and general performance considerations justify the existence of the
+volatile in-memory store.
+
+### Read lifecycle
+
+There are two types of reads available in Aurora: consistent and weakly-consistent. The difference
+is explained [below](#atomicity-consistency-and-isolation).
+
+All reads are served from the volatile storage, making them generally cheap operations from a
+performance standpoint. The majority of the volatile stores are represented by the in-memory H2
+database. This allows for rich schema definitions, queries and relationships that key-value
+storage is unable to match.
+
+### Write lifecycle
+
+Writes are more involved operations, since in addition to updating the volatile store, data has to
+be appended to the replicated log. Data is not available for reads until fully acknowledged by both
+the replicated log and volatile storage.
+
+## Atomicity, consistency and isolation
+
+Aurora uses [write-ahead logging](http://en.wikipedia.org/wiki/Write-ahead_logging) to ensure
+consistency between replicated and volatile storage. In Aurora, data is first written into the
+replicated log and only then updated in the volatile store.
+
+Aurora storage uses read-write locks to serialize data mutations and provide a consistent view of
+the available data. The available `Storage` interface exposes 3 major types of operations:
+
+* `consistentRead` - access is locked using the reader's lock and provides a consistent view on read
+* `weaklyConsistentRead` - access is lock-less. Delivers best contention performance but may result
+in stale reads
+* `write` - access is fully serialized by using the writer's lock. Operation success requires both
+volatile and replicated writes to succeed.
+
+The consistency of the volatile store is enforced via H2 transactional isolation.
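+
+A minimal sketch of this write-ahead pattern (illustrative Python only; the real implementation is
+Java, and the `replicated_log`/`volatile_store` objects here are hypothetical stand-ins):
+
+```python
+import threading
+
+class Storage(object):
+    def __init__(self, replicated_log, volatile_store):
+        self._log = replicated_log       # durable, append-only
+        self._volatile = volatile_store  # in-memory, serves all reads
+        self._lock = threading.RLock()   # stand-in for the reader/writer lock
+
+    def write(self, op):
+        with self._lock:                 # fully serialized, like `write`
+            self._log.append(op)         # log first: write-ahead
+            self._volatile.apply(op)     # only then update the volatile store
+
+    def consistent_read(self, query):
+        with self._lock:                 # `consistentRead`: locked access
+            return self._volatile.query(query)
+
+    def weakly_consistent_read(self, query):
+        return self._volatile.query(query)  # lock-less; may observe stale data
+```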
+
+## Population on restart
+
+Any time a scheduler restarts, it restores its volatile state from the most recent position recorded
+in the replicated log by restoring the snapshot and replaying individual log entries on top to fully
+recover the state up to the last write.
+
diff --git a/source/documentation/0.12.0/test-resource-generation.md b/source/documentation/0.12.0/test-resource-generation.md
new file mode 100644
index 0000000..a12018a
--- /dev/null
+++ b/source/documentation/0.12.0/test-resource-generation.md
@@ -0,0 +1,24 @@
+# Generating test resources
+
+## Background
+The Aurora source repository and distributions contain several
+[binary files](https://github.com/apache/aurora/blob/#{git_tag}/src/test/resources/org/apache/thermos/root/checkpoints) to
+qualify the backwards-compatibility of thermos with checkpoint data. Since
+thermos persists state to disk (to be read by the thermos observer), it is important that we have
+tests that prevent regressions affecting the ability to parse previously-written data.
+
+## Generating test files
+The files included represent persisted checkpoints that exercise different
+features of thermos. The existing files should not be modified unless
+we are accepting backwards incompatibility, such as with a major release.
+
+It is not practical to write source code to generate these files on the fly,
+as source would be vulnerable to drift (e.g. due to refactoring) in ways
+that would undermine the goal of ensuring backwards compatibility.
+
+The most common reason to add a new checkpoint file would be to provide
+coverage for new thermos features that alter the data format. This is
+accomplished by writing and running a
+[job configuration](/documentation/0.12.0/configuration-reference/) that exercises the feature, and
+copying the checkpoint file from the sandbox directory; by default this is
+`/var/run/thermos/checkpoints/<aurora task id>`.
diff --git a/source/documentation/0.12.0/thrift-deprecation.md b/source/documentation/0.12.0/thrift-deprecation.md
new file mode 100644
index 0000000..77ef600
--- /dev/null
+++ b/source/documentation/0.12.0/thrift-deprecation.md
@@ -0,0 +1,50 @@
+# Thrift API Changes
+
+## Overview
+Aurora uses [Apache Thrift](https://thrift.apache.org/) for representing structured data in its
+client/server RPC protocol as well as for internal data storage. While Thrift is capable of
+correctly handling additions and renames of existing members, field removals must be done
+carefully to ensure backwards compatibility and provide a predictable deprecation cycle. This
+document describes general guidelines for making Thrift schema changes to the existing fields in
+[api.thrift](https://github.com/apache/aurora/blob/#{git_tag}/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+It is highly recommended to go through the
+[Thrift: The Missing Guide](http://diwakergupta.github.io/thrift-missing-guide/) first to refresh on
+basic Thrift schema concepts.
+
+## Checklist
+Every existing Thrift schema modification is unique in its requirements and must be analyzed
+carefully to identify its scope and expected consequences. The following checklist may help in that
+analysis:
+* Is this a new field/struct? If yes, go ahead
+* Is this a pure field/struct rename without any type/structure change? If yes, go ahead and rename
+* Anything else? Read further to make sure your change is properly planned
+
+## Deprecation cycle
+Any time a breaking change (e.g.: field replacement or removal) is required, the following cycle
+must be followed:
+
+### vCurrent
+The change is applied in a way that does not break the ability of a scheduler/client at this
+version to communicate with a scheduler/client from vCurrent-1.
+* Do not remove or rename the old field
+* Add a new field as an eventual replacement of the old one and implement a dual read/write
+anywhere the old field is used (see the sketch below)
+* Check [storage.thrift](https://github.com/apache/aurora/blob/#{git_tag}/api/src/main/thrift/org/apache/aurora/gen/storage.thrift) to see if the
+affected struct is stored in Aurora scheduler storage. If so, you most likely need to backfill
+existing data to ensure both fields are populated eagerly on startup.
+See [StorageBackfill.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/storage/StorageBackfill.java)
+* Add a deprecation Jira ticket into the vCurrent+1 release candidate
+* Add a TODO for the deprecated field mentioning the Jira ticket
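+
+As a sketch of dual read/write (the field names are invented for illustration; a hypothetical
+`diskMb` field being replaced by a `resources` struct):
+
+```python
+def to_resources(disk_mb):
+    return {'disk_mb': disk_mb}  # stand-in for the real replacement struct
+
+def write_task_config(task_config, disk_mb):
+    task_config.diskMb = disk_mb                   # keep writing the old field
+    task_config.resources = to_resources(disk_mb)  # start writing the new field
+
+def read_disk_mb(task_config):
+    if task_config.resources is not None:          # prefer the new field when present
+        return task_config.resources['disk_mb']
+    return task_config.diskMb                      # fall back for vCurrent-1 data
+```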
+
+### vCurrent+1
+Finalize the change by removing the deprecated fields from the Thrift schema.
+* Drop any dual read/write routines added in the previous version
+* Remove the deprecated Thrift field
+
+## Testing
+It's always advisable to test your changes in the local vagrant environment to build more
+confidence that your change is backwards compatible. It's easy to simulate different
+client/scheduler versions by playing with the `aurorabuild` command. See [this document](/documentation/0.12.0/vagrant/)
+for more.
+
diff --git a/source/documentation/0.12.0/tools.md b/source/documentation/0.12.0/tools.md
new file mode 100644
index 0000000..2ae550d
--- /dev/null
+++ b/source/documentation/0.12.0/tools.md
@@ -0,0 +1,16 @@
+# Tools
+
+Various tools integrate with Aurora. Is there a tool missing? Let us know, or submit a patch to add it!
+
+* Load-balancing technology used to direct traffic to services running on Aurora
+  - [synapse](https://github.com/airbnb/synapse) based on HAProxy
+  - [aurproxy](https://github.com/tellapart/aurproxy) based on nginx
+  - [jobhopper](https://github.com/benley/aurora-jobhopper) performing HTTP redirects for easy developer and administrator access
+
+* Monitoring
+  - [collectd-aurora](https://github.com/zircote/collectd-aurora) for cluster monitoring using collectd
+  - [Prometheus Aurora exporter](https://github.com/tommyulfsparre/aurora_exporter) for cluster monitoring using Prometheus
+  - [Prometheus service discovery integration](http://prometheus.io/docs/operating/configuration/#zookeeper-serverset-sd-configurations-serverset_sd_config) for discovering and monitoring services running on Aurora
+
+* Packaging and deployment
+  - [aurora-packaging](https://github.com/apache/aurora-packaging), the source of the official Aurora packages
diff --git a/source/documentation/0.12.0/tutorial.md b/source/documentation/0.12.0/tutorial.md
new file mode 100644
index 0000000..87ee70a
--- /dev/null
+++ b/source/documentation/0.12.0/tutorial.md
@@ -0,0 +1,260 @@
+# Aurora Tutorial
+
+This tutorial shows how to use the Aurora scheduler to run (and "`printf-debug`")
+a hello world program on Mesos. This is the recommended document for new Aurora users
+to start getting up to speed on the system.
+
+- [Prerequisite](#prerequisite)
+- [The Script](#the-script)
+- [Aurora Configuration](#aurora-configuration)
+- [Creating the Job](#creating-the-job)
+- [Watching the Job Run](#watching-the-job-run)
+- [Cleanup](#cleanup)
+- [Next Steps](#next-steps)
+
+
+## Prerequisite
+
+This tutorial assumes you are running [Aurora locally using Vagrant](/documentation/0.12.0/vagrant/).
+However, in general the instructions are also applicable to any other
+[Aurora installation](/documentation/0.12.0/installing/).
+
+Unless otherwise stated, all commands are to be run from the root of the aurora
+repository clone.
+
+
+## The Script
+
+Our "hello world" application is a simple Python script that loops
+forever, displaying the time every few seconds. Copy the code below and
+put it in a file named `hello_world.py` in the root of your Aurora repository clone
+(Note: this directory is the same as `/vagrant` inside the Vagrant VMs).
+
+The script has an intentional bug, which we will explain later on.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+import time
+
+def main():
+  SLEEP_DELAY = 10
+  # Python ninjas - ignore this blatant bug.
+  for i in xrang(100):
+    print("Hello world! The time is now: %s. Sleeping for %d secs" % (
+      time.asctime(), SLEEP_DELAY))
+    time.sleep(SLEEP_DELAY)
+
+if __name__ == "__main__":
+  main()
+```
+
+## Aurora Configuration
+
+Once we have our script/program, we need to create a *configuration
+file* that tells Aurora how to manage and launch our Job. Save the below
+code in the file `hello_world.aurora`.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+pkg_path = '/vagrant/hello_world.py'
+
+# we use a trick here to make the configuration change with
+# the contents of the file, for simplicity.  in a normal setting, packages would be
+# versioned, and the version number would be changed in the configuration.
+import hashlib
+with open(pkg_path, 'rb') as f:
+  pkg_checksum = hashlib.md5(f.read()).hexdigest()
+
+# copy hello_world.py into the local sandbox
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s . && echo %s && chmod +x hello_world.py' % (pkg_path, pkg_checksum))
+
+# run the script
+hello_world = Process(
+  name = 'hello_world',
+  cmdline = 'python -u hello_world.py')
+
+# describe the task
+hello_world_task = SequentialTask(
+  processes = [install, hello_world],
+  resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB))
+
+jobs = [
+  Service(cluster = 'devcluster',
+          environment = 'devel',
+          role = 'www-data',
+          name = 'hello_world',
+          task = hello_world_task)
+]
+```
+
+There is a lot going on in that configuration file:
+
+1. From a "big picture" viewpoint, it first defines two
+Processes. Then it defines a Task that runs the two Processes in the
+order specified in the Task definition, as well as specifying what
+computational and memory resources are available for them.  Finally,
+it defines a Job that will schedule the Task on available and suitable
+machines. This Job is the sole member of a list of Jobs; you can
+specify more than one Job in a config file.
+
+2. At the Process level, it specifies how to get your code into the
+local sandbox in which it will run. It then specifies how the code is
+actually run once the second Process starts.
+
+For more about Aurora configuration files, see the [Configuration
+Tutorial](/documentation/0.12.0/configuration-tutorial/) and the [Aurora + Thermos
+Reference](/documentation/0.12.0/configuration-reference/) (preferably after finishing this
+tutorial).
+
+
+## Creating the Job
+
+We're ready to launch our job! To do so, we use the Aurora Client to
+issue a Job creation request to the Aurora scheduler.
+
+Many Aurora Client commands take a *job key* argument, which uniquely
+identifies a Job. A job key consists of four parts, each separated by a
+"/". The four parts are  `<cluster>/<role>/<environment>/<jobname>`
+in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts existing on the slave machines. If you
+don't know what accounts are available, contact your sysadmin.
+* Environment names are namespaces; you can count on `test`, `devel`,
+`staging` and `prod` existing.
+* Jobname is the custom name of your job.
+
+When comparing two job keys, if any of the four parts is different from
+its counterpart in the other key, then the two job keys identify two separate
+jobs. If all four values are identical, the job keys identify the same job.
+
+The `clusters.json` [client configuration](/documentation/0.12.0/client-cluster-configuration/)
+for the Aurora scheduler defines the available cluster names.
+For Vagrant, from the top-level of your Aurora repository clone, do:
+
+    $ vagrant ssh
+
+Followed by:
+
+    vagrant@aurora:~$ cat /etc/aurora/clusters.json
+
+You'll see something like the following. The `name` value shown here corresponds to a job key's cluster value.
+
+```javascript
+[{
+  "name": "devcluster",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler",
+  "auth_mechanism": "UNAUTHENTICATED",
+  "slave_run_directory": "latest",
+  "slave_root": "/var/lib/mesos"
+}]
+```
+
+The Aurora Client command that actually runs our Job is `aurora job create`. It creates a Job as
+specified by its job key and configuration file arguments and runs it.
+
+    aurora job create <cluster>/<role>/<environment>/<jobname> <config_file>
+
+Or for our example:
+
+    aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+After entering our virtual machine using `vagrant ssh`, running this command returns:
+
+    vagrant@aurora:~$ aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Creating job hello_world
+     INFO] Checking status of devcluster/www-data/devel/hello_world
+    Job create succeeded: job url=http://aurora.local:8081/scheduler/www-data/devel/hello_world
+
+
+## Watching the Job Run
+
+Now that our job is running, let's see what it's doing. Access the
+scheduler web interface at `http://$scheduler_hostname:$scheduler_port/scheduler`,
+or, when using Vagrant, at `http://192.168.33.7:8081/scheduler`.
+First we see what Jobs are scheduled:
+
+![Scheduled Jobs](images/ScheduledJobs.png)
+
+Click on your user name, which in this case is `www-data`, and we see the Jobs associated
+with that role:
+
+![Role Jobs](images/RoleJobs.png)
+
+If you click on your `hello_world` Job, you'll see:
+
+![hello_world Job](images/HelloWorldJob.png)
+
+Oops, looks like our first job didn't quite work! The task is temporarily throttled for
+having failed on every attempt by the Aurora scheduler to run it. We have to figure out
+what is going wrong.
+
+On the Completed tasks tab, we see all past attempts of the Aurora scheduler to run our job.
+
+![Completed tasks tab](images/CompletedTasks.png)
+
+We can navigate to the Task page of a failed run by clicking on the host link.
+
+![Task page](images/TaskBreakdown.png)
+
+Once there, we see that the `hello_world` process failed. The Task page
+captures the standard error and standard output streams and makes them available.
+Clicking through to `stderr` on the failed `hello_world` process, we see what happened.
+
+![stderr page](images/stderr.png)
+
+It looks like we made a typo in our Python script. We wanted `xrange`,
+not `xrang`. Edit the `hello_world.py` script to use the correct function
+and save it as `hello_world_v2.py`. Then update the `hello_world.aurora`
+configuration to the newest version.
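+
+One way to do this (a minimal sketch; any edit that runs the fixed script works) is to
+point `pkg_path` at the new file and adjust the two `cmdline` strings that name the
+script:
+
+```python
+pkg_path = '/vagrant/hello_world_v2.py'
+
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s . && echo %s && chmod +x hello_world_v2.py' % (pkg_path, pkg_checksum))
+
+hello_world = Process(
+  name = 'hello_world',
+  cmdline = 'python -u hello_world_v2.py')
+```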
+
+In order to try again, we can now instruct the scheduler to update our job:
+
+    vagrant@aurora:~$ aurora update start devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Starting update for: hello_world
+    Job update has started. View your update progress at http://aurora.local:8081/scheduler/www-data/devel/hello_world/update/8ef38017-e60f-400d-a2f2-b5a8b724e95b
+
+This time, the task comes up.
+
+![Running Job](images/RunningJob.png)
+
+By again clicking on the host, we inspect the Task page, and see that the
+`hello_world` process is running.
+
+![Running Task page](images/runningtask.png)
+
+We then inspect the output by clicking on `stdout` and see our process'
+output:
+
+![stdout page](images/stdout.png)
+
+## Cleanup
+
+Now that we're done, we kill the job using the Aurora client:
+
+    vagrant@aurora:~$ aurora job killall devcluster/www-data/devel/hello_world
+     INFO] Killing tasks for job: devcluster/www-data/devel/hello_world
+     INFO] Instances to be killed: [0]
+    Successfully killed instances [0]
+    Job killall succeeded
+
+The job page now shows the `hello_world` tasks as completed.
+
+![Killed Task page](images/killedtask.png)
+
+## Next Steps
+
+Now that you've finished this Tutorial, you should read or do the following:
+
+- [The Aurora Configuration Tutorial](/documentation/0.12.0/configuration-tutorial/), which provides more examples
+  and best practices for writing Aurora configurations. You should also look at
+  the [Aurora + Thermos Configuration Reference](/documentation/0.12.0/configuration-reference/).
+- The [Aurora User Guide](/documentation/0.12.0/user-guide/) provides an overview of how Aurora, Mesos, and
+  Thermos work "under the hood".
+- Explore the Aurora Client - use `aurora -h`, and read the
+  [Aurora Client Commands](/documentation/0.12.0/client-commands/) document.
diff --git a/source/documentation/0.12.0/user-guide.md b/source/documentation/0.12.0/user-guide.md
new file mode 100644
index 0000000..55a5d3c
--- /dev/null
+++ b/source/documentation/0.12.0/user-guide.md
@@ -0,0 +1,355 @@
+Aurora User Guide
+-----------------
+
+- [Overview](#overview)
+- [Job Lifecycle](#job-lifecycle)
+	- [Life Of A Task](#life-of-a-task)
+	- [PENDING to RUNNING states](#pending-to-running-states)
+	- [Task Updates](#task-updates)
+	- [HTTP Health Checking and Graceful Shutdown](#http-health-checking-and-graceful-shutdown)
+		- [Tearing a task down](#tearing-a-task-down)
+	- [Giving Priority to Production Tasks: PREEMPTING](#giving-priority-to-production-tasks-preempting)
+	- [Natural Termination: FINISHED, FAILED](#natural-termination-finished-failed)
+	- [Forceful Termination: KILLING, RESTARTING](#forceful-termination-killing-restarting)
+- [Configuration](#configuration)
+- [Service Discovery](#service-discovery)
+- [Creating Jobs](#creating-jobs)
+- [Interacting With Jobs](#interacting-with-jobs)
+
+Overview
+--------
+
+This document gives an overview of how Aurora works under the hood.
+It assumes you've already worked through the "hello world" example
+job in the [Aurora Tutorial](/documentation/0.12.0/tutorial/). Specifics of how to use Aurora are **not**
+given here, but pointers to documentation about how to use Aurora are
+provided.
+
+Aurora is a Mesos framework used to schedule *jobs* onto Mesos. Mesos
+cares about individual *tasks*, but typical jobs consist of dozens or
+hundreds of task replicas. Aurora provides a layer on top of Mesos with
+its `Job` abstraction. An Aurora `Job` consists of a task template and
+instructions for creating near-identical replicas of that task (modulo
+things like "instance id" or specific port numbers which may differ from
+machine to machine).
+
+How many tasks make up a Job is complicated. On a basic level, a Job consists of
+one task template and instructions for creating near-identical replicas of that task
+(otherwise referred to as "instances" or "shards").
+
+However, since Jobs can be updated on the fly, a single Job identifier or *job key*
+can have multiple job configurations associated with it.
+
+For example, suppose I have a Job with 4 instances that each
+request 1 core of cpu, 1 GB of RAM, and 1 GB of disk space as specified
+in the configuration file `hello_world.aurora`. I want to
+update it so it requests 2 GB of RAM instead of 1. I create a new
+configuration file to do that called `new_hello_world.aurora` and
+issue an `aurora update start <job_key_value>/0-1 new_hello_world.aurora`
+command.
+
+This results in instances 0 and 1 having 1 cpu, 2 GB of RAM, and 1 GB of disk space,
+while instances 2 and 3 have 1 cpu, 1 GB of RAM, and 1 GB of disk space. If instance 3
+dies and restarts, it restarts with 1 cpu, 1 GB RAM, and 1 GB disk space.
+
+That means the same Job has two task configurations active at the same
+time, each valid for a different range of instances.
+
+This isn't a recommended pattern, but it is valid and supported by the
+Aurora scheduler. This most often manifests in the "canary pattern" where
+instance 0 runs with a different configuration than instances 1-N to test
+different code versions alongside the actual production job.
+
+A task can merely be a single *process* corresponding to a single
+command line, such as `python2.6 my_script.py`. However, a task can also
+consist of many separate processes, which all run within a single
+sandbox. For example, running multiple cooperating agents together,
+such as `logrotate`, `installer`, master, or slave processes. This is
+where Thermos comes in. While Aurora provides a `Job` abstraction on
+top of Mesos `Tasks`, Thermos provides a `Process` abstraction
+underneath Mesos `Task`s and serves as part of the Aurora framework's
+executor.
+
+You define `Job`s, `Task`s, and `Process`es in a configuration file.
+Configuration files are written in Python, and make use of the Pystachio
+templating language. They end in a `.aurora` extension.
+
+Pystachio is a type-checked dictionary templating library.
+
+> TL;DR
+>
+> -   Aurora manages jobs made of tasks.
+> -   Mesos manages tasks made of processes.
+> -   Thermos manages processes.
+> -   All are defined in `.aurora` configuration files.
+
+![Aurora hierarchy](images/aurora_hierarchy.png)
+
+Each `Task` has a *sandbox* created when the `Task` starts and garbage
+collected when it finishes. All of a `Task`'s processes run in its
+sandbox, so processes can share state by using a shared current working
+directory.
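+
+For example, a hedged sketch (process names and commands are illustrative, not from
+the original) of two `Process`es sharing state through the sandbox:
+
+    # Both processes run in the same sandbox, so 'stage' can write a file
+    # that 'serve' later reads from the shared working directory.
+    stage = Process(
+      name = 'stage',
+      cmdline = 'echo "hello from the sandbox" > greeting.txt')
+    serve = Process(
+      name = 'serve',
+      cmdline = 'while true; do cat greeting.txt; sleep 60; done')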
+
+The sandbox garbage collection policy considers many factors, most
+importantly age and size. It makes a best-effort attempt to keep
+sandboxes around as long as possible post-task in order for service
+owners to inspect data and logs, should the `Task` have completed
+abnormally. However, you can't design your applications assuming sandboxes
+will be around forever; plan for their removal, e.g. by building log saving or other
+checkpointing mechanisms directly into your application or into your
+`Job` description.
+
+Job Lifecycle
+-------------
+
+When Aurora reads a configuration file and finds a `Job` definition, it:
+
+1.  Evaluates the `Job` definition.
+2.  Splits the `Job` into its constituent `Task`s.
+3.  Sends those `Task`s to the scheduler.
+4.  The scheduler puts the `Task`s into `PENDING` state, starting each
+    `Task`'s life cycle.
+
+### Life Of A Task
+
+![Life of a task](images/lifeofatask.png)
+
+### PENDING to RUNNING states
+
+When a `Task` is in the `PENDING` state, the scheduler constantly
+searches for machines satisfying that `Task`'s resource request
+requirements (RAM, disk space, CPU time) while maintaining configuration
+constraints such as "a `Task` must run on machines  dedicated  to a
+particular role" or attribute limit constraints such as "at most 2
+`Task`s from the same `Job` may run on each rack". When the scheduler
+finds a suitable match, it assigns the `Task` to a machine and puts the
+`Task` into the `ASSIGNED` state.
+
+From the `ASSIGNED` state, the scheduler sends an RPC to the slave
+machine containing `Task` configuration, which the slave uses to spawn
+an executor responsible for the `Task`'s lifecycle. When the scheduler
+receives an acknowledgement that the machine has accepted the `Task`,
+the `Task` goes into `STARTING` state.
+
+`STARTING` state initializes a `Task` sandbox. When the sandbox is fully
+initialized, Thermos begins to invoke `Process`es. Also, the slave
+machine sends an update to the scheduler that the `Task` is
+in `RUNNING` state.
+
+If a `Task` stays in `ASSIGNED` or `STARTING` for too long, the
+scheduler forces it into `LOST` state, creating a new `Task` in its
+place that's sent into `PENDING` state. This is technically true of any
+active state: if the Mesos core tells the scheduler that a slave has
+become unhealthy (or outright disappeared), the `Task`s assigned to that
+slave go into `LOST` state and new `Task`s are created in their place.
+From `PENDING` state, there is no guarantee a `Task` will be reassigned
+to the same machine unless job constraints explicitly force it there.
+
+If there is a state mismatch (e.g. a machine returns from a `netsplit`
+and the scheduler has marked all its `Task`s `LOST` and rescheduled
+them), a state reconciliation process kills the errant `RUNNING` tasks,
+which may take up to an hour. But to emphasize this point: there is no
+uniqueness guarantee for a single instance of a job in the presence of
+network partitions. If the Task requires that, it should be baked in at
+the application level using a distributed coordination service such as
+Zookeeper.
+
+### Task Updates
+
+`Job` configurations can be updated at any point in their lifecycle.
+Usually updates are done incrementally using a process called a *rolling
+upgrade*, in which Tasks are upgraded in small groups, one group at a
+time.  Updates are done using various Aurora Client commands.
+
+For a configuration update, the Aurora Client calculates required changes
+by examining the current job config state and the new desired job config.
+It then starts a rolling batched update process by going through every batch
+and performing these operations:
+
+- If an instance is present in the scheduler but isn't in the new config,
+  then that instance is killed.
+- If an instance is not present in the scheduler but is present in
+  the new config, then the instance is created.
+- If an instance is present in both the scheduler and the new config, then
+  the client diffs both task configs. If it detects any changes, it
+  performs an instance update by killing the old config instance and adding
+  the new config instance.
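+
+The per-instance decision above can be sketched in Python (purely illustrative
+pseudocode, not the client's actual implementation):
+
+    def plan_instance(instance_id, current, desired):
+      # 'current' is what the scheduler is running; 'desired' is the new config.
+      old, new = current.get(instance_id), desired.get(instance_id)
+      if old is not None and new is None:
+        return 'kill'
+      if old is None and new is not None:
+        return 'create'
+      if old != new:
+        return 'update'  # kill the old config instance, add the new one
+      return 'unchanged'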
+
+The Aurora client continues through the instance list until all tasks are
+updated, in `RUNNING`, and healthy for a configurable amount of time.
+If the client determines the update is not going well (a percentage of health
+checks have failed), it cancels the update.
+
+Update cancellation runs a procedure similar to the update sequence
+described above, but in reverse order. New instance configs are swapped
+with old instance configs and batch updates proceed backwards
+from the point where the update failed. E.g., (0,1,2) (3,4,5) (6,7,
+8-FAIL) results in a rollback in the order (8,7,6) (5,4,3) (2,1,0).
+
+### HTTP Health Checking and Graceful Shutdown
+
+The Executor implements a protocol for rudimentary control of a task via HTTP.  Tasks subscribe to
+this protocol by declaring a port named `health`.  Take for example this configuration snippet:
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[http]}}')
+
+When this Process is included in a job, the job will be allocated a port, and the command line
+will be replaced with something like:
+
+    ./run_nginx.sh -port 42816
+
+Where 42816 happens to be the allocated port.  Typically, the Executor monitors Processes within
+a task only by liveness of the forked process.  However, when a `health` port is allocated, it will
+also send periodic HTTP health checks.  A task requesting a `health` port must handle the following
+requests:
+
+| HTTP request            | Description                             |
+| ------------            | -----------                             |
+| `GET /health`           | Inquires whether the task is healthy.   |
+| `POST /quitquitquit`    | Task should initiate graceful shutdown. |
+| `POST /abortabortabort` | Final warning: the task is being killed. |
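+
+A minimal sketch of a process serving this protocol, assuming Python 2 and that the
+allocated `health` port is passed as the first command-line argument (illustrative
+only; see the configuration reference below for the actual health-check semantics):
+
+    import sys
+    from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
+
+    class HealthHandler(BaseHTTPRequestHandler):
+      def do_GET(self):
+        if self.path == '/health':
+          self.send_response(200)
+          self.end_headers()
+          self.wfile.write('ok')
+
+      def do_POST(self):
+        if self.path in ('/quitquitquit', '/abortabortabort'):
+          self.send_response(200)
+          self.end_headers()
+          # A real task would begin tearing itself down here.
+
+    HTTPServer(('', int(sys.argv[1])), HealthHandler).serve_forever()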
+
+Please see the
+[configuration reference](/documentation/0.12.0/configuration-reference/#healthcheckconfig-objects) for
+configuration options for this feature.
+
+#### Snoozing Health Checks
+
+If you need to pause your health check, you can do so by touching a file inside of your sandbox,
+named `.healthchecksnooze`.
+
+As long as that file is present, health checks will be disabled, enabling users to gather core dumps
+or other performance measurements without worrying about Aurora's health check killing their
+process.
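+
+For example, from a shell inside the task's sandbox (an illustrative invocation):
+
+    touch .healthchecksnooze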
+
+WARNING: Remember to remove this when you are done, otherwise your instance will have permanently
+disabled health checks.
+
+#### Tearing a task down
+
+The Executor follows an escalation sequence when killing a running task:
+
+  1. If the `health` port is not present, skip to (5)
+  2. POST /quitquitquit
+  3. wait 5 seconds
+  4. POST /abortabortabort
+  5. Send SIGTERM (`kill`)
+  6. Send SIGKILL (`kill -9`)
+
+If the Executor notices that all Processes in a Task have aborted during this sequence, it will
+not proceed with subsequent steps.  Note that graceful shutdown is best-effort, and due to the many
+inevitable realities of distributed systems, it may not be performed.
+
+### Giving Priority to Production Tasks: PREEMPTING
+
+Sometimes a Task needs to be interrupted, such as when a non-production
+Task's resources are needed by a higher priority production Task. This
+type of interruption is called a *pre-emption*. When this happens in
+Aurora, the non-production Task is killed and moved into
+the `PREEMPTING` state when both of the following are true:
+
+- The task being killed is a non-production task.
+- The other task is a `PENDING` production task that hasn't been
+  scheduled due to a lack of resources.
+
+Since production tasks are much more important, Aurora kills off the
+non-production task to free up resources for the production task. The
+scheduler UI shows the non-production task was preempted in favor of the
+production task. At some point, tasks in `PREEMPTING` move to `KILLED`.
+
+Note that non-production tasks consuming many resources are likely to be
+preempted in favor of production tasks.
+
+### Natural Termination: FINISHED, FAILED
+
+A `RUNNING` `Task` can terminate without direct user interaction. For
+example, it may be a finite computation that finishes, even something as
+simple as `echo hello world`. Or it could be an exceptional condition in
+a long-lived service. If the `Task` is successful (its underlying
+processes have succeeded with exit status `0` or finished without
+reaching failure limits) it moves into `FINISHED` state. If it finished
+after reaching a set of failure limits, it goes into `FAILED` state.
+
+### Forceful Termination: KILLING, RESTARTING
+
+You can terminate a `Task` by issuing an `aurora job kill` command, which
+moves it into `KILLING` state. The scheduler then sends the slave a
+request to terminate the `Task`. If the scheduler receives a successful
+response, it moves the Task into `KILLED` state and never restarts it.
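+
+For example, to kill just instance 0 of the tutorial's job (an illustrative
+invocation; `aurora job killall` terminates every instance instead):
+
+    aurora job kill devcluster/www-data/devel/hello_world/0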
+
+The scheduler has access to a non-public `RESTARTING` state. If a `Task`
+is forced into the `RESTARTING` state, the scheduler kills the
+underlying task but in parallel schedules an identical replacement for
+it.
+
+Configuration
+-------------
+
+You define and configure your Jobs (and their Tasks and Processes) in
+Aurora configuration files. Their filenames end with the `.aurora`
+suffix, and you write them in Python making use of the Pystachio
+templating language, along
+with specific Aurora, Mesos, and Thermos commands and methods. See the
+[Configuration Guide and Reference](/documentation/0.12.0/configuration-reference/) and
+[Configuration Tutorial](/documentation/0.12.0/configuration-tutorial/).
+
+Service Discovery
+-----------------
+
+It is possible for the Aurora executor to announce tasks into ServerSets for
+the purpose of service discovery.  ServerSets use the Zookeeper [group membership pattern](http://zookeeper.apache.org/doc/trunk/recipes.html#sc_outOfTheBox)
+of which there are several reference implementations:
+
+  - [C++](https://github.com/apache/mesos/blob/master/src/zookeeper/group.cpp)
+  - [Java](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/zookeeper/ServerSetImpl.java#L221)
+  - [Python](https://github.com/twitter/commons/blob/master/src/python/twitter/common/zookeeper/serverset/serverset.py#L51)
+
+These can also be used natively in Finagle using the [ZookeeperServerSetCluster](https://github.com/twitter/finagle/blob/master/finagle-serversets/src/main/scala/com/twitter/finagle/zookeeper/ZookeeperServerSetCluster.scala).
+
+For more information about how to configure announcing, see the [Configuration Reference](/documentation/0.12.0/configuration-reference/).
+
+Creating Jobs
+-------------
+
+You create and manipulate Aurora Jobs with the Aurora client, which starts all its
+command line commands with
+`aurora`. See [Aurora Client Commands](/documentation/0.12.0/client-commands/) for details
+about the Aurora Client.
+
+Interacting With Jobs
+---------------------
+
+You interact with Aurora jobs either via:
+
+- Read-only Web UIs
+
+  Part of the output from creating a new Job is a URL for the Job's scheduler UI page.
+
+  For example:
+
+      vagrant@precise64:~$ aurora job create devcluster/www-data/prod/hello \
+      /vagrant/examples/jobs/hello_world.aurora
+      INFO] Creating job hello
+      INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+      INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+  The "Job url" goes to the Job's scheduler UI page. To go to the overall scheduler UI page,
+  stop at the "scheduler" part of the URL, in this case, `http://precise64:8081/scheduler`
+
+  You can also reach the scheduler UI page via the Client command `aurora job open`:
+
+      aurora job open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+  If only the cluster is specified, it goes directly to that cluster's scheduler main page.
+  If the role is specified, it goes to the top-level role page. If the full job key is specified,
+  it goes directly to the job page where you can inspect individual tasks.
+
+  Once you click through to a role page, you see Jobs arranged separately by pending jobs, active
+  jobs, and finished jobs. Jobs are arranged by role, typically a service account for production
+  jobs and user accounts for test or development jobs.
+
+- The Aurora client
+
+  See [client commands](/documentation/0.12.0/client-commands/).
diff --git a/source/documentation/0.12.0/vagrant.md b/source/documentation/0.12.0/vagrant.md
new file mode 100644
index 0000000..e34461f
--- /dev/null
+++ b/source/documentation/0.12.0/vagrant.md
@@ -0,0 +1,137 @@
+Getting Started
+===============
+
+This document shows you how to configure a complete cluster using a virtual machine. This setup
+replicates a real cluster in your development machine as closely as possible. After you complete
+the steps outlined here, you will be ready to create and run your first Aurora job.
+
+The following sections describe these steps in detail:
+
+1. [Overview](#overview)
+1. [Install VirtualBox and Vagrant](#install-virtualbox-and-vagrant)
+1. [Clone the Aurora repository](#clone-the-aurora-repository)
+1. [Start the local cluster](#start-the-local-cluster)
+1. [Log onto the VM](#log-onto-the-vm)
+1. [Run your first job](#run-your-first-job)
+1. [Rebuild components](#rebuild-components)
+1. [Shut down or delete your local cluster](#shut-down-or-delete-your-local-cluster)
+1. [Troubleshooting](#troubleshooting)
+
+
+Overview
+--------
+
+The Aurora distribution includes a set of scripts that enable you to create a local cluster in
+your development machine. These scripts use [Vagrant](https://www.vagrantup.com/) and
+[VirtualBox](https://www.virtualbox.org/) to run and configure a virtual machine. Once the
+virtual machine is running, the scripts install and initialize Aurora and any required components
+to create the local cluster.
+
+
+Install VirtualBox and Vagrant
+------------------------------
+
+First, download and install [VirtualBox](https://www.virtualbox.org/) on your development machine.
+
+Then download and install [Vagrant](https://www.vagrantup.com/). To verify that the installation
+was successful, open a terminal window and type the `vagrant` command. You should see a list of
+common commands for this tool.
+
+
+Clone the Aurora repository
+---------------------------
+
+To obtain the Aurora source distribution, clone its Git repository using the following command:
+
+     git clone git://git.apache.org/aurora.git
+
+
+Start the local cluster
+-----------------------
+
+Now change into the `aurora/` directory, which contains the Aurora source code and
+other scripts and tools:
+
+     cd aurora/
+
+To start the local cluster, type the following command:
+
+     vagrant up
+
+This command uses the configuration scripts in the Aurora distribution to:
+
+* Download a Linux system image.
+* Start a virtual machine (VM) and configure it.
+* Install the required build tools on the VM.
+* Install Aurora's requirements (like [Mesos](http://mesos.apache.org/) and
+[Zookeeper](http://zookeeper.apache.org/)) on the VM.
+* Build and install Aurora from source on the VM.
+* Start Aurora's services on the VM.
+
+This process takes several minutes to complete.
+
+To verify that Aurora is running on the cluster, visit the following URLs:
+
+* Scheduler - http://192.168.33.7:8081
+* Observer - http://192.168.33.7:1338
+* Mesos Master - http://192.168.33.7:5050
+* Mesos Slave - http://192.168.33.7:5051
+
+
+Log onto the VM
+---------------
+
+To SSH into the VM, run the following command in your development machine:
+
+     vagrant ssh
+
+To verify that Aurora is installed in the VM, type the `aurora` command. You should see a list
+of arguments and possible commands.
+
+The `/vagrant` directory on the VM is mapped to the `aurora/` local directory
+from which you started the cluster. You can edit files inside this directory in your development
+machine and access them from the VM under `/vagrant`.
+
+A pre-installed `clusters.json` file refers to your local cluster as `devcluster`, which you
+will use in client commands.
+
+
+Run your first job
+------------------
+
+Now that your cluster is up and running, you are ready to define and run your first job in Aurora.
+For more information, see the [Aurora Tutorial](/documentation/0.12.0/tutorial/).
+
+
+Rebuild components
+------------------
+
+If you are changing Aurora code and would like to rebuild a component, you can use the `aurorabuild`
+command on the VM to build and restart a component.  This is considerably faster than destroying
+and rebuilding your VM.
+
+`aurorabuild` accepts a list of components to build and update; invoke it with no
+arguments to get the list of supported components. For example, to rebuild and restart the client:
+
+     vagrant ssh -c 'aurorabuild client'
+
+
+Shut down or delete your local cluster
+--------------------------------------
+
+To shut down your local cluster, run the `vagrant halt` command in your development machine. To
+start it again, run the `vagrant up` command.
+
+Once you are finished with your local cluster, or if you would otherwise like to start from scratch,
+you can use the command `vagrant destroy` to turn off and delete the virtual machine.
+
+
+Troubleshooting
+---------------
+
+Most Vagrant-related problems can be fixed with the following steps:
+
+* Destroying the vagrant environment with `vagrant destroy`
+* Killing any orphaned VMs (see AURORA-499) with `virtualbox` UI or `VBoxManage` command line tool
+* Cleaning the repository of build artifacts and other intermediate output with `git clean -fdx`
+* Bringing up the vagrant environment with `vagrant up`
diff --git a/source/documentation/0.13.0/additional-resources/presentations.md b/source/documentation/0.13.0/additional-resources/presentations.md
new file mode 100644
index 0000000..70623c6
--- /dev/null
+++ b/source/documentation/0.13.0/additional-resources/presentations.md
@@ -0,0 +1,80 @@
+# Apache Aurora Presentations
+Video and slides from presentations and panel discussions about Apache Aurora.
+
+_(Listed in date descending order)_
+
+<table>
+
+	<tr>
+		<td><img src="../images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png" alt="Mesos and Aurora on a Small Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=q5iIqhaCJ_o">Mesos &amp; Aurora on a Small Scale (Video)</a></strong>
+		<p>Presented by Florian Pfeiffer</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="../images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png" alt="SLA Aware Maintenance for Operators Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=tZ0-SISvCis">SLA Aware Maintenance for Operators (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="../images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png" alt="Shipping Code with Aurora Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=y1hi7K1lPkk">Shipping Code with Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="../images/presentations/09_20_2015_twitter_production_scale_thumb.png" alt="Twitter Production Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=nNrh-gdu9m4">Twitter’s Production Scale: Mesos and Aurora Operations (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="../images/presentations/04_30_2015_monolith_to_microservices_thumb.png" alt="From Monolith to Microservices with Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=yXkOgnyK4Hw">From Monolith to Microservices w/ Aurora (Video)</a></strong>
+		<p>Presented by Thanos Baskous, Tony Dong, Dobromir Montauk</p>
+		<p>April 30, 2015 at <a href="http://www.meetup.com/Bay-Area-Apache-Aurora-Users-Group/events/221219480/">Bay Area Apache Aurora Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="../images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png" alt="Aurora + Mesos in Practice at Twitter Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=1XYJGX_qZVU">Aurora + Mesos in Practice at Twitter (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 07, 2015 at <a href="http://www.bigeng.io/aurora-mesos-in-practice-at-twitter">Bigcommerce TechTalk</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="../images/presentations/02_28_2015_apache_aurora_thumb.png" alt="Apache Auroraの始めかた Slideshow Thumbnail" /></td>
+		<td><strong><a href="http://www.slideshare.net/zembutsu/apache-aurora-introduction-and-tutorial-osc15tk">Apache Auroraの始めかた (Slides)</a></strong>
+		<p>Presented by Masahito Zembutsu</p>
+		<p>February 28, 2015 at <a href="http://www.ospn.jp/osc2015-spring/">Open Source Conference 2015 Tokyo Spring</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="../images/presentations/02_19_2015_aurora_adopters_panel_thumb.png" alt="Apache Aurora Adopters Panel Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=2Jsj0zFdRlg">Apache Aurora Adopters Panel (Video)</a></strong>
+		<p>Panelists Ben Staffin, Josh Adams, Bill Farner, Berk Demir</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="../images/presentations/02_19_2015_aurora_at_twitter_thumb.png" alt="Operating Apache Aurora and Mesos at Twitter Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=E4lxX6epM_U">Operating Apache Aurora and Mesos at Twitter (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="../images/presentations/02_19_2015_aurora_at_tellapart_thumb.png" alt="Apache Aurora and Mesos at TellApart" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=ZZXtXLvTXAE">Apache Aurora and Mesos at TellApart (Video)</a></strong>
+		<p>Presented by Steve Niemitz</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="../images/presentations/08_21_2014_past_present_future_thumb.png" alt="Past, Present, and Future of the Aurora Scheduler Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=Dsc5CPhKs4o">Past, Present, and Future of the Aurora Scheduler (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 21, 2014 at <a href="http://events.linuxfoundation.org/events/archive/2014/mesoscon">#MesosCon 2014</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="../images/presentations/03_25_2014_introduction_to_aurora_thumb.png" alt="Introduction to Apache Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=asd_h6VzaJc">Introduction to Apache Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 25, 2014 at <a href="https://www.eventbrite.com/e/aurora-and-mesosframeworksmeetup-tickets-10850994617">Aurora and Mesos Frameworks Meetup</a></p></td>
+	</tr>
+</table>
diff --git a/source/documentation/0.13.0/additional-resources/tools.md b/source/documentation/0.13.0/additional-resources/tools.md
new file mode 100644
index 0000000..044bad7
--- /dev/null
+++ b/source/documentation/0.13.0/additional-resources/tools.md
@@ -0,0 +1,21 @@
+# Tools
+
+Various tools integrate with Aurora. Is there a tool missing? Let us know, or submit a patch to add it!
+
+* Load-balancing technology used to direct traffic to services running on Aurora:
+  - [synapse](https://github.com/airbnb/synapse) based on HAProxy
+  - [aurproxy](https://github.com/tellapart/aurproxy) based on nginx
+  - [jobhopper](https://github.com/benley/aurora-jobhopper) performs HTTP redirects for easy developer and administrator access
+
+* RPC libraries that integrate with Aurora's [service discovery mechanism](../../features/service-discovery/):
+  - [linkerd](https://linkerd.io/) RPC proxy
+  - [finagle](https://twitter.github.io/finagle) (Scala)
+  - [scales](https://github.com/steveniemitz/scales) (Python)
+
+* Monitoring:
+  - [collectd-aurora](https://github.com/zircote/collectd-aurora) for cluster monitoring using collectd
+  - [Prometheus Aurora exporter](https://github.com/tommyulfsparre/aurora_exporter) for cluster monitoring using Prometheus
+  - [Prometheus service discovery integration](http://prometheus.io/docs/operating/configuration/#zookeeper-serverset-sd-configurations-serverset_sd_config) for discovering and monitoring services running on Aurora
+
+* Packaging and deployment:
+  - [aurora-packaging](https://github.com/apache/aurora-packaging), the source of the official Aurora packages
diff --git a/source/documentation/0.13.0/contributing.md b/source/documentation/0.13.0/contributing.md
new file mode 100644
index 0000000..0a9b9d4
--- /dev/null
+++ b/source/documentation/0.13.0/contributing.md
@@ -0,0 +1,91 @@
+## Get the Source Code
+
+First things first, you'll need the source! The Aurora source is available from Apache git:
+
+    git clone https://gitbox.apache.org/repos/asf/aurora
+
+Read the Style Guides
+---------------------
+Aurora's codebase is primarily Java and Python and conforms to the Twitter Commons style guides for
+both languages.
+
+- [Java Style Guide](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/styleguide.md)
+- [Python Style Guide](https://github.com/twitter/commons/blob/master/src/python/twitter/common/styleguide.md)
+
+## Find Something to Do
+
+There are issues in [Jira](https://issues.apache.org/jira/browse/AURORA) with the
+["newbie" label](https://issues.apache.org/jira/issues/?jql=project%20%3D%20AURORA%20AND%20labels%20%3D%20newbie%20and%20resolution%3Dunresolved)
+that are good starting places for new Aurora contributors; pick one of these and dive in! Once
+you've got a patch, the next step is to post a review.
+
+## Getting your ReviewBoard Account
+
+Go to https://reviews.apache.org and create an account.
+
+## Setting up your ReviewBoard Environment
+
+Run `./rbt status`. The first time this runs it will bootstrap and you will be asked to log in.
+Subsequent runs will cache your login credentials.
+
+## Submitting a Patch for Review
+
+Post a review with `rbt`, fill out the fields in your browser and hit Publish.
+
+    ./rbt post -o
+
+If you're unsure about who to add as a reviewer, you can default to adding Bill Farner (wfarner) and
+Joshua Cohen (jcohen). They will take care of finding an appropriate reviewer for the patch.
+
+Once you've done this, you probably want to mark the associated Jira issue as Reviewable.
+
+## Updating an Existing Review
+
+Incorporate review feedback, make some more commits, update your existing review, fill out the
+fields in your browser and hit Publish.
+
+    ./rbt post -o -r <RB_ID>
+
+## Getting Your Review Merged
+
+If you're not an Aurora committer, one of the committers will merge your change in as described
+below. Generally, the last reviewer to give the review a 'Ship It!' will be responsible.
+
+### Merging Your Own Review (Committers)
+
+Once you have shipits from the right committers, merge your changes in a single commit and mark
+the review as submitted. The typical workflow is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>  # Verify the automatically-generated commit message looks sane,
+                            # editing if necessary.
+    git show master         # Verify everything looks sane
+    git push origin master
+    ./rbt close <RB_ID>
+
+Note that even if you're developing using feature branches you will not use `git merge` - each
+commit will be an atomic change accompanied by a ReviewBoard entry.
+
+### Merging Someone Else's Review
+
+Sometimes you'll need to merge someone else's RB. The typical workflow for this is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>
+    git show master  # Verify everything looks sane, author is correct
+    git push origin master
+
+Note for committers: while we generally use the commit message generated by `./rbt patch`, some
+changes are often required:
+
+1. Ensure that the commit message does not exceed 100 characters per line.
+2. Remove the "Testing Done" section. It's generally redundant (can be seen by checking the linked
+  review) or entirely irrelevant to the commit itself.
+
+## Cleaning Up
+
+Your patch has landed, congratulations! The last thing you'll want to do before moving on to your
+next fix is to clean up your Jira and ReviewBoard. The former should be marked as
+"Resolved" while the latter should be marked as "Submitted".
diff --git a/source/documentation/0.13.0/development/client.md b/source/documentation/0.13.0/development/client.md
new file mode 100644
index 0000000..41d44be
--- /dev/null
+++ b/source/documentation/0.13.0/development/client.md
@@ -0,0 +1,81 @@
+Developing the Aurora Client
+============================
+
+The client is written in Python, and uses the
+[Pants](http://pantsbuild.github.io/python-readme.html) build tool.
+
+
+Building and Testing
+--------------------
+
+Building and testing the client code are both done using Pants. The relevant targets to know about
+are:
+
+   * Build a client executable: `./pants binary src/main/python/apache/aurora/client:aurora`
+   * Test client code: `./pants test src/test/python/apache/aurora/client/cli:cli`
+
+If you want to build a source distribution of the client, you need to run `./build-support/release/make-python-sdists`.
+
+
+Running/Debugging
+------------------
+
+For manually testing client changes against a cluster, we use [Vagrant](https://www.vagrantup.com/).
+To start a virtual cluster, you need to install Vagrant, and then run `vagrant up` from the root of
+the aurora workspace. This will create a vagrant host named "devcluster", with a Mesos master, a set
+of Mesos agents, and an Aurora scheduler.
+
+If you have a change you would like to test in your local cluster, you can rebuild the client:
+
+    vagrant ssh -c 'aurorabuild client'
+
+Once this completes, the `aurora` command will reflect your changes.
+
+
+Running/Debugging in PyCharm
+-----------------------------
+
+It's possible to use PyCharm to run and debug both the client and client tests in an IDE. In order
+to do this, first run:
+
+    build-support/python/make-pycharm-virtualenv
+
+This script will configure a virtualenv with all of our Python requirements. Once the script
+completes it will emit instructions for configuring PyCharm:
+
+    Your PyCharm environment is now set up.  You can open the project root
+    directory with PyCharm.
+
+    Once the project is loaded:
+      - open project settings
+      - click 'Project Interpreter'
+      - click the cog in the upper-right corner
+      - click 'Add Local'
+      - select 'build-support/python/pycharm.venv/bin/python'
+      - click 'OK'
+
+### Running/Debugging Tests
+After following these instructions, you should now be able to run/debug tests directly from the IDE
+by right-clicking on a test (or test class) and choosing to run or debug:
+
+[![Debug Client Test](../images/debug-client-test.png)](../images/debug-client-test.png)
+
+If you've set a breakpoint, you can see the run will now stop and let you debug:
+
+[![Debugging Client Test](../images/debugging-client-test.png)](../images/debugging-client-test.png)
+
+### Running/Debugging the Client
+Actually running and debugging the client is unfortunately a bit more complex. You'll need to create
+a Run configuration:
+
+* Go to Run → Edit Configurations
+* Click the + icon to add a new configuration.
+* Choose python and name the configuration 'client'.
+* Set the script path to `/your/path/to/aurora/src/main/python/apache/aurora/client/cli/client.py`
+* Set the script parameters to the command you want to run (e.g. `job status <job key>`)
+* Expand the Environment section and click the ellipsis to add a new environment variable
+* Click the + at the bottom to add a new variable named AURORA_CONFIG_ROOT whose value is the
+  path where your cluster configuration can be found. For example, to talk to the scheduler
+  running in the vagrant image, it would be set to `/your/path/to/aurora/examples/vagrant` (this
+  is the directory where our example clusters.json is found).
+* You should now be able to run and debug this configuration!
diff --git a/source/documentation/0.13.0/development/committers-guide.md b/source/documentation/0.13.0/development/committers-guide.md
new file mode 100644
index 0000000..7974f32
--- /dev/null
+++ b/source/documentation/0.13.0/development/committers-guide.md
@@ -0,0 +1,86 @@
+Committer's Guide
+=================
+
+Information for official Apache Aurora committers.
+
+Setting up your email account
+-----------------------------
+Once your Apache ID has been set up, you can configure your account, add ssh keys, and set up an
+email forwarding address at
+
+    http://id.apache.org
+
+Additional instructions for setting up your new committer email can be found at
+
+    http://www.apache.org/dev/user-email.html
+
+The recommended setup is to configure all services (mailing lists, JIRA, ReviewBoard) to send
+emails to your @apache.org email address.
+
+
+Creating a gpg key for releases
+-------------------------------
+In order to create a release candidate you will need a gpg key published to an external key server
+and that key will need to be added to our KEYS file as well.
+
+1. Create a key:
+
+               gpg --gen-key
+
+2. Add your gpg key to the Apache Aurora KEYS file:
+
+               git clone https://gitbox.apache.org/repos/asf/aurora.git
+               (gpg --list-sigs <KEY ID> && gpg --armor --export <KEY ID>) >> KEYS
+               git add KEYS && git commit -m "Adding gpg key for <APACHE ID>"
+               ./rbt post -o -g
+
+3. Publish the key to an external key server:
+
+               gpg --keyserver pgp.mit.edu --send-keys <KEY ID>
+
+4. Propagate the changes to the KEYS file to the Apache Aurora svn dist locations listed below:
+
+               https://dist.apache.org/repos/dist/dev/aurora/KEYS
+               https://dist.apache.org/repos/dist/release/aurora/KEYS
+
+5. Add your key to git config for use with the release scripts:
+
+               git config --global user.signingkey <KEY ID>
+
+
+Creating a release
+------------------
+The following will guide you through the steps to create a release candidate, vote, and finally an
+official Apache Aurora release. Before starting, your gpg key should be in the KEYS file and you
+must have access to commit to the dist.a.o repositories.
+
+1. Ensure that all issues resolved for this release candidate are tagged with the correct Fix
+Version in JIRA; the changelog script will use this to generate the CHANGELOG in step #2.
+
+2. Create a release candidate. This will automatically update the CHANGELOG and commit it, create a
+branch and update the current version within the trunk. To create a minor version update and publish
+it, run
+
+               ./build-support/release/release-candidate -l m -p
+
+3. Update, if necessary, the draft email created from the `release-candidate` script in step #2 and
+send the [VOTE] email to the dev@ mailing list. You can verify the release signature and checksums
+by running
+
+               ./build-support/release/verify-release-candidate
+
+4. Wait for the vote to complete. If the vote fails, close it by replying to the initial [VOTE]
+email sent in step #3 with the subject edited to [RESULT][VOTE] ... and noting the failure reason
+(example [here](http://markmail.org/message/d4d6xtvj7vgwi76f)). Then address any issues, go back to
+step #1, and run again, this time using the -r flag to increment the release candidate
+version. This will automatically clean up the release candidate rc0 branch and source distribution.
+
+               ./build-support/release/release-candidate -l m -r 1 -p
+
+5. Once the vote has successfully passed, create the release
+
+               ./build-support/release/release
+
+6. Update the draft email created from the `release` script in step #5 to include the Apache IDs for
+all binding votes and send the [RESULT][VOTE] email to the dev@ mailing list.
+
diff --git a/source/documentation/0.13.0/development/db-migration.md b/source/documentation/0.13.0/development/db-migration.md
new file mode 100644
index 0000000..f0baedf
--- /dev/null
+++ b/source/documentation/0.13.0/development/db-migration.md
@@ -0,0 +1,33 @@
+DB Migrations
+=============
+
+Changes to the DB schema should be made in the form of migrations. This ensures that all changes
+are applied correctly after a DB dump from a previous version is restored.
+
+DB migrations are managed through a system built on top of
+[MyBatis Migrations](http://www.mybatis.org/migrations/). The migrations are run automatically when
+a snapshot is restored; no manual interaction is required by cluster operators.
+
+Upgrades
+--------
+When adding or altering tables or changing data, a new migration class should be created under the
+org.apache.aurora.scheduler.storage.db.migration package. The class should implement the
+[MigrationScript](https://github.com/mybatis/migrations/blob/master/src/main/java/org/apache/ibatis/migration/MigrationScript.java)
+interface (see [V001_TestMigration](https://github.com/apache/aurora/blob/rel/0.13.0/src/test/java/org/apache/aurora/scheduler/storage/db/testmigration/V001_TestMigration.java)
+as an example). The upgrade and downgrade scripts are defined in this class. When restoring a
+snapshot the list of migrations on the classpath is compared to the list of applied changes in the
+DB. Any changes that have not yet been applied are executed and their downgrade script is stored
+alongside the changelog entry in the database to facilitate downgrades in the event of a rollback.
+
+Downgrades
+----------
+If, while running migrations, a rollback is detected, i.e. a change exists in the DB changelog that
+does not exist on the classpath, the downgrade script associated with each affected change is
+applied.
+
+Baselines
+---------
+After enough time has passed (at least 1 official release), it should be safe to baseline migrations
+if desired. This can be accomplished by adding the changes from migrations directly to
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql), removing
+the corresponding migration classes and adding a migration to remove the changelog entries.
\ No newline at end of file
diff --git a/source/documentation/0.13.0/development/design-documents.md b/source/documentation/0.13.0/development/design-documents.md
new file mode 100644
index 0000000..93b56f4
--- /dev/null
+++ b/source/documentation/0.13.0/development/design-documents.md
@@ -0,0 +1,20 @@
+Design Documents
+================
+
+Since its inception as an Apache project, larger feature additions to the
+Aurora code base are discussed in the form of design documents. Design documents
+are living documents until a consensus has been reached to implement a feature
+in the proposed form.
+
+Current and past documents:
+
+* [Command Hooks for the Aurora Client](../design/command-hooks/)
+* [Health Checks for Updates](https://docs.google.com/document/d/1ZdgW8S4xMhvKW7iQUX99xZm10NXSxEWR0a-21FP5d94/edit)
+* [JobUpdateDiff thrift API](https://docs.google.com/document/d/1Fc_YhhV7fc4D9Xv6gJzpfooxbK4YWZcvzw6Bd3qVTL8/edit)
+* [REST API RFC](https://docs.google.com/document/d/11_lAsYIRlD5ETRzF2eSd3oa8LXAHYFD8rSetspYXaf4/edit)
+* [Revocable Mesos offers in Aurora](https://docs.google.com/document/d/1r1WCHgmPJp5wbrqSZLsgtxPNj3sULfHrSFmxp2GyPTo/edit)
+* [Supporting the Mesos Universal Containerizer](https://docs.google.com/document/d/111T09NBF2zjjl7HE95xglsDpRdKoZqhCRM5hHmOfTLA/edit?usp=sharing)
+* [Tier Management In Apache Aurora](https://docs.google.com/document/d/1erszT-HsWf1zCIfhbqHlsotHxWUvDyI2xUwNQQQxLgs/edit?usp=sharing)
+* [Ubiquitous Jobs](https://docs.google.com/document/d/12hr6GnUZU3mc7xsWRzMi3nQILGB-3vyUxvbG-6YmvdE/edit)
+
+Design documents can be found in the Aurora issue tracker via the query [`project = AURORA AND text ~ "docs.google.com" ORDER BY created`](https://issues.apache.org/jira/browse/AURORA-1528?jql=project%20%3D%20AURORA%20AND%20text%20~%20%22docs.google.com%22%20ORDER%20BY%20created).
diff --git a/source/documentation/0.13.0/development/design/command-hooks.md b/source/documentation/0.13.0/development/design/command-hooks.md
new file mode 100644
index 0000000..3f3f70f
--- /dev/null
+++ b/source/documentation/0.13.0/development/design/command-hooks.md
@@ -0,0 +1,102 @@
+# Command Hooks for the Aurora Client
+
+## Introduction/Motivation
+
+We've got hooks in the client that surround API calls. These are
+pretty awkward, because they don't correlate with user actions. For
+example, suppose we wanted a policy that said users weren't allowed to
+kill all instances of a production job at once.
+
+Right now, all that we could hook would be the "killJob" api call. But
+kill (at least in newer versions of the client) normally runs in
+batches. If a user called killall, what we would see on the API level
+is a series of "killJob" calls, each of which specified a batch of
+instances. We wouldn't be able to distinguish between really killing
+all instances of a job (which is forbidden under this policy), and
+carefully killing in batches (which is permitted). In each case, the
+hook would just see a series of API calls, and couldn't find out what
+the actual command being executed was!
+
+For most policy enforcement, what we really want to be able to do is
+look at and vet the commands that a user is performing, not the API
+calls that the client uses to implement those commands.
+
+So I propose that we add a new kind of hook, which surrounds noun/verb
+commands. A hook will register itself to handle a collection of (noun,
+verb) pairs. Whenever any of those noun/verb commands are invoked, the
+hook's methods will be called around the execution of the verb. A
+pre-hook will have the ability to reject a command, preventing the
+verb from being executed.
+
+## Registering Hooks
+
+These hooks will be registered via configuration plugins. A configuration plugin
+can register hooks using an API. Hooks registered this way are, effectively,
+hardwired into the client executable.
+
+The order of execution of hooks is unspecified: they may be called in
+any order. There is no way to guarantee that one hook will execute
+before some other hook.
+
+
+### Global Hooks
+
+Hooks registered via the Python API are called _global_ hooks,
+because they will run for all configurations, whether or not the
+configuration file specifies any hooks.
+
+In the implementation, hooks are registered in the module
+`apache.aurora.client.cli.command_hooks`, using the class
+`GlobalCommandHookRegistry`. A global hook can be registered by calling
+`GlobalCommandHookRegistry.register_command_hook` in a configuration plugin.
+
+### The API
+
+    class CommandHook(object):
+      @property
+      def name(self):
+        """Returns a name for the hook."
+
+      def get_nouns(self):
+        """Return the nouns that have verbs that should invoke this hook."""
+
+      def get_verbs(self, noun):
+        """Return the verbs for a particular noun that should invoke his hook."""
+
+      @abstractmethod
+      def pre_command(self, noun, verb, context, commandline):
+        """Execute a hook before invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook.
+        * commandline: the original argv collection used to invoke the client.
+        Returns: True if the command should be allowed to proceed; False if the command
+        should be rejected.
+        """
+
+      def post_command(self, noun, verb, context, commandline, result):
+        """Execute a hook after invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook.
+        * commandline: the original argv collection used to invoke the client.
+        * result: the result code returned by the verb.
+        Returns: nothing
+        """
+
+    class GlobalCommandHookRegistry(object):
+      @classmethod
+      def register_command_hook(cls, hook):
+        pass
+
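+As an illustration, a minimal hook might look like the following sketch. The
+class, its name, and the policy it enforces are hypothetical; only the
+`CommandHook` and `GlobalCommandHookRegistry` interfaces above are assumed:
+
+    class ProdKillallHook(CommandHook):
+      """A hypothetical hook that rejects 'job killall' outright."""
+
+      @property
+      def name(self):
+        return 'reject_killall'
+
+      def get_nouns(self):
+        return ['job']
+
+      def get_verbs(self, noun):
+        return ['killall'] if noun == 'job' else []
+
+      def pre_command(self, noun, verb, context, commandline):
+        # Returning False rejects the command before the verb executes.
+        return False
+
+      def post_command(self, noun, verb, context, commandline, result):
+        pass
+
+    # Registered from a configuration plugin:
+    GlobalCommandHookRegistry.register_command_hook(ProdKillallHook())
+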
+### Skipping Hooks
+
+To skip a hook, a user passes the command-line option `--skip-hooks`. The option can either
+name specific hooks to skip, or "all":
+
+* `aurora --skip-hooks=all job create east/bozo/devel/myjob` will create a job
+  without running any hooks.
+* `aurora --skip-hooks=test,iq job create east/bozo/devel/myjob` will create a job,
+  and will skip only the hooks named "test" and "iq".
diff --git a/source/documentation/0.13.0/development/scheduler.md b/source/documentation/0.13.0/development/scheduler.md
new file mode 100644
index 0000000..fc0836a
--- /dev/null
+++ b/source/documentation/0.13.0/development/scheduler.md
@@ -0,0 +1,118 @@
+Developing the Aurora Scheduler
+===============================
+
+The Aurora scheduler is written in Java and built with [Gradle](http://gradle.org).
+
+
+Prerequisite
+============
+
+When using Apache Aurora checked out from the source repository or the binary
+distribution, the Gradle wrapper and JavaScript dependencies are provided.
+However, you need to manually install them when using the source release
+downloads:
+
+1. Install Gradle following the instructions on the [Gradle web site](http://gradle.org)
+2. From the root directory of the Apache Aurora project generate the Gradle
+wrapper by running:
+
+    gradle wrapper
+
+
+Getting Started
+===============
+
+You will need Java 8 installed and on your `PATH` or unzipped somewhere with `JAVA_HOME` set. Then
+
+    ./gradlew tasks
+
+will bootstrap the build system and show available tasks. This can take a while the first time you
+run it but subsequent runs will be much faster due to cached artifacts.
+
+Running the Tests
+-----------------
+Aurora has a comprehensive unit test suite. To run the tests use
+
+    ./gradlew build
+
+Gradle will only re-run tests when dependencies of them have changed. To force a re-run of all
+tests use
+
+    ./gradlew clean build
+
+Running the build with code quality checks
+------------------------------------------
+To speed up development iteration, the plain gradle commands will not run static analysis tools.
+However, you should run these before posting a review diff, and **always** run this before pushing a
+commit to origin/master.
+
+    ./gradlew build -Pq
+
+Running integration tests
+-------------------------
+To run the same tests that are run in the Apache Aurora continuous integration
+environment:
+
+    ./build-support/jenkins/build.sh
+
+In addition, there is an end-to-end test that runs a suite of aurora commands
+using a virtual cluster:
+
+    ./src/test/sh/org/apache/aurora/e2e/test_end_to_end.sh
+
+Creating a bundle for deployment
+--------------------------------
+Gradle can create a zip file containing Aurora, all of its dependencies, and a launch script with
+
+    ./gradlew distZip
+
+or a tar file containing the same files with
+
+    ./gradlew distTar
+
+The output file will be written to `dist/distributions/aurora-scheduler.zip` or
+`dist/distributions/aurora-scheduler.tar`.
+
+
+
+Developing Aurora Java code
+===========================
+
+Setting up an IDE
+-----------------
+Gradle can generate project files for your IDE. To generate an IntelliJ IDEA project run
+
+    ./gradlew idea
+
+and import the generated `aurora.ipr` file.
+
+Adding or Upgrading a Dependency
+--------------------------------
+New dependencies can be added from Maven central by adding a `compile` dependency to `build.gradle`.
+For example, to add a dependency on `com.example`'s `example-lib` 1.0 add this block:
+
+    compile 'com.example:example-lib:1.0'
+
+NOTE: Anyone thinking about adding a new dependency should first familiarize themselves with the
+Apache Foundation's third-party licensing
+[policy](http://www.apache.org/legal/resolved.html#category-x).
+
+
+
+Developing the Aurora Build System
+==================================
+
+Bootstrapping Gradle
+--------------------
+The following files were autogenerated by `gradle wrapper` using gradle's
+[Wrapper](http://www.gradle.org/docs/current/dsl/org.gradle.api.tasks.wrapper.Wrapper.html) plugin and
+should not be modified directly:
+
+    ./gradlew
+    ./gradlew.bat
+    ./gradle/wrapper/gradle-wrapper.jar
+    ./gradle/wrapper/gradle-wrapper.properties
+
+To upgrade Gradle unpack the new version somewhere, run `/path/to/new/gradle wrapper` in the
+repository root and commit the changed files.
+
diff --git a/source/documentation/0.13.0/development/thermos.md b/source/documentation/0.13.0/development/thermos.md
new file mode 100644
index 0000000..0efe130
--- /dev/null
+++ b/source/documentation/0.13.0/development/thermos.md
@@ -0,0 +1,126 @@
+The Python components of Aurora are built using [Pants](https://pantsbuild.github.io).
+
+
+Python Build Conventions
+========================
+The Python code is laid out according to the following conventions:
+
+1. 1 `BUILD` per 3rd level directory. For a list of current top-level packages run:
+
+        % find src/main/python -maxdepth 3 -mindepth 3 -type d |\
+        while read dname; do echo $dname |\
+            sed 's@src/main/python/\(.*\)/\(.*\)/\(.*\).*@\1.\2.\3@'; done
+
+2.  Each `BUILD` file exports 1
+    [`python_library`](https://pantsbuild.github.io/build_dictionary.html#bdict_python_library)
+    that provides a
+    [`setup_py`](https://pantsbuild.github.io/build_dictionary.html#setup_py)
+    containing each
+    [`python_binary`](https://pantsbuild.github.io/build_dictionary.html#python_binary)
+    in the `BUILD` file, named the same as the directory it's in so that it can be referenced
+    without a ':' character. The `sources` field in the `python_library` will almost always be
+    `rglobs('*.py')`.
+
+3.  Other BUILD files may only depend on this single public `python_library`
+    target. Any other target is considered a private implementation detail and
+    should be prefixed with an `_`.
+
+4.  `python_binary` targets are always named the same as the exported console script.
+
+5.  `python_binary` targets must have identical `dependencies` to the `python_library` exported
+    by the package and must use `entry_point`.
+
+    This means a PEX file generated by pants will contain exactly the same files that will be
+    available on the `PYTHONPATH` in the case of `pip install` of the corresponding library
+    target. This will help our migration off of Pants in the future.
+
+Annotated example - apache.thermos.runner
+-----------------------------------------
+
+    % find src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner/__init__.py
+    src/main/python/apache/thermos/runner/thermos_runner.py
+    src/main/python/apache/thermos/runner/BUILD
+    % cat src/main/python/apache/thermos/runner/BUILD
+    # License boilerplate omitted
+    import os
+
+
+    # Private target so that a setup_py can exist without a circular dependency. Only targets within
+    # this file should depend on this.
+    python_library(
+      name = '_runner',
+      # The target covers every python file under this directory and subdirectories.
+      sources = rglobs('*.py'),
+      dependencies = [
+        '3rdparty/python:twitter.common.app',
+        '3rdparty/python:twitter.common.log',
+        # Source dependencies are always referenced without a ':'.
+        'src/main/python/apache/thermos/common',
+        'src/main/python/apache/thermos/config',
+        'src/main/python/apache/thermos/core',
+      ],
+    )
+
+    # Binary target for thermos_runner.pex. Nothing should depend on this - it's only used as an
+    # argument to ./pants binary.
+    python_binary(
+      name = 'thermos_runner',
+      # Use entry_point, not source so the files used here are the same ones tests see.
+      entry_point = 'apache.thermos.bin.thermos_runner',
+      dependencies = [
+        # Notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+    )
+
+    # The public library that everyone importing the runner symbols uses.
+    # The test targets and any other dependent source code should depend on this.
+    python_library(
+      name = 'runner',
+      dependencies = [
+        # Again, notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+      # We always provide a setup_py. This will cause any dependee libraries to automatically
+      # reference this library in their requirements.txt rather than copy the source files into their
+      # sdist.
+      provides = setup_py(
+        # Conventionally named and versioned.
+        name = 'apache.thermos.runner',
+        version = open(os.path.join(get_buildroot(), '.auroraversion')).read().strip().upper(),
+      ).with_binaries({
+        # Every binary in this file should also be repeated here.
+        # Always use the dict-form of .with_binaries so that commands with dashes in their names are
+        # supported.
+        # The console script name is always the same as the PEX with .pex stripped.
+        'thermos_runner': ':thermos_runner',
+      }),
+    )
+
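+To build the runner PEX from this target, one would run something like the
+following (a sketch; the target address mirrors the BUILD file above):
+
+    % ./pants binary src/main/python/apache/thermos/runner:thermos_runner
+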
+
+
+Thermos Test resources
+======================
+
+The Aurora source repository and distributions contain several
+[binary files](../../src/test/resources/org/apache/thermos/root/checkpoints) to
+qualify the backwards-compatibility of thermos with checkpoint data. Since
+thermos persists state to disk (to be read by the thermos observer), it is important that we have
+tests that prevent regressions affecting the ability to parse previously-written data.
+
+The files included represent persisted checkpoints that exercise different
+features of thermos. The existing files should not be modified unless
+we are accepting backwards incompatibility, such as with a major release.
+
+It is not practical to write source code to generate these files on the fly,
+as source would be vulnerable to drift (e.g. due to refactoring) in ways
+that would undermine the goal of ensuring backwards compatibility.
+
+The most common reason to add a new checkpoint file would be to provide
+coverage for new thermos features that alter the data format. This is
+accomplished by writing and running a
+[job configuration](../../reference/configuration/) that exercises the feature, and
+copying the checkpoint file from the sandbox directory (by default
+`/var/run/thermos/checkpoints/<aurora task id>`).
diff --git a/source/documentation/0.13.0/development/thrift.md b/source/documentation/0.13.0/development/thrift.md
new file mode 100644
index 0000000..ce65a89
--- /dev/null
+++ b/source/documentation/0.13.0/development/thrift.md
@@ -0,0 +1,54 @@
+Thrift
+======
+
+Aurora uses [Apache Thrift](https://thrift.apache.org/) for representing structured data in
+client/server RPC protocol as well as for internal data storage. While Thrift is capable of
+correctly handling additions and renames of the existing members, field removals must be done
+carefully to ensure backwards compatibility and provide a predictable deprecation cycle. This
+document describes general guidelines for making Thrift schema changes to the existing fields in
+[api.thrift](https://github.com/apache/aurora/blob/rel/0.13.0/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+It is highly recommended to go through the
+[Thrift: The Missing Guide](http://diwakergupta.github.io/thrift-missing-guide/) first to refresh on
+basic Thrift schema concepts.
+
+Checklist
+---------
+Every existing Thrift schema modification is unique in its requirements and must be analyzed
+carefully to identify its scope and expected consequences. The following checklist may help in that
+analysis:
+* Is this a new field/struct? If yes, go ahead
+* Is this a pure field/struct rename without any type/structure change? If yes, go ahead and rename
+* Anything else, read further to make sure your change is properly planned
+
+Deprecation cycle
+-----------------
+Any time a breaking change (e.g.: field replacement or removal) is required, the following cycle
+must be followed:
+
+### vCurrent
+The change is applied in a way that does not break the ability of a scheduler/client at this
+version to communicate with a scheduler/client from vCurrent-1.
+* Do not remove or rename the old field
+* Add a new field as an eventual replacement of the old one and implement a dual read/write
+anywhere the old field is used. If a thrift struct is mapped in the DB store, make sure both columns
+are marked as `NOT NULL`
+* Check [storage.thrift](https://github.com/apache/aurora/blob/rel/0.13.0/api/src/main/thrift/org/apache/aurora/gen/storage.thrift) to see if
+the affected struct is stored in Aurora scheduler storage. If so, it's almost certainly also
+necessary to perform a [DB migration](../db-migration/).
+* Add a deprecation jira ticket into the vCurrent+1 release candidate
+* Add a TODO for the deprecated field mentioning the jira ticket
+
+### vCurrent+1
+Finalize the change by removing the deprecated fields from the Thrift schema.
+* Drop any dual read/write routines added in the previous version
+* Remove thrift backfilling in scheduler
+* Remove the deprecated Thrift field
+
+Testing
+-------
+It's always advisable to test your changes in the local vagrant environment to build more
+confidence that your change is backwards compatible. It's easy to simulate different
+client/scheduler versions by playing with the `aurorabuild` command. See [this document](../../getting-started/vagrant/)
+for more.
+
diff --git a/source/documentation/0.13.0/development/ui.md b/source/documentation/0.13.0/development/ui.md
new file mode 100644
index 0000000..f41614e
--- /dev/null
+++ b/source/documentation/0.13.0/development/ui.md
@@ -0,0 +1,46 @@
+Developing the Aurora Scheduler UI
+==================================
+
+Installing bower (optional)
+----------------------------
+Third party JS libraries used in Aurora (located at 3rdparty/javascript/bower_components) are
+managed by bower, a JS dependency manager. Bower is only required if you plan to add, remove or
+update JS libraries. Bower can be installed using the following command:
+
+    npm install -g bower
+
+Bower depends on node.js and npm. The easiest way to install node on a Mac is via brew:
+
+    brew install node
+
+For more node.js installation options refer to https://github.com/joyent/node/wiki/Installation.
+
+More info on installing and using bower can be found at: http://bower.io/. Once installed, you can
+use the following commands to view and modify the bower repo at
+3rdparty/javascript/bower_components:
+
+    bower list
+    bower install <library name>
+    bower remove <library name>
+    bower update <library name>
+    bower help
+
+
+Faster Iteration in Vagrant
+---------------------------
+The scheduler serves UI assets from the classpath. For production deployments this means the assets
+are served from within a jar. However, for faster development iteration, the vagrant image is
+configured to add the `scheduler` subtree of `/vagrant/dist/resources/main` to the head of
+`CLASSPATH`. This path is configured as a shared filesystem to the path on the host system where
+your Aurora repository lives. This means that any updates under `dist/resources/main/scheduler` in
+your checkout will be reflected immediately in the UI served from within the vagrant image.
+
+The one caveat to this is that this path is under `dist` not `src`. This is because the assets must
+be processed by gradle before they can be served. So, unfortunately, you cannot just save your local
+changes and see them reflected in the UI; you must first run `./gradlew processResources`. This is
+less than ideal, but better than having to restart the scheduler after every change. Additionally,
+gradle makes this process somewhat easier with the use of the `--continuous` flag. If you run:
+`./gradlew processResources --continuous` gradle will monitor the filesystem for changes and run the
+task automatically as necessary. This doesn't quite provide hot-reload capabilities, but it does
+allow for <5s from save to changes being visible in the UI with no further action required on the
+part of the developer.
diff --git a/source/documentation/0.13.0/features/constraints.md b/source/documentation/0.13.0/features/constraints.md
new file mode 100644
index 0000000..082efb0
--- /dev/null
+++ b/source/documentation/0.13.0/features/constraints.md
@@ -0,0 +1,126 @@
+Scheduling Constraints
+======================
+
+By default, Aurora will pick any random slave with sufficient resources
+in order to schedule a task. This scheduling choice can be further
+restricted with the help of constraints.
+
+
+Mesos Attributes
+----------------
+
+Data centers are often organized with hierarchical failure domains.  Common failure domains
+include hosts, racks, rows, and PDUs.  If you have this information available, it is wise to tag
+the Mesos slave with them as
+[attributes](https://mesos.apache.org/documentation/attributes-resources/).
+
+The Mesos slave `--attributes` command line argument can be used to mark slaves with
+static key/value pairs, so-called attributes (not to be confused with `--resources`, which are
+dynamic and accounted).
+
+For example, consider the host `cluster1-aaa-03-sr2` and its following attributes (given in
+key:value format): `host:cluster1-aaa-03-sr2` and `rack:aaa`.
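+
+Such a slave could be started roughly as follows (a sketch; multiple
+attributes are separated by semicolons):
+
+    mesos-slave --attributes="host:cluster1-aaa-03-sr2;rack:aaa" ...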
+
+Aurora makes these attributes available for matching with scheduling constraints.
+
+
+Limit Constraints
+-----------------
+
+Limit constraints allow you to control machine diversity. The below
+constraint ensures that no more than two instances of your job may run on a single host.
+Think of this as a "group by" limit.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'host': 'limit:2',
+      }
+      ...
+    )
+
+
+Likewise, you can use constraints to control rack diversity, e.g. at
+most one task per rack:
+
+    constraints = {
+      'rack': 'limit:1',
+    }
+
+Use these constraints sparingly, as they can dramatically reduce your tasks' schedulability.
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+
+Value Constraints
+-----------------
+
+Value constraints can be used to express that a certain attribute with a certain value
+should be present on a Mesos slave. For example, the following job would only be
+scheduled on nodes that claim to have an `SSD` as their disk.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'disk': 'SSD',
+      }
+      ...
+    )
+
+
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+Running stateful services
+-------------------------
+
+Aurora is best suited to run stateless applications, but it also accommodates stateful services
+like databases, or services that otherwise need to always run on the same machines.
+
+### Dedicated attribute
+
+Most Mesos attributes are arbitrary and available for custom use.  There is one exception,
+though: the `dedicated` attribute.  Aurora treats this attribute specially: only matching jobs
+may run on these machines, and matching jobs will be scheduled only on these machines.
+
+
+#### Syntax
+The dedicated attribute has semantic meaning. The format is `$role(/.*)?`. When a job is created,
+the scheduler requires that the `$role` component matches the `role` field in the job
+configuration, and will reject the job creation otherwise.  The remainder of the attribute is
+free-form. We've developed the idiom of formatting this attribute as `$role/$job`, but do not
+enforce this. For example: a job `devcluster/www-data/prod/hello` with a dedicated constraint set as
+`www-data/web.multi` will have its tasks scheduled only on Mesos slaves configured with:
+`--attributes=dedicated:www-data/web.multi`.
+
+A wildcard (`*`) may be used for the role portion of the dedicated attribute, which will allow any
+owner to elect for a job to run on the host(s). For example: tasks from both
+`devcluster/www-data/prod/hello` and `devcluster/vagrant/test/hello` with a dedicated constraint
+formatted as `*/web.multi` will be scheduled only on Mesos slaves configured with
+`--attributes=dedicated:*/web.multi`. This may be useful when assembling a virtual cluster of
+machines sharing the same set of traits or requirements.
+
+##### Example
+Consider the following slave command line:
+
+    mesos-slave --attributes="dedicated:db_team/redis" ...
+
+And this job configuration:
+
+    Service(
+      name = 'redis',
+      role = 'db_team',
+      constraints = {
+        'dedicated': 'db_team/redis'
+      }
+      ...
+    )
+
+The job configuration indicates that this job should only be scheduled on slaves with the attribute
+`dedicated:db_team/redis`.  Additionally, Aurora will prevent any tasks that do _not_ have that
+constraint from running on those slaves.
+
diff --git a/source/documentation/0.13.0/features/containers.md b/source/documentation/0.13.0/features/containers.md
new file mode 100644
index 0000000..9979d5a
--- /dev/null
+++ b/source/documentation/0.13.0/features/containers.md
@@ -0,0 +1,43 @@
+Containers
+==========
+
+
+Docker
+------
+
+Aurora has optional support for launching Docker containers, if correctly [configured by an Operator](../../operations/configuration/#docker-containers).
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+
+    $ cat /vagrant/examples/jobs/docker/hello_docker.aurora
+    hello_docker = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    hello_world_docker = Task(
+      name = 'hello docker',
+      processes = [hello_docker],
+      resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB)
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'docker-test',
+        name = 'hello_docker',
+        task = hello_world_docker,
+        container = Container(docker = Docker(image = 'python:2.7'))
+      )
+    ]
+
+
+In order to correctly execute processes inside a job, the docker container must have Python 2.7
+installed. Further details of how to use Docker can be found in the
+[Reference Documentation](../../reference/configuration/#docker-object).
diff --git a/source/documentation/0.13.0/features/cron-jobs.md b/source/documentation/0.13.0/features/cron-jobs.md
new file mode 100644
index 0000000..bcc94dc
--- /dev/null
+++ b/source/documentation/0.13.0/features/cron-jobs.md
@@ -0,0 +1,124 @@
+# Cron Jobs
+
+Aurora supports execution of scheduled jobs on a Mesos cluster using cron-style syntax.
+
+- [Overview](#overview)
+- [Collision Policies](#collision-policies)
+- [Failure recovery](#failure-recovery)
+- [Interacting with cron jobs via the Aurora CLI](#interacting-with-cron-jobs-via-the-aurora-cli)
+	- [cron schedule](#cron-schedule)
+	- [cron deschedule](#cron-deschedule)
+	- [cron start](#cron-start)
+	- [job killall, job restart, job kill](#job-killall-job-restart-job-kill)
+- [Technical Note About Syntax](#technical-note-about-syntax)
+- [Caveats](#caveats)
+	- [Failovers](#failovers)
+	- [Collision policy is best-effort](#collision-policy-is-best-effort)
+	- [Timezone Configuration](#timezone-configuration)
+
+## Overview
+
+A job is identified as a cron job by the presence of a
+`cron_schedule` attribute containing a cron-style schedule in the
+[`Job`](../../reference/configuration/#job-objects) object. Examples of cron schedules
+include "every 5 minutes" (`*/5 * * * *`), "Fridays at 17:00" (`* 17 * * FRI`), and
+"the 1st and 15th day of the month at 03:00" (`0 3 1,15 *`).
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/cron_hello_world.aurora
+    # A cron job that runs every 5 minutes.
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]
+
+## Collision Policies
+
+The `cron_collision_policy` field specifies the scheduler's behavior when a new cron job is
+triggered while an older run hasn't finished. The scheduler has two policies available:
+
+* `KILL_EXISTING`: The default policy - on a collision the old instances are killed and instances with the current
+configuration are started.
+* `CANCEL_NEW`: On a collision the new run is cancelled.
+
+Note that the use of `CANCEL_NEW` is likely a code smell - interrupted cron jobs should be able
+to recover their progress on a subsequent invocation, otherwise they risk having their work queue
+grow faster than they can process it.
+
+## Failure recovery
+
+Unlike with services, which Aurora will always re-execute regardless of exit status, instances of
+cron jobs retry according to the `max_task_failures` attribute of the
+[Task](../../reference/configuration/#task-object) object. To get "run-until-success" semantics,
+set `max_task_failures` to `-1`.
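+
+For example, a minimal sketch of a cron task with run-until-success semantics
+(the process name and command are hypothetical):
+
+    task = Task(
+      name = 'ingest',
+      processes = [Process(name = 'ingest', cmdline = './ingest.sh')],
+      resources = Resources(cpu = 0.5, ram = 256*MB, disk = 128*MB),
+      # Retry failed runs until the task eventually succeeds.
+      max_task_failures = -1,
+    )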
+
+## Interacting with cron jobs via the Aurora CLI
+
+Most interaction with cron jobs takes place using the `cron` subcommand. See `aurora cron -h`
+for up-to-date usage instructions.
+
+### cron schedule
+Schedules a new cron job on the Aurora cluster for later runs or replaces the existing cron template
+with a new one. Only future runs will be affected; any existing active tasks are left intact.
+
+    $ aurora cron schedule devcluster/www-data/test/cron_hello_world /vagrant/examples/jobs/cron_hello_world.aurora
+
+### cron deschedule
+Deschedules a cron job, preventing future runs but allowing current runs to complete.
+
+    $ aurora cron deschedule devcluster/www-data/test/cron_hello_world
+
+### cron start
+Start a cron job immediately, outside of its normal cron schedule.
+
+    $ aurora cron start devcluster/www-data/test/cron_hello_world
+
+### job killall, job restart, job kill
+Cron jobs create instances running on the cluster that you can interact with like normal Aurora
+tasks with `job kill` and `job restart`.
+
+
+## Technical Note About Syntax
+
+`cron_schedule` uses a restricted subset of BSD crontab syntax. While the
+execution engine currently uses Quartz, the schedule is parsed by custom code that accepts a
+subset of FreeBSD [crontab(5)](http://www.freebsd.org/cgi/man.cgi?crontab(5)) syntax. See
+[the source](https://github.com/apache/aurora/blob/master/src/main/java/org/apache/aurora/scheduler/cron/CrontabEntry.java#L106-L124)
+for details.
+
+
+## Caveats
+
+### Failovers
+No failover recovery. Aurora does not record the latest minute it fired
+triggers for across failovers. Therefore it's possible to miss triggers
+on failover. Note that this behavior may change in the future.
+
+It's necessary to sync time between schedulers with something like `ntpd`.
+Clock skew could cause double or missed triggers in the case of a failover.
+
+### Collision policy is best-effort
+Aurora aims to always have *at least one copy* of a given instance running at a time - it's
+an AP system, meaning it chooses Availability and Partition Tolerance at the expense of
+Consistency.
+
+If your collision policy was `CANCEL_NEW` and a task has terminated but
+Aurora has not noticed this, Aurora will go ahead and create your new
+task.
+
+If your collision policy was `KILL_EXISTING` and a task was marked `LOST`
+but not yet GCed, Aurora will go ahead and create your new task without
+attempting to kill the old one (outside the GC interval).
+
+### Timezone Configuration
+Cron timezone is configured independently of the JVM timezone with the `-cron_timezone` flag and
+defaults to UTC.
diff --git a/source/documentation/0.13.0/features/job-updates.md b/source/documentation/0.13.0/features/job-updates.md
new file mode 100644
index 0000000..319a047
--- /dev/null
+++ b/source/documentation/0.13.0/features/job-updates.md
@@ -0,0 +1,111 @@
+Aurora Job Updates
+==================
+
+`Job` configurations can be updated at any point in their lifecycle.
+Usually updates are done incrementally using a process called a *rolling
+upgrade*, in which Tasks are upgraded in small groups, one group at a
+time.  Updates are done using various Aurora Client commands.
+
+
+Rolling Job Updates
+-------------------
+
+There are several sub-commands to manage job updates:
+
+    aurora update start <job key> <configuration file>
+    aurora update info <job key>
+    aurora update pause <job key>
+    aurora update resume <job key>
+    aurora update abort <job key>
+    aurora update list <cluster>
+
+When you `start` a job update, the command will return once it has sent the
+instructions to the scheduler.  At that point, you may view detailed
+progress for the update with the `info` subcommand, in addition to viewing
+graphical progress in the web browser.  You may also get a full listing of
+in-progress updates in a cluster with `list`.
+
+Once an update has been started, you can `pause` to keep the update but halt
+progress.  This can be useful for things like debugging a partially-updated
+job to determine whether you would like to proceed.  You can `resume` to
+proceed.
+
+You may `abort` a job update regardless of the state it is in. This will
+instruct the scheduler to completely abandon the job update and leave the job
+in the current (possibly partially-updated) state.
+
+For a configuration update, the Aurora Client calculates required changes
+by examining the current job config state and the new desired job config.
+It then starts a *rolling batched update process* by going through every batch
+and performing these operations:
+
+- If an instance is present in the scheduler but isn't in the new config,
+  then that instance is killed.
+- If an instance is not present in the scheduler but is present in
+  the new config, then the instance is created.
+- If an instance is present in both the scheduler and the new config, then
+  the client diffs both task configs. If it detects any changes, it
+  performs an instance update by killing the old config instance and adding
+  the new config instance.
+
+The Aurora client continues through the instance list until all tasks are
+updated, in `RUNNING`, and healthy for a configurable amount of time.
+If the client determines the update is not going well (a percentage of health
+checks have failed), it cancels the update.
+
+Update cancellation runs a procedure similar to the update sequence described
+above, but in reverse order. New instance configs are swapped
+with old instance configs and batch updates proceed backwards
+from the point where the update failed. For example, (0,1,2) (3,4,5) (6,7,
+8-FAIL) results in a rollback in the order (8,7,6) (5,4,3) (2,1,0).
+
+For details on how to control a job update, please see the
+[UpdateConfig](../../reference/configuration/#updateconfig-objects) configuration object.
+
+
+Coordinated Job Updates
+------------------------
+
+Some Aurora services may benefit from having more control over updates by explicitly
+acknowledging ("heartbeating") job update progress. This may be helpful for mission-critical
+service updates where explicit job health monitoring is vital during the entire job update
+lifecycle. Such job updates would rely on an external service (or a custom client) periodically
+pulsing an active coordinated job update via a
+[pulseJobUpdate RPC](https://github.com/apache/aurora/blob/rel/0.13.0/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+A coordinated update is defined by setting a positive
+[pulse_interval_secs](../../reference/configuration/#updateconfig-objects) value in the job
+configuration file. If no pulses are received within the specified interval, the update will be blocked. A blocked
+update is unable to continue rolling forward (or rolling back) but retains its active status.
+It may only be unblocked by a fresh `pulseJobUpdate` call.
+
+NOTE: A coordinated update starts in `ROLL_FORWARD_AWAITING_PULSE` state and will not make any
+progress until the first pulse arrives. However, a paused update (`ROLL_FORWARD_PAUSED` or
+`ROLL_BACK_PAUSED`) is still considered active and upon resuming will immediately make progress
+provided the pulse interval has not expired.
+
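+A minimal sketch of requesting a coordinated update in a job configuration
+(the 60-second interval is illustrative):
+
+    update_config = UpdateConfig(
+      # Block the update unless a pulseJobUpdate RPC arrives at least
+      # once every 60 seconds.
+      pulse_interval_secs = 60,
+    )
+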
+
+Canary Deployments
+------------------
+
+Canary deployments are a pattern for rolling out updates to a subset of job instances,
+in order to test different code versions alongside the actual production job.
+It is a risk-mitigation strategy for job owners and commonly used in a form where
+job instance 0 runs with a different configuration than instances 1-N.
+
+For example, consider a job with 4 instances that each
+request 1 core of cpu, 1 GB of RAM, and 1 GB of disk space as specified
+in the configuration file `hello_world.aurora`. If you want to
+update it so it requests 2 GB of RAM instead of 1, you can create a new
+configuration file called `new_hello_world.aurora` and
+issue
+
+    aurora update start <job_key_value>/0-1 new_hello_world.aurora
+
+This results in instances 0 and 1 having 1 cpu, 2 GB of RAM, and 1 GB of disk space,
+while instances 2 and 3 have 1 cpu, 1 GB of RAM, and 1 GB of disk space. If instance 3
+dies and restarts, it restarts with 1 cpu, 1 GB RAM, and 1 GB disk space.
+
+This means that two task configurations exist for the same job at the same
+time, each valid for a different range of instances. While this isn't a recommended
+pattern, it is valid and supported by the Aurora scheduler.
diff --git a/source/documentation/0.13.0/features/multitenancy.md b/source/documentation/0.13.0/features/multitenancy.md
new file mode 100644
index 0000000..924e928
--- /dev/null
+++ b/source/documentation/0.13.0/features/multitenancy.md
@@ -0,0 +1,62 @@
+Multitenancy
+============
+
+Aurora is a multi-tenant system that can run jobs of multiple clients/tenants.
+Going beyond the [resource isolation on an individual host](../resource-isolation/), it is
+crucial to prevent those jobs from stepping on each other's toes.
+
+
+Job Namespaces
+--------------
+
+The namespace for jobs in Aurora follows a hierarchical structure. This is meant to make it easier
+to differentiate between different jobs. A job key consists of four parts. The four parts are
+`<cluster>/<role>/<environment>/<jobname>` in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts.
+* Environment names are namespaces.
+* Jobname is the custom name of your job.
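+
+For example, in the job key `devcluster/www-data/prod/hello_world`, the
+cluster is `devcluster`, the role is `www-data`, the environment is `prod`,
+and the job name is `hello_world`.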
+
+Role names correspond to user accounts. They are used for
+[authentication](../../operations/security/), as the linux user used to run jobs, and for the
+assignment of [quota](#preemption). If you don't know what accounts are available, contact your
+sysadmin.
+
+The environment component in the job key serves as a namespace. The values for
+environment are validated in the client and the scheduler so as to allow any of `devel`, `test`,
+`production`, and any value matching the regular expression `staging[0-9]*`.
+
+None of the values imply any difference in the scheduling behavior. Conventionally, the
+"environment" is set so as to indicate a certain level of stability in the behavior of the job
+by ensuring that an appropriate level of testing has been performed on the application code. e.g.
+in the case of a typical Job, releases may progress through the following phases in order of
+increasing level of stability: `devel`, `test`, `staging`, `production`.
+
+
+Preemption
+----------
+
+In order to guarantee that important production jobs are always running, Aurora supports
+preemption.
+
+Consider a pending job that is a candidate for scheduling, but a resource shortage
+prevents scheduling it. Active tasks can become victims of preemption if:
+
+ - both candidate and victim are owned by the same role and the
+   [priority](../../reference/configuration/#job-objects) of a victim is lower than the
+   [priority](../../reference/configuration/#job-objects) of the candidate.
+ - OR a victim is non-[production](../../reference/configuration/#job-objects) and the candidate is
+   [production](../../reference/configuration/#job-objects).
+
+In other words, tasks from [production](../../reference/configuration/#job-objects) jobs may preempt
+tasks from any non-production job. However, a production task may only be preempted by tasks from
+production jobs in the same role with higher [priority](../../reference/configuration/#job-objects).
+
+Aurora requires resource quotas for [production non-dedicated jobs](../../reference/configuration/#job-objects).
+Quota is enforced at the job role level and when set, defines a non-preemptible pool of compute resources within
+that role. All job types (service, adhoc or cron) require role resource quota unless a job has
+[dedicated constraint set](../constraints/#dedicated-attribute).
+
+To grant quota to a particular role in production, an operator can use the command
+`aurora_admin set_quota`.
diff --git a/source/documentation/0.13.0/features/resource-isolation.md b/source/documentation/0.13.0/features/resource-isolation.md
new file mode 100644
index 0000000..0ba664b
--- /dev/null
+++ b/source/documentation/0.13.0/features/resource-isolation.md
@@ -0,0 +1,167 @@
+Resources Isolation and Sizing
+==============================
+
+- [Isolation](#isolation)
+- [Sizing](#sizing)
+- [Oversubscription](#oversubscription)
+
+
+Isolation
+---------
+
+Aurora is a multi-tenant system; a single software instance runs on a
+server, serving multiple clients/tenants. To share resources among
+tenants, it implements isolation of:
+
+* CPU
+* memory
+* disk space
+
+CPU is a soft limit, and handled differently from memory and disk space.
+Too low a CPU value results in throttling your application and
+slowing it down. Memory and disk space are both hard limits; when your
+application goes over these values, it's killed.
+
+### CPU Isolation
+
+Mesos uses a quota-based CPU scheduler (the *Completely Fair Scheduler*)
+to provide consistent and predictable performance.  This is effectively
+a guarantee of resources -- you receive at least what you requested, but
+also no more than you've requested.
+
+The scheduler gives applications a CPU quota for every 100 ms interval.
+When an application uses its quota for an interval, it is throttled for
+the rest of the 100 ms. Usage resets for each interval and unused
+quota does not carry over.
+
+For example, an application specifying 4.0 CPU has access to 400 ms of
+CPU time every 100 ms. This CPU quota can be used in different ways,
+depending on the application and available resources. Consider the
+scenarios shown in this diagram.
+
+![CPU Availability](../images/CPUavailability.png)
+
+* *Scenario A*: the application can use up to 4 cores continuously for
+every 100 ms interval. It is never throttled and starts processing
+new requests immediately.
+
+* *Scenario B* : the application uses up to 8 cores (depending on
+availability) but is throttled after 50 ms. The CPU quota resets at the
+start of each new 100 ms interval.
+
+* *Scenario C* : is like Scenario A, but there is a garbage collection
+event in the second interval that consumes all CPU quota. The
+application throttles for the remaining 75 ms of that interval and
+cannot service requests until the next interval. In this example, the
+garbage collection finished in one interval but, depending on how much
+garbage needs collecting, it may take more than one interval and further
+delay service of requests.
+
+*Technical Note*: Mesos considers logical cores, also known as
+hyperthreading or SMT cores, as the unit of CPU.
+
+### Memory Isolation
+
+Mesos uses dedicated memory allocation. Your application always has
+access to the amount of memory specified in your configuration. The
+application's memory use is defined as the sum of the resident set size
+(RSS) of all processes in a shard. Each shard is considered
+independently.
+
+In other words, say you specified a memory size of 10GB. Each shard
+would receive 10GB of memory. If an individual shard's memory demands
+exceed 10GB, that shard is killed, but the other shards continue
+working.
+
+*Technical note*: Total memory size is not enforced at allocation time,
+so your application can request more than its allocation without getting
+an ENOMEM. However, it will be killed shortly after.
+
+### Disk Space
+
+Disk space used by your application is defined as the sum of the files'
+disk space in your application's directory, including the `stdout` and
+`stderr` logged from your application. Each shard is considered
+independently. You should use off-node storage for your application's
+data whenever possible.
+
+In other words, say you specified disk space size of 100MB. Each shard
+would receive 100MB of disk space. If an individual shard's disk space
+demands exceed 100MB, that shard is killed, but the other shards
+continue working.
+
+After your application finishes running, its allocated disk space is
+reclaimed. Thus, your job's final action should move any disk content
+that you want to keep, such as logs, to your home file system or other
+less transitory storage. Disk reclamation takes place an undefined
+period after the application finish time; until then, the disk contents
+are still available but you shouldn't count on them being so.
+
+*Technical note* : Disk space is not enforced at write so your
+application can write above its quota without getting an ENOSPC, but it
+will be killed shortly after. This is subject to change.
+
+### Other Resources
+
+Other resources, such as network bandwidth, do not have any performance
+guarantees. For some resources, such as memory bandwidth, there are no
+practical sharing methods so some application combinations collocated on
+the same host may cause contention.
+
+
+Sizing
+-------
+
+### CPU Sizing
+
+To correctly size Aurora-run Mesos tasks, specify a per-shard CPU value
+that lets the task run at its desired performance when at peak load
+distributed across all shards. Include reserve capacity of at least 50%,
+possibly more, depending on how critical your service is (or how
+confident you are about your original estimate :-)), ideally by
+increasing the number of shards to also improve resiliency. When running
+your application, observe its CPU stats over time. If consistently at or
+near your quota during peak load, you should consider increasing either
+per-shard CPU or the number of shards.
+
+### Memory Sizing
+
+Size for your application's peak requirement. Observe the per-instance
+memory statistics over time, as memory requirements can vary over
+different periods. Remember that if your application exceeds its memory
+value, it will be killed, so you should also add a safety margin of
+around 10-20%. If you have the ability to do so, you may also want to
+put alerts on the per-instance memory.
+
+### Disk Space Sizing
+
+Size for your application's peak requirement. Rotate and discard log
+files as needed to stay within your quota. When running a Java process,
+add the maximum size of the Java heap to your disk space requirement, in
+order to account for an out of memory error dumping the heap
+into the application's sandbox space.
+
+
+Oversubscription
+----------------
+
+**WARNING**: This feature is currently in alpha status. Do not use it in production clusters!
+
+Mesos [supports a concept of revocable tasks](http://mesos.apache.org/documentation/latest/oversubscription/)
+by oversubscribing machine resources by the amount deemed safe to not affect the existing
+non-revocable tasks. Aurora supports revocable jobs via the `tier` setting; setting it to
+`revocable` marks the job's tasks as revocable.
+
+The Aurora scheduler must be configured to receive revocable offers from Mesos and accept revocable
+jobs. If not configured properly, revocable tasks will never get assigned to hosts and will stay in
+`PENDING`. Set this scheduler flag to allow receiving revocable Mesos offers:
+
+    -receive_revocable_resources=true
+
+Specify a tier configuration file path (unless you want to use the [default](https://github.com/apache/aurora/blob/rel/0.13.0/src/main/resources/org/apache/aurora/scheduler/tiers.json)):
+
+    -tier_config=path/to/tiers/config.json
+
+
+See the [Configuration Reference](../../reference/configuration/) for details on how to mark a job
+as being revocable.
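+
+A minimal sketch of marking a job as revocable (assuming the default tier
+configuration, which defines a `revocable` tier):
+
+    Service(
+      name = 'batch_indexer',
+      role = 'www-data',
+      # Tasks of this job may be preempted when Mesos reclaims resources.
+      tier = 'revocable',
+      ...
+    )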
diff --git a/source/documentation/0.13.0/features/service-discovery.md b/source/documentation/0.13.0/features/service-discovery.md
new file mode 100644
index 0000000..4636ebf
--- /dev/null
+++ b/source/documentation/0.13.0/features/service-discovery.md
@@ -0,0 +1,44 @@
+Service Discovery
+=================
+
+It is possible for the Aurora executor to announce tasks into ServerSets for
+the purpose of service discovery.  ServerSets use the Zookeeper [group membership pattern](http://zookeeper.apache.org/doc/trunk/recipes.html#sc_outOfTheBox)
+of which there are several reference implementations:
+
+  - [C++](https://github.com/apache/mesos/blob/master/src/zookeeper/group.cpp)
+  - [Java](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/zookeeper/ServerSetImpl.java#L221)
+  - [Python](https://github.com/twitter/commons/blob/master/src/python/twitter/common/zookeeper/serverset/serverset.py#L51)
+
+These can also be used natively in Finagle using the [ZookeeperServerSetCluster](https://github.com/twitter/finagle/blob/master/finagle-serversets/src/main/scala/com/twitter/finagle/zookeeper/ZookeeperServerSetCluster.scala).
+
+For more information about how to configure announcing, see the [Configuration Reference](../../reference/configuration/).
+
+Using Mesos DiscoveryInfo
+-------------------------
+Aurora has experimental support for populating DiscoveryInfo in Mesos. This can be used to build a
+custom service discovery system that does not rely on ZooKeeper. Please see the `Service Discovery` section in the
+[Mesos Framework Development guide](http://mesos.apache.org/documentation/latest/app-framework-development-guide/) for an
+explanation of the protobuf message in Mesos.
+
+To use this feature, enable the `--populate_discovery_info` flag on the scheduler. All jobs started by the scheduler
+afterwards will have their portmap populated to Mesos and be discoverable via the `/state` endpoint on the Mesos master and agent.
+
+### Using Mesos DNS
+An example is using [Mesos-DNS](https://github.com/mesosphere/mesos-dns), which is able to generate multiple DNS
+records. With the current implementation, the example job with key `devcluster/vagrant/test/http-example` generates at
+least the following:
+
+1. An A record for `http_example.test.vagrant.twitterscheduler.mesos` (which includes only the IP address);
+2. A [SRV record](https://en.wikipedia.org/wiki/SRV_record) for
+ `_http_example.test.vagrant._tcp.twitterscheduler.mesos`, which includes IP address and every port. This should only
+  be used if the service has one port.
+3. A SRV record `_{port-name}._http_example.test.vagrant._tcp.twitterscheduler.mesos` for each port name
+  defined. This should be used when the service has multiple ports.
+
+Things to note:
+
+1. The domain part (".mesos" in above example) can be configured in [Mesos DNS](http://mesosphere.github.io/mesos-dns/docs/configuration-parameters.html);
+2. The `twitterscheduler` part is the lower-cased framework name, which is not configurable right now (see
+   [TWITTER_SCHEDULER_NAME](https://github.com/apache/aurora/blob/master/src/main/java/org/apache/aurora/scheduler/mesos/CommandLineDriverSettingsModule.java#L98));
+3. Right now, portmap and port aliases in the announcer object are not reflected in DiscoveryInfo, and therefore not visible in
+   Mesos DNS records either. This is because they are only resolved in thermos executors.
\ No newline at end of file
diff --git a/source/documentation/0.13.0/features/services.md b/source/documentation/0.13.0/features/services.md
new file mode 100644
index 0000000..7d7d5e1
--- /dev/null
+++ b/source/documentation/0.13.0/features/services.md
@@ -0,0 +1,99 @@
+Long-running Services
+=====================
+
+Jobs that always restart on completion, whether successful or unsuccessful,
+are called services. This is useful for long-running processes
+such as webservices that should always be running, unless stopped explicitly.
+
+
+Service Specification
+---------------------
+
+A job is identified as a service by the presence of the flag
+`service=True` in the [`Job`](../../reference/configuration/#job-objects) object.
+The `Service` alias can be used as shorthand for `Job` with `service=True`.
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/hello_world.aurora
+    hello = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = SequentialTask(
+      processes = [hello],
+      resources = Resources(cpu = 1.0, ram = 128*MB, disk = 128*MB)
+    )
+
+    jobs = [
+      Service(
+        task = task,
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'prod',
+        name = 'hello'
+      )
+    ]
+
+
+Jobs without the service bit set only restart up to `max_task_failures` times and only if they
+terminated unsuccessfully either due to human error or machine failure (see the
+[`Job`](../../reference/configuration/#job-objects) object for details).
+
+
+Ports
+-----
+
+In order to be useful, most services have to bind to one or more ports. Aurora enables this
+usecase via the [`thermos.ports` namespace](../../reference/configuration/#thermos-namespace) that
+allows requesting arbitrarily named ports:
+
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[http]}}'
+    )
+
+
+When this process is included in a job, the job will be allocated a port, and the command line
+will be replaced with something like:
+
+    ./run_nginx.sh -port 42816
+
+Where 42816 happens to be the allocated port.
+
+For details on how to enable clients to discover this dynamically assigned port, see our
+[Service Discovery](../service-discovery/) documentation.
+
+
+Health Checking
+---------------
+
+Typically, the Thermos executor monitors processes within a task only by liveness of the forked
+process. In addition to that, Aurora has support for rudimentary health checking: either via HTTP
+or via custom shell scripts.
+
+For example, simply by requesting a `health` port, a process can request to be health checked
+via repeated calls to the `/health` endpoint:
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[health]}}'
+    )
+
+Please see the
+[configuration reference](../../reference/configuration/#healthcheckconfig-objects)
+for configuration options for this feature.
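+
+For example, a sketch of tuning the health checker in a job configuration
+(the values are illustrative):
+
+    health_check_config = HealthCheckConfig(
+      initial_interval_secs = 15,
+      interval_secs = 10,
+      # Mark the task unhealthy after three consecutive failed checks.
+      max_consecutive_failures = 3,
+    )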
+
+You can pause health checking by touching a file inside of your sandbox, named `.healthchecksnooze`.
+As long as that file is present, health checks will be disabled, enabling users to gather core
+dumps or other performance measurements without worrying about Aurora's health check killing
+their process.
+
+WARNING: Remember to remove this when you are done, otherwise your instance will have permanently
+disabled health checks.
diff --git a/source/documentation/0.13.0/features/sla-metrics.md b/source/documentation/0.13.0/features/sla-metrics.md
new file mode 100644
index 0000000..932f214
--- /dev/null
+++ b/source/documentation/0.13.0/features/sla-metrics.md
@@ -0,0 +1,178 @@
+Aurora SLA Measurement
+======================
+
+- [Overview](#overview)
+- [Metric Details](#metric-details)
+  - [Platform Uptime](#platform-uptime)
+  - [Job Uptime](#job-uptime)
+  - [Median Time To Assigned (MTTA)](#median-time-to-assigned-\(mtta\))
+  - [Median Time To Running (MTTR)](#median-time-to-running-\(mttr\))
+- [Limitations](#limitations)
+
+## Overview
+
+The primary goal of the feature is collection and monitoring of Aurora job SLA (Service Level
+Agreements) metrics that define a contractual relationship between the Aurora/Mesos platform
+and hosted services.
+
+The Aurora SLA feature is by default only enabled for service (non-cron)
+production jobs (`"production=True"` in your `.aurora` config). It can be enabled for
+non-production services by an operator via the scheduler command line flag `-sla_non_prod_metrics`.
+
+Counters that track SLA measurements are computed periodically within the scheduler.
+The individual instance metrics are refreshed every minute (configurable via
+`sla_stat_refresh_interval`). The instance counters are subsequently aggregated by
+relevant grouping types before being exported to the scheduler `/vars` endpoint (when using
+`vagrant`, that would be `http://192.168.33.7:8081/vars`).
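+
+For example, the exported SLA counters can be inspected with:
+
+    curl -s http://192.168.33.7:8081/vars | grep sla_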
+
+
+## Metric Details
+
+### Platform Uptime
+
+*Aggregate amount of time a job spends in a non-runnable state due to platform unavailability
+or scheduling delays. This metric tracks Aurora/Mesos uptime performance and reflects on any
+system-caused downtime events (tasks LOST or DRAINED). Any user-initiated task kills/restarts
+will not degrade this metric.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_platform_uptime_percent`
+* Per cluster - `sla_cluster_platform_uptime_percent`
+
+**Units:** percent
+
+A fault in the task environment may cause Aurora and Mesos to have different views of the task state
+or to lose track of the task's existence. In such cases, the service task is marked as LOST and
+rescheduled by Aurora. For example, this may happen when the task stays in ASSIGNED or STARTING
+for too long or the Mesos slave becomes unhealthy (or disappears completely). The time between
+task entering LOST and its replacement reaching RUNNING state is counted towards platform downtime.
+
+Another example of a platform downtime event is the administrator-requested task rescheduling. This
+happens during planned Mesos slave maintenance when all slave tasks are marked as DRAINED and
+rescheduled elsewhere.
+
+To accurately calculate Platform Uptime, we must separate platform incurred downtime from user
+actions that put a service instance in a non-operational state. It is simpler to isolate
+user-incurred downtime and treat all other downtime as platform incurred.
+
+Currently, a user can cause a healthy service (task) downtime in only two ways: via `killTasks`
+or `restartShards` RPCs. For both, their affected tasks leave an audit state transition trail
+relevant to uptime calculations. By applying a special "SLA meaning" to exposed task state
+transition records, we can build a deterministic downtime trace for every given service instance.
+
+A task going through a state transition carries one of three possible SLA meanings
+(see [SlaAlgorithm.java](https://github.com/apache/aurora/blob/rel/0.13.0/src/main/java/org/apache/aurora/scheduler/sla/SlaAlgorithm.java) for
+sla-to-task-state mapping):
+
+* Task is UP: starts a period where the task is considered to be up and running from the Aurora
+  platform standpoint.
+
+* Task is DOWN: starts a period where the task cannot reach the UP state for some
+  non-user-related reason. Counts towards instance downtime.
+
+* Task is REMOVED from SLA: starts a period where the task is not expected to be UP due to
+  user initiated action or failure. We ignore this period for the uptime calculation purposes.
+
+This metric is recalculated over the last sampling period (last minute) to account for
+any UP/DOWN/REMOVED events. It ignores any UP/DOWN events not immediately adjacent to the
+sampling interval as well as adjacent REMOVED events.
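+
+For illustration only (the authoritative mapping lives in `SlaAlgorithm.java`, linked above), a
+sketch of how an instance's uptime percent could be derived from a trace of SLA-labeled
+transitions within one sampling window:
+
+```python
+def platform_uptime_percent(events, window_start, window_end):
+  # events: (timestamp_sec, state) pairs, state in {'UP', 'DOWN', 'REMOVED'}.
+  # This is a hypothetical representation, not Aurora's internal one.
+  events = sorted(events)
+  state = None
+  for ts, s in events:
+    if ts <= window_start:
+      state = s  # state in effect when the window opens
+  up = down = 0.0
+  prev = window_start
+  for ts, s in events:
+    if window_start < ts <= window_end:
+      if state == 'UP':
+        up += ts - prev
+      elif state == 'DOWN':
+        down += ts - prev
+      # REMOVED periods count toward neither uptime nor downtime.
+      prev, state = ts, s
+  if state == 'UP':
+    up += window_end - prev
+  elif state == 'DOWN':
+    down += window_end - prev
+  total = up + down
+  return 100.0 * up / total if total else 100.0
+
+# UP at t=0, LOST (DOWN) at t=30, replacement RUNNING (UP) at t=45:
+print(platform_uptime_percent([(0, 'UP'), (30, 'DOWN'), (45, 'UP')], 0, 60))  # 75.0
+```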
+
+### Job Uptime
+
+*Percentage of the job instances considered to be in RUNNING state for the specified duration
+relative to request time. This is a purely application-side metric that considers the aggregate
+uptime of all RUNNING instances. Any user- or platform-initiated restarts directly affect
+this metric.*
+
+**Collection scope:** We currently expose job uptime values at 5 pre-defined
+percentiles (50th, 75th, 90th, 95th, and 99th):
+
+* `sla_<job_key>_job_uptime_50_00_sec`
+* `sla_<job_key>_job_uptime_75_00_sec`
+* `sla_<job_key>_job_uptime_90_00_sec`
+* `sla_<job_key>_job_uptime_95_00_sec`
+* `sla_<job_key>_job_uptime_99_00_sec`
+
+**Units:** seconds
+
+You can also get customized real-time stats from the Aurora client. See `aurora sla -h` for
+more details.
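+
+To make the percentile semantics concrete, here is a sketch under the assumption that each
+`sla_<job_key>_job_uptime_<percentile>_sec` value reports the largest duration for which at least
+that percentage of instances has been RUNNING (an illustration, not the scheduler's code):
+
+```python
+import math
+
+def job_uptime_at(percentile, instance_uptimes_sec):
+  # Largest duration D such that at least `percentile` percent of the
+  # job's instances have been RUNNING for D seconds or more.
+  ordered = sorted(instance_uptimes_sec, reverse=True)
+  count = int(math.floor(len(ordered) * percentile / 100.0))
+  return ordered[count - 1] if count else 0
+
+# 10 instances: half up for an hour, half restarted a minute ago.
+uptimes = [3600] * 5 + [60] * 5
+print(job_uptime_at(50, uptimes))  # 3600
+print(job_uptime_at(90, uptimes))  # 60
+```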
+
+### Median Time To Assigned (MTTA)
+
+*Median time a job spends waiting for its tasks to be assigned to a host. This is a combined
+metric that helps track the dependency of scheduling performance on the requested resources
+(user scope) as well as the internal scheduler bin-packing algorithm efficiency (platform scope).*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtta_ms`
+* Per cluster - `sla_cluster_mtta_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceAggregates.java](https://github.com/apache/aurora/blob/rel/0.13.0/src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java)
+  * By CPU:
+    * `sla_cpu_small_mtta_ms`
+    * `sla_cpu_medium_mtta_ms`
+    * `sla_cpu_large_mtta_ms`
+    * `sla_cpu_xlarge_mtta_ms`
+    * `sla_cpu_xxlarge_mtta_ms`
+  * By RAM:
+    * `sla_ram_small_mtta_ms`
+    * `sla_ram_medium_mtta_ms`
+    * `sla_ram_large_mtta_ms`
+    * `sla_ram_xlarge_mtta_ms`
+    * `sla_ram_xxlarge_mtta_ms`
+  * By DISK:
+    * `sla_disk_small_mtta_ms`
+    * `sla_disk_medium_mtta_ms`
+    * `sla_disk_large_mtta_ms`
+    * `sla_disk_xlarge_mtta_ms`
+    * `sla_disk_xxlarge_mtta_ms`
+
+**Units:** milliseconds
+
+MTTA only considers instances that have already reached ASSIGNED state and ignores those
+that are still PENDING. This ensures straggler instances (e.g. with unreasonable resource
+constraints) do not affect metric curves.
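+
+Illustratively (assumed bookkeeping, not the scheduler's implementation), the per-job value is a
+plain median over per-task waiting times; MTTR below is the same computation against the RUNNING
+transition:
+
+```python
+def median_ms(latencies_ms):
+  # Median PENDING-to-ASSIGNED latency; tasks still PENDING are excluded
+  # by the caller, mirroring the straggler rule described above.
+  ordered = sorted(latencies_ms)
+  n = len(ordered)
+  if not n:
+    return 0
+  mid = n // 2
+  return ordered[mid] if n % 2 else (ordered[mid - 1] + ordered[mid]) / 2.0
+
+print(median_ms([120, 95, 400, 210, 150]))  # 150
+```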
+
+### Median Time To Running (MTTR)
+
+*Median time a job waits for its tasks to reach RUNNING state. This is a comprehensive metric
+reflecting the overall time it takes for Aurora/Mesos to start executing user content.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mttr_ms`
+* Per cluster - `sla_cluster_mttr_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceAggregates.java](https://github.com/apache/aurora/blob/rel/0.13.0/src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java)
+  * By CPU:
+    * `sla_cpu_small_mttr_ms`
+    * `sla_cpu_medium_mttr_ms`
+    * `sla_cpu_large_mttr_ms`
+    * `sla_cpu_xlarge_mttr_ms`
+    * `sla_cpu_xxlarge_mttr_ms`
+  * By RAM:
+    * `sla_ram_small_mttr_ms`
+    * `sla_ram_medium_mttr_ms`
+    * `sla_ram_large_mttr_ms`
+    * `sla_ram_xlarge_mttr_ms`
+    * `sla_ram_xxlarge_mttr_ms`
+  * By DISK:
+    * `sla_disk_small_mttr_ms`
+    * `sla_disk_medium_mttr_ms`
+    * `sla_disk_large_mttr_ms`
+    * `sla_disk_xlarge_mttr_ms`
+    * `sla_disk_xxlarge_mttr_ms`
+
+**Units:** milliseconds
+
+MTTR only considers instances in RUNNING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+## Limitations
+
+* The availability of Aurora SLA metrics is bound by the scheduler availability.
+
+* All metrics are calculated at a pre-defined interval (currently set at 1 minute).
+  Scheduler restarts may result in missed collections.
diff --git a/source/documentation/0.13.0/getting-started/overview.md b/source/documentation/0.13.0/getting-started/overview.md
new file mode 100644
index 0000000..dbfcd39
--- /dev/null
+++ b/source/documentation/0.13.0/getting-started/overview.md
@@ -0,0 +1,110 @@
+Aurora System Overview
+======================
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+
+Components
+----------
+
+It is important to have an understanding of the components that make up
+a functioning Aurora cluster.
+
+![Aurora Components](../images/components.png)
+
+* **Aurora scheduler**
+  The scheduler is your primary interface to the work you run in your cluster.  You will
+  instruct it to run jobs, and it will manage them in Mesos for you.  You will also frequently use
+  the scheduler's read-only web interface as a heads-up display for what's running in your cluster.
+
+* **Aurora client**
+  The client (`aurora` command) is a command line tool that exposes primitives that you can use to
+  interact with the scheduler.
+
+  Aurora also provides an admin client (`aurora_admin` command) that contains commands built for
+  cluster administrators.  You can use this tool to do things like manage user quotas and manage
+  graceful maintenance on machines in the cluster.
+
+* **Aurora executor**
+  The executor (a.k.a. Thermos executor) is responsible for carrying out the workloads described in
+  the Aurora DSL (`.aurora` files).  The executor is what actually executes user processes.  It will
+  also perform health checking of tasks and register tasks in ZooKeeper for the purposes of dynamic
+  service discovery.
+
+* **Aurora observer**
+  The observer provides browser-based access to the status of individual tasks executing on worker
+  machines.  It gives insight into the processes executing, and facilitates browsing of task sandbox
+  directories.
+
+* **ZooKeeper**
+  [ZooKeeper](http://zookeeper.apache.org) is a distributed consensus system.  In an Aurora cluster
+  it is used for reliable election of the leading Aurora scheduler and Mesos master.  It is also
+  used as a vehicle for service discovery; see [Service Discovery](../../features/service-discovery/).
+
+* **Mesos master**
+  The master is responsible for tracking worker machines and performing accounting of their
+  resources.  The scheduler interfaces with the master to control the cluster.
+
+* **Mesos agent**
+  The agent receives work assigned by the scheduler and executes it.  It interfaces with Linux
+  isolation systems like cgroups, namespaces and Docker to manage the resource consumption of tasks.
+  When a user task is launched, the agent will launch the executor (in the context of a Linux cgroup
+  or Docker container depending upon the environment), which will in turn fork user processes.
+
+
+Jobs, Tasks and Processes
+--------------------------
+
+Aurora is a Mesos framework used to schedule *jobs* onto Mesos. Mesos
+cares about individual *tasks*, but typical jobs consist of dozens or
+hundreds of task replicas. Aurora provides a layer on top of Mesos with
+its `Job` abstraction. An Aurora `Job` consists of a task template and
+instructions for creating near-identical replicas of that task (modulo
+things like "instance id" or specific port numbers which may differ from
+machine to machine).
+
+The task replicas created from a `Job`'s template are also referred to as "instances" or
+"shards".
+
+A task can merely be a single *process* corresponding to a single
+command line, such as `python2.7 my_script.py`. However, a task can also
+consist of many separate processes, which all run within a single
+sandbox. For example, running multiple cooperating agents together,
+such as `logrotate`, `installer`, master, or slave processes. This is
+where Thermos comes in. While Aurora provides a `Job` abstraction on
+top of Mesos `Tasks`, Thermos provides a `Process` abstraction
+underneath Mesos `Task`s and serves as part of the Aurora framework's
+executor.
+
+You define `Job`s, `Task`s, and `Process`es in a configuration file.
+Configuration files are written in Python, and make use of the
+[Pystachio](https://github.com/wickman/pystachio) templating language,
+along with specific Aurora, Mesos, and Thermos commands and methods.
+The configuration files typically end with a `.aurora` extension.
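+
+A minimal sketch of that hierarchy in a hypothetical `.aurora` file (names and commands are
+invented for illustration; the [tutorial](../tutorial/) walks through a complete, runnable
+version):
+
+```python
+# Two Processes sharing one sandbox, composed into a Task, wrapped in a Job.
+fetch = Process(
+  name = 'fetch',
+  cmdline = 'curl -O http://example.com/app.tar.gz && tar xzf app.tar.gz')
+run = Process(name = 'run', cmdline = './app')
+
+app_task = SequentialTask(
+  processes = [fetch, run],
+  resources = Resources(cpu = 1, ram = 64*MB, disk = 128*MB))
+
+jobs = [Service(cluster = 'devcluster', environment = 'devel',
+                role = 'www-data', name = 'app', task = app_task)]
+```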
+
+Summary:
+
+* Aurora manages jobs made of tasks.
+* Mesos manages tasks made of processes.
+* Thermos manages processes.
+* All of the above is defined in `.aurora` configuration files.
+
+![Aurora hierarchy](../images/aurora_hierarchy.png)
+
+Each `Task` has a *sandbox* created when the `Task` starts and garbage
+collected when it finishes. All of a `Task`'s processes run in its
+sandbox, so processes can share state by using a shared current working
+directory.
+
+The sandbox garbage collection policy considers many factors, most
+importantly age and size. It makes a best-effort attempt to keep
+sandboxes around as long as possible post-task in order for service
+owners to inspect data and logs, should the `Task` have completed
+abnormally. But you can't design your applications assuming sandboxes
+will be around forever; if you need durable output, build log saving or other
+checkpointing mechanisms directly into your application or into your
+`Job` description.
+
diff --git a/source/documentation/0.13.0/getting-started/tutorial.md b/source/documentation/0.13.0/getting-started/tutorial.md
new file mode 100644
index 0000000..e15c102
--- /dev/null
+++ b/source/documentation/0.13.0/getting-started/tutorial.md
@@ -0,0 +1,258 @@
+# Aurora Tutorial
+
+This tutorial shows how to use the Aurora scheduler to run (and "`printf-debug`")
+a hello world program on Mesos. This is the recommended document for new Aurora users
+to start getting up to speed on the system.
+
+- [Prerequisite](#prerequisite)
+- [The Script](#the-script)
+- [Aurora Configuration](#aurora-configuration)
+- [Creating the Job](#creating-the-job)
+- [Watching the Job Run](#watching-the-job-run)
+- [Cleanup](#cleanup)
+- [Next Steps](#next-steps)
+
+
+## Prerequisite
+
+This tutorial assumes you are running [Aurora locally using Vagrant](../vagrant/).
+However, in general the instructions are also applicable to any other
+[Aurora installation](../../operations/installation/).
+
+Unless otherwise stated, all commands are to be run from the root of the Aurora
+repository clone.
+
+
+## The Script
+
+Our "hello world" application is a simple Python script that loops
+forever, displaying the time every few seconds. Copy the code below and
+put it in a file named `hello_world.py` in the root of your Aurora repository clone
+(Note: this directory is the same as `/vagrant` inside the Vagrant VMs).
+
+The script has an intentional bug, which we will explain later on.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+import time
+
+def main():
+  SLEEP_DELAY = 10
+  # Python experts - ignore this blatant bug.
+  for i in xrang(100):
+    print("Hello world! The time is now: %s. Sleeping for %d secs" % (
+      time.asctime(), SLEEP_DELAY))
+    time.sleep(SLEEP_DELAY)
+
+if __name__ == "__main__":
+  main()
+```
+
+## Aurora Configuration
+
+Once we have our script/program, we need to create a *configuration
+file* that tells Aurora how to manage and launch our Job. Save the below
+code in the file `hello_world.aurora`.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+pkg_path = '/vagrant/hello_world.py'
+
+# we use a trick here to make the configuration change with
+# the contents of the file, for simplicity.  in a normal setting, packages would be
+# versioned, and the version number would be changed in the configuration.
+import hashlib
+with open(pkg_path, 'rb') as f:
+  pkg_checksum = hashlib.md5(f.read()).hexdigest()
+
+# copy hello_world.py into the local sandbox
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s . && echo %s && chmod +x hello_world.py' % (pkg_path, pkg_checksum))
+
+# run the script
+hello_world = Process(
+  name = 'hello_world',
+  cmdline = 'python -u hello_world.py')
+
+# describe the task
+hello_world_task = SequentialTask(
+  processes = [install, hello_world],
+  resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB))
+
+jobs = [
+  Service(cluster = 'devcluster',
+          environment = 'devel',
+          role = 'www-data',
+          name = 'hello_world',
+          task = hello_world_task)
+]
+```
+
+There is a lot going on in that configuration file:
+
+1. From a "big picture" viewpoint, it first defines two
+Processes. Then it defines a Task that runs the two Processes in the
+order specified in the Task definition, as well as specifying what
+computational and memory resources are available for them.  Finally,
+it defines a Job that will schedule the Task on available and suitable
+machines. This Job is the sole member of a list of Jobs; you can
+specify more than one Job in a config file.
+
+2. At the Process level, it specifies how to get your code into the
+local sandbox in which it will run. It then specifies how the code is
+actually run once the second Process starts.
+
+For more about Aurora configuration files, see the [Configuration
+Tutorial](../../reference/configuration-tutorial/) and the [Configuration
+Reference](../../reference/configuration/) (preferably after finishing this
+tutorial).
+
+
+## Creating the Job
+
+We're ready to launch our job! To do so, we use the Aurora Client to
+issue a Job creation request to the Aurora scheduler.
+
+Many Aurora Client commands take a *job key* argument, which uniquely
+identifies a Job. A job key consists of four parts, each separated by a
+"/". The four parts are  `<cluster>/<role>/<environment>/<jobname>`
+in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts existing on the slave machines. If you
+don't know what accounts are available, contact your sysadmin.
+* Environment names are namespaces; you can count on `test`, `devel`,
+`staging` and `prod` existing.
+* Jobname is the custom name of your job.
+
+When comparing two job keys, if any of the four parts is different from
+its counterpart in the other key, then the two job keys identify two separate
+jobs. If all four values are identical, the job keys identify the same job.
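+
+Illustratively, the comparison boils down to four-way tuple equality (a sketch only; the client
+parses job keys for you):
+
+```python
+def parse_job_key(key):
+  # 'devcluster/www-data/devel/hello_world' -> (cluster, role, env, name)
+  cluster, role, environment, jobname = key.split('/')
+  return cluster, role, environment, jobname
+
+a = parse_job_key('devcluster/www-data/devel/hello_world')
+b = parse_job_key('devcluster/www-data/test/hello_world')
+print(a == b)  # False: different environment, therefore a different job
+```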
+
+The `clusters.json` [client configuration](../../reference/client-cluster-configuration/)
+for the Aurora scheduler defines the available cluster names.
+For Vagrant, from the top-level of your Aurora repository clone, do:
+
+    $ vagrant ssh
+
+Followed by:
+
+    vagrant@aurora:~$ cat /etc/aurora/clusters.json
+
+You'll see something like the following. The `name` value shown here corresponds to a job key's cluster value.
+
+```javascript
+[{
+  "name": "devcluster",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler",
+  "auth_mechanism": "UNAUTHENTICATED",
+  "slave_run_directory": "latest",
+  "slave_root": "/var/lib/mesos"
+}]
+```
+
+The Aurora Client command that actually runs our Job is `aurora job create`. It creates a Job as
+specified by its job key and configuration file arguments and runs it.
+
+    aurora job create <cluster>/<role>/<environment>/<jobname> <config_file>
+
+Or for our example:
+
+    aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+After entering our virtual machine using `vagrant ssh`, this returns:
+
+    vagrant@aurora:~$ aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Creating job hello_world
+     INFO] Checking status of devcluster/www-data/devel/hello_world
+    Job create succeeded: job url=http://aurora.local:8081/scheduler/www-data/devel/hello_world
+
+
+## Watching the Job Run
+
+Now that our job is running, let's see what it's doing. Access the
+scheduler web interface at `http://$scheduler_hostname:$scheduler_port/scheduler`,
+or, when using `vagrant`, at `http://192.168.33.7:8081/scheduler`.
+First we see what Jobs are scheduled:
+
+![Scheduled Jobs](../images/ScheduledJobs.png)
+
+Click on your user name, which in this case was `www-data`, and we see the Jobs associated
+with that role:
+
+![Role Jobs](../images/RoleJobs.png)
+
+If you click on your `hello_world` Job, you'll see:
+
+![hello_world Job](../images/HelloWorldJob.png)
+
+Oops, looks like our first job didn't quite work! The task is temporarily throttled for
+having failed on every attempt of the Aurora scheduler to run it. We have to figure out
+what is going wrong.
+
+On the Completed tasks tab, we see all past attempts of the Aurora scheduler to run our job.
+
+![Completed tasks tab](../images/CompletedTasks.png)
+
+We can navigate to the Task page of a failed run by clicking on the host link.
+
+![Task page](../images/TaskBreakdown.png)
+
+Once there, we see that the `hello_world` process failed. The Task page
+captures the standard error and standard output streams and makes them available.
+Clicking through to `stderr` on the failed `hello_world` process, we see what happened.
+
+![stderr page](../images/stderr.png)
+
+It looks like we made a typo in our Python script. We wanted `xrange`,
+not `xrang`. Edit the `hello_world.py` script to use the correct function
+and save it as `hello_world_v2.py`. Then update `pkg_path` in the
+`hello_world.aurora` configuration to reference the new file.
+
+In order to try again, we can now instruct the scheduler to update our job:
+
+    vagrant@aurora:~$ aurora update start devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Starting update for: hello_world
+    Job update has started. View your update progress at http://aurora.local:8081/scheduler/www-data/devel/hello_world/update/8ef38017-e60f-400d-a2f2-b5a8b724e95b
+
+This time, the task comes up.
+
+![Running Job](../images/RunningJob.png)
+
+By again clicking on the host, we inspect the Task page, and see that the
+`hello_world` process is running.
+
+![Running Task page](../images/runningtask.png)
+
+We then inspect the output by clicking on `stdout` and see our process'
+output:
+
+![stdout page](../images/stdout.png)
+
+## Cleanup
+
+Now that we're done, we kill the job using the Aurora client:
+
+    vagrant@aurora:~$ aurora job killall devcluster/www-data/devel/hello_world
+     INFO] Killing tasks for job: devcluster/www-data/devel/hello_world
+     INFO] Instances to be killed: [0]
+    Successfully killed instances [0]
+    Job killall succeeded
+
+The job page now shows the `hello_world` tasks as completed.
+
+![Killed Task page](../images/killedtask.png)
+
+## Next Steps
+
+Now that you've finished this Tutorial, you should read or do the following:
+
+- [The Aurora Configuration Tutorial](../../reference/configuration-tutorial/), which provides more examples
+  and best practices for writing Aurora configurations. You should also look at
+  the [Aurora Configuration Reference](../../reference/configuration/).
+- Explore the Aurora Client - use `aurora -h`, and read the
+  [Aurora Client Commands](../../reference/client-commands/) document.
diff --git a/source/documentation/0.13.0/getting-started/vagrant.md b/source/documentation/0.13.0/getting-started/vagrant.md
new file mode 100644
index 0000000..34de280
--- /dev/null
+++ b/source/documentation/0.13.0/getting-started/vagrant.md
@@ -0,0 +1,137 @@
+A local Cluster with Vagrant
+============================
+
+This document shows you how to configure a complete cluster using a virtual machine. This setup
+replicates a real cluster in your development machine as closely as possible. After you complete
+the steps outlined here, you will be ready to create and run your first Aurora job.
+
+The following sections describe these steps in detail:
+
+1. [Overview](#overview)
+1. [Install VirtualBox and Vagrant](#install-virtualbox-and-vagrant)
+1. [Clone the Aurora repository](#clone-the-aurora-repository)
+1. [Start the local cluster](#start-the-local-cluster)
+1. [Log onto the VM](#log-onto-the-vm)
+1. [Run your first job](#run-your-first-job)
+1. [Rebuild components](#rebuild-components)
+1. [Shut down or delete your local cluster](#shut-down-or-delete-your-local-cluster)
+1. [Troubleshooting](#troubleshooting)
+
+
+Overview
+--------
+
+The Aurora distribution includes a set of scripts that enable you to create a local cluster in
+your development machine. These scripts use [Vagrant](https://www.vagrantup.com/) and
+[VirtualBox](https://www.virtualbox.org/) to run and configure a virtual machine. Once the
+virtual machine is running, the scripts install and initialize Aurora and any required components
+to create the local cluster.
+
+
+Install VirtualBox and Vagrant
+------------------------------
+
+First, download and install [VirtualBox](https://www.virtualbox.org/) on your development machine.
+
+Then download and install [Vagrant](https://www.vagrantup.com/). To verify that the installation
+was successful, open a terminal window and type the `vagrant` command. You should see a list of
+common commands for this tool.
+
+
+Clone the Aurora repository
+---------------------------
+
+To obtain the Aurora source distribution, clone its Git repository using the following command:
+
+     git clone git://git.apache.org/aurora.git
+
+
+Start the local cluster
+-----------------------
+
+Now change into the `aurora/` directory, which contains the Aurora source code and
+other scripts and tools:
+
+     cd aurora/
+
+To start the local cluster, type the following command:
+
+     vagrant up
+
+This command uses the configuration scripts in the Aurora distribution to:
+
+* Download a Linux system image.
+* Start a virtual machine (VM) and configure it.
+* Install the required build tools on the VM.
+* Install Aurora's requirements (like [Mesos](http://mesos.apache.org/) and
+[Zookeeper](http://zookeeper.apache.org/)) on the VM.
+* Build and install Aurora from source on the VM.
+* Start Aurora's services on the VM.
+
+This process takes several minutes to complete.
+
+To verify that Aurora is running on the cluster, visit the following URLs:
+
+* Scheduler - http://192.168.33.7:8081
+* Observer - http://192.168.33.7:1338
+* Mesos Master - http://192.168.33.7:5050
+* Mesos Slave - http://192.168.33.7:5051
+
+
+Log onto the VM
+---------------
+
+To SSH into the VM, run the following command in your development machine:
+
+     vagrant ssh
+
+To verify that Aurora is installed in the VM, type the `aurora` command. You should see a list
+of arguments and possible commands.
+
+The `/vagrant` directory on the VM is mapped to the `aurora/` local directory
+from which you started the cluster. You can edit files inside this directory in your development
+machine and access them from the VM under `/vagrant`.
+
+A pre-installed `clusters.json` file refers to your local cluster as `devcluster`, which you
+will use in client commands.
+
+
+Run your first job
+------------------
+
+Now that your cluster is up and running, you are ready to define and run your first job in Aurora.
+For more information, see the [Aurora Tutorial](../tutorial/).
+
+
+Rebuild components
+------------------
+
+If you are changing Aurora code and would like to rebuild a component, you can use the `aurorabuild`
+command on the VM to build and restart a component.  This is considerably faster than destroying
+and rebuilding your VM.
+
+`aurorabuild` accepts a list of components to build and update. To get a list of supported
+components, invoke the `aurorabuild` command with no arguments. For example, to rebuild and
+restart the client component:
+
+     vagrant ssh -c 'aurorabuild client'
+
+
+Shut down or delete your local cluster
+--------------------------------------
+
+To shut down your local cluster, run the `vagrant halt` command in your development machine. To
+start it again, run the `vagrant up` command.
+
+Once you are finished with your local cluster, or if you would otherwise like to start from scratch,
+you can use the command `vagrant destroy` to turn off and delete the virtual machine.
+
+
+Troubleshooting
+---------------
+
+Most Vagrant-related problems can be fixed with the following steps:
+
+* Destroying the vagrant environment with `vagrant destroy`
+* Killing any orphaned VMs (see AURORA-499) with the VirtualBox UI or the `VBoxManage` command line tool
+* Cleaning the repository of build artifacts and other intermediate output with `git clean -fdx`
+* Bringing up the vagrant environment with `vagrant up`
diff --git a/publish/documentation/latest/images/CPUavailability.png b/source/documentation/0.13.0/images/CPUavailability.png
similarity index 100%
copy from publish/documentation/latest/images/CPUavailability.png
copy to source/documentation/0.13.0/images/CPUavailability.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/CompletedTasks.png b/source/documentation/0.13.0/images/CompletedTasks.png
new file mode 100644
index 0000000..343e111
--- /dev/null
+++ b/source/documentation/0.13.0/images/CompletedTasks.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/HelloWorldJob.png b/source/documentation/0.13.0/images/HelloWorldJob.png
new file mode 100644
index 0000000..4977227
--- /dev/null
+++ b/source/documentation/0.13.0/images/HelloWorldJob.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/RoleJobs.png b/source/documentation/0.13.0/images/RoleJobs.png
new file mode 100644
index 0000000..95ffa49
--- /dev/null
+++ b/source/documentation/0.13.0/images/RoleJobs.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/RunningJob.png b/source/documentation/0.13.0/images/RunningJob.png
new file mode 100644
index 0000000..4e7a045
--- /dev/null
+++ b/source/documentation/0.13.0/images/RunningJob.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/ScheduledJobs.png b/source/documentation/0.13.0/images/ScheduledJobs.png
new file mode 100644
index 0000000..0be4731
--- /dev/null
+++ b/source/documentation/0.13.0/images/ScheduledJobs.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/TaskBreakdown.png b/source/documentation/0.13.0/images/TaskBreakdown.png
new file mode 100644
index 0000000..fab702c
--- /dev/null
+++ b/source/documentation/0.13.0/images/TaskBreakdown.png
Binary files differ
diff --git a/publish/documentation/latest/images/aurora_hierarchy.png b/source/documentation/0.13.0/images/aurora_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/aurora_hierarchy.png
copy to source/documentation/0.13.0/images/aurora_hierarchy.png
Binary files differ
diff --git a/publish/assets/img/aurora_logo.png b/source/documentation/0.13.0/images/aurora_logo.png
similarity index 100%
copy from publish/assets/img/aurora_logo.png
copy to source/documentation/0.13.0/images/aurora_logo.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/components.odg b/source/documentation/0.13.0/images/components.odg
new file mode 100644
index 0000000..a0f3b28
--- /dev/null
+++ b/source/documentation/0.13.0/images/components.odg
Binary files differ
diff --git a/source/documentation/0.13.0/images/components.png b/source/documentation/0.13.0/images/components.png
new file mode 100644
index 0000000..e8228c0
--- /dev/null
+++ b/source/documentation/0.13.0/images/components.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/debug-client-test.png b/source/documentation/0.13.0/images/debug-client-test.png
new file mode 100644
index 0000000..8b67a10
--- /dev/null
+++ b/source/documentation/0.13.0/images/debug-client-test.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/debugging-client-test.png b/source/documentation/0.13.0/images/debugging-client-test.png
new file mode 100644
index 0000000..df7b7e3
--- /dev/null
+++ b/source/documentation/0.13.0/images/debugging-client-test.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/killedtask.png b/source/documentation/0.13.0/images/killedtask.png
new file mode 100644
index 0000000..bb1bc92
--- /dev/null
+++ b/source/documentation/0.13.0/images/killedtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/lifeofatask.png b/source/documentation/0.13.0/images/lifeofatask.png
similarity index 100%
copy from publish/documentation/latest/images/lifeofatask.png
copy to source/documentation/0.13.0/images/lifeofatask.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png b/source/documentation/0.13.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
new file mode 100644
index 0000000..ea9866e
--- /dev/null
+++ b/source/documentation/0.13.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png b/source/documentation/0.13.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
new file mode 100644
index 0000000..c9539df
--- /dev/null
+++ b/source/documentation/0.13.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png b/source/documentation/0.13.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
new file mode 100644
index 0000000..99674c7
--- /dev/null
+++ b/source/documentation/0.13.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/presentations/02_28_2015_apache_aurora_thumb.png b/source/documentation/0.13.0/images/presentations/02_28_2015_apache_aurora_thumb.png
new file mode 100644
index 0000000..6e0e1fb
--- /dev/null
+++ b/source/documentation/0.13.0/images/presentations/02_28_2015_apache_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png b/source/documentation/0.13.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
new file mode 100644
index 0000000..f81a128
--- /dev/null
+++ b/source/documentation/0.13.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png b/source/documentation/0.13.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
new file mode 100644
index 0000000..6ebce80
--- /dev/null
+++ b/source/documentation/0.13.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png b/source/documentation/0.13.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
new file mode 100644
index 0000000..f8a3db1
--- /dev/null
+++ b/source/documentation/0.13.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/presentations/08_21_2014_past_present_future_thumb.png b/source/documentation/0.13.0/images/presentations/08_21_2014_past_present_future_thumb.png
new file mode 100644
index 0000000..deaebe7
--- /dev/null
+++ b/source/documentation/0.13.0/images/presentations/08_21_2014_past_present_future_thumb.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png b/source/documentation/0.13.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
new file mode 100644
index 0000000..61ea6d4
--- /dev/null
+++ b/source/documentation/0.13.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png b/source/documentation/0.13.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png
new file mode 100644
index 0000000..9f1f4af
--- /dev/null
+++ b/source/documentation/0.13.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png b/source/documentation/0.13.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
new file mode 100644
index 0000000..a8bb199
--- /dev/null
+++ b/source/documentation/0.13.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png b/source/documentation/0.13.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
new file mode 100644
index 0000000..ac5c6dc
--- /dev/null
+++ b/source/documentation/0.13.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/runningtask.png b/source/documentation/0.13.0/images/runningtask.png
new file mode 100644
index 0000000..9e7e31a
--- /dev/null
+++ b/source/documentation/0.13.0/images/runningtask.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/stderr.png b/source/documentation/0.13.0/images/stderr.png
new file mode 100644
index 0000000..6076a84
--- /dev/null
+++ b/source/documentation/0.13.0/images/stderr.png
Binary files differ
diff --git a/source/documentation/0.13.0/images/stdout.png b/source/documentation/0.13.0/images/stdout.png
new file mode 100644
index 0000000..d614097
--- /dev/null
+++ b/source/documentation/0.13.0/images/stdout.png
Binary files differ
diff --git a/publish/documentation/latest/images/storage_hierarchy.png b/source/documentation/0.13.0/images/storage_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/storage_hierarchy.png
copy to source/documentation/0.13.0/images/storage_hierarchy.png
Binary files differ
diff --git a/source/documentation/0.13.0/index.html.md b/source/documentation/0.13.0/index.html.md
new file mode 100644
index 0000000..2a43162
--- /dev/null
+++ b/source/documentation/0.13.0/index.html.md
@@ -0,0 +1,73 @@
+## Introduction
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+We encourage you to ask questions on the [Aurora user list](http://aurora.apache.org/community/) or
+the `#aurora` IRC channel on `irc.freenode.net`.
+
+
+## Getting Started
+Information for everyone new to Apache Aurora.
+
+ * [Aurora System Overview](getting-started/overview/)
+ * [Hello World Tutorial](getting-started/tutorial/)
+ * [Local cluster with Vagrant](getting-started/vagrant/)
+
+## Features
+Description of important Aurora features.
+
+ * [Containers](features/containers/)
+ * [Cron Jobs](features/cron-jobs/)
+ * [Job Updates](features/job-updates/)
+ * [Multitenancy](features/multitenancy/)
+ * [Resource Isolation](features/resource-isolation/)
+ * [Scheduling Constraints](features/constraints/)
+ * [Services](features/services/)
+ * [Service Discovery](features/service-discovery/)
+ * [SLA Metrics](features/sla-metrics/)
+
+## Operators
+For those that wish to manage and fine-tune an Aurora cluster.
+
+ * [Installation](operations/installation/)
+ * [Configuration](operations/configuration/)
+ * [Monitoring](operations/monitoring/)
+ * [Security](operations/security/)
+ * [Storage](operations/storage/)
+ * [Backup](operations/backup-restore/)
+
+## Reference
+The complete reference of commands, configuration options, and scheduler internals.
+
+ * [Task lifecycle](reference/task-lifecycle/)
+ * Configuration (`.aurora` files)
+    - [Configuration Reference](reference/configuration/)
+    - [Configuration Tutorial](reference/configuration-tutorial/)
+    - [Configuration Best Practices](reference/configuration-best-practices/)
+    - [Configuration Templating](reference/configuration-templating/)
+ * Aurora Client
+    - [Client Commands](reference/client-commands/)
+    - [Client Hooks](reference/client-hooks/)
+    - [Client Cluster Configuration](reference/client-cluster-configuration/)
+ * [Scheduler Configuration](reference/scheduler-configuration/)
+
+## Additional Resources
+ * [Tools integrating with Aurora](additional-resources/tools/)
+ * [Presentation videos and slides](additional-resources/presentations/)
+
+## Developers
+All the information you need to start modifying Aurora and contributing back to the project.
+
+ * [Contributing to the project](contributing/)
+ * [Committer's Guide](development/committers-guide/)
+ * [Design Documents](development/design-documents/)
+ * Developing the Aurora components:
+     - [Client](development/client/)
+     - [Scheduler](development/scheduler/)
+     - [Scheduler UI](development/ui/)
+     - [Thermos](development/thermos/)
+     - [Thrift structures](development/thrift/)
+
+
diff --git a/source/documentation/0.13.0/operations/backup-restore.md b/source/documentation/0.13.0/operations/backup-restore.md
new file mode 100644
index 0000000..8dac40a
--- /dev/null
+++ b/source/documentation/0.13.0/operations/backup-restore.md
@@ -0,0 +1,91 @@
+# Recovering from a Scheduler Backup
+
+**Be sure to read the entire page before attempting to restore from a backup, as it may have
+unintended consequences.**
+
+# Summary
+
+The restoration procedure replaces the existing (possibly corrupted) Mesos replicated log with an
+earlier, backed-up version and requires all schedulers to be taken down temporarily while
+restoring. Once completed, the scheduler state resets to what it was when the backup was created.
+This means any jobs/tasks created or updated after the backup are unknown to the scheduler and will
+be killed shortly after the cluster restarts. All other tasks continue operating as normal.
+
+Usually, it is a bad idea to restore a backup that is not extremely recent (i.e. older than a few
+hours). This is because the scheduler will expect the cluster to look exactly as the backup does,
+so any tasks that have been rescheduled since the backup was taken will be killed.
+
+The instructions below have been verified in the [Vagrant environment](../../getting-started/vagrant/) and,
+with minor syntax/path changes, should be applicable to any Aurora cluster.
+
+# Preparation
+
+Follow these steps to prepare the cluster for restoring from a backup:
+
+* Stop all scheduler instances
+
+* Consider blocking external traffic on the port defined in `-http_port` for all schedulers to
+prevent users from interacting with the scheduler during the restoration process. This will help
+troubleshooting by reducing the scheduler log noise and prevent users from making changes that will
+be erased after the backup snapshot is restored.
+
+* Configure `aurora_admin` access to run all commands listed in
+  [Restore from backup](#restore-from-backup) section locally on the leading scheduler:
+  * Make sure the [clusters.json](../../reference/client-cluster-configuration/) file is configured to
+    access the scheduler directly: set the `scheduler_uri` setting and remove `zk`. Since the leader
+    can get re-elected during the restore steps, consider doing this on all scheduler replicas.
+  * Depending on your particular security approach, you will need to either turn off scheduler
+    authorization by removing the scheduler's `-http_authentication_mechanism` flag or make sure
+    direct scheduler access is properly authorized. E.g., in the case of Kerberos, you will need to
+    make a `/etc/hosts` change to map your local IP to the scheduler URL configured in the keytabs:
+
+        <local_ip> <scheduler_domain_in_keytabs>
+
+* The next steps are required to put the scheduler into a partially disabled state where it can still
+accept storage recovery requests but cannot schedule or change task states. This may be
+accomplished by updating the following scheduler configuration options (see the combined example
+after this list):
+  * Set `-mesos_master_address` to a non-existent zk address. This will prevent the scheduler from
+    registering with Mesos. E.g.: `-mesos_master_address=zk://localhost:1111/mesos/master`
+  * Set `-max_registration_delay` to a sufficiently long interval to prevent a registration timeout
+    and, as a result, scheduler suicide. E.g.: `-max_registration_delay=360mins`
+  * Make sure the `-reconciliation_initial_delay` option is set high enough (e.g.: `365days`) to
+    prevent accidental task GC. This is important as the scheduler will attempt to reconcile the
+    cluster state and will kill all tasks when restarted with an empty Mesos replicated log.
+
+* Restart all schedulers
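+
+Taken together, the partially disabled state amounts to restarting each scheduler with flags along
+these lines (values repeated from the examples above, not recommendations):
+
+    -mesos_master_address=zk://localhost:1111/mesos/master
+    -max_registration_delay=360mins
+    -reconciliation_initial_delay=365days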
+
+# Cleanup and re-initialize Mesos replicated log
+
+Get rid of the corrupted files and re-initialize Mesos replicated log:
+
+* Stop schedulers
+* Delete all files under `-native_log_file_path` on all schedulers
+* Initialize Mesos replica's log file: `sudo mesos-log initialize --path=<-native_log_file_path>`
+* Start schedulers
+
+# Restore from backup
+
+At this point the scheduler is ready to rehydrate from the backup:
+
+* Identify the leading scheduler by:
+  * examining the `scheduler_lifecycle_LEADER_AWAITING_REGISTRATION` metric at the scheduler
+    `/vars` endpoint. The leader will have a value of 1; all other replicas will have 0.
+  * examining scheduler logs
+  * or examining Zookeeper registration under the path defined by `-zk_endpoints`
+    and `-serverset_path`
+
+* Locate the desired backup file, copy it to the leading scheduler's `-backup_dir` folder and stage
+recovery by running the following command on the leader:
+
+        aurora_admin scheduler_stage_recovery --bypass-leader-redirect <cluster> scheduler-backup-<yyyy-MM-dd-HH-mm>
+
+* At this point, the recovery snapshot is staged and available for manual verification/modification
+via `aurora_admin scheduler_print_recovery_tasks --bypass-leader-redirect` and
+`scheduler_delete_recovery_tasks --bypass-leader-redirect` commands.
+See `aurora_admin help <command>` for usage details.
+
+* Commit recovery. This instructs the scheduler to overwrite the existing Mesos replicated log with
+the provided backup snapshot and initiate a mandatory failover:
+
+        aurora_admin scheduler_commit_recovery --bypass-leader-redirect <cluster>
+
+# Cleanup
+
+Undo any modifications done during the [Preparation](#preparation) sequence.
diff --git a/source/documentation/0.13.0/operations/configuration.md b/source/documentation/0.13.0/operations/configuration.md
new file mode 100644
index 0000000..1d7ab30
--- /dev/null
+++ b/source/documentation/0.13.0/operations/configuration.md
@@ -0,0 +1,187 @@
+# Scheduler Configuration
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+Examples are available under `examples/scheduler/`. For a list of available Aurora flags and their
+documentation, see [Scheduler Configuration Reference](../../reference/scheduler-configuration/).
+
+
+## A Note on Configuration
+Like Mesos, Aurora uses command-line flags for runtime configuration. As such, the Aurora
+"configuration file" is typically a `scheduler.sh` shell script of the following form:
+
+    #!/bin/bash
+    AURORA_HOME=/usr/local/aurora-scheduler
+
+    # Flags controlling the JVM.
+    JAVA_OPTS=(
+      -Xmx2g
+      -Xms2g
+      # GC tuning, etc.
+    )
+
+    # Flags controlling the scheduler.
+    AURORA_FLAGS=(
+      # Port for client RPCs and the web UI
+      -http_port=8081
+      # Log configuration, etc.
+    )
+
+    # Environment variables controlling libmesos
+    export JAVA_HOME=...
+    export GLOG_v=1
+    # Port used to communicate with the Mesos master and for the replicated log
+    export LIBPROCESS_PORT=8083
+
+    JAVA_OPTS="${JAVA_OPTS[*]}" exec "$AURORA_HOME/bin/aurora-scheduler" "${AURORA_FLAGS[@]}"
+
+That way Aurora's current flags are visible in `ps` and in the `/vars` admin endpoint.
+
+
+## Replicated Log Configuration
+
+Aurora schedulers use ZooKeeper to discover log replicas and elect a leader. Only one scheduler is
+leader at a given time - the other schedulers follow log writes and prepare to take over as leader
+but do not communicate with the Mesos master. Either 3 or 5 schedulers are recommended in a
+production deployment depending on failure tolerance and they must have persistent storage.
+
+Below is a summary of scheduler storage configuration flags that either don't have default values
+or require attention before deploying in a production environment.
+
+### `-native_log_quorum_size`
+Defines the Mesos replicated log quorum size. In a cluster with `N` schedulers, the flag
+`-native_log_quorum_size` should be set to `floor(N/2) + 1`. So in a cluster with 1 scheduler
+it should be set to `1`, in a cluster with 3 it should be set to `2`, and in a cluster of 5 it
+should be set to `3`.
+
+  Number of schedulers (N) | ```-native_log_quorum_size``` setting (```floor(N/2) + 1```)
+  ------------------------ | -------------------------------------------------------------
+  1                        | 1
+  3                        | 2
+  5                        | 3
+  7                        | 4
+
+*Incorrectly setting this flag will cause data corruption to occur!*
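+
+Expressed as code, the rule from the table is simply (a sanity-check sketch, not part of Aurora):
+
+    def native_log_quorum_size(num_schedulers):
+        # floor(N/2) + 1: a strict majority of the log replicas.
+        return num_schedulers // 2 + 1
+
+    assert [native_log_quorum_size(n) for n in (1, 3, 5, 7)] == [1, 2, 3, 4]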
+
+### `-native_log_file_path`
+Location of the Mesos replicated log files. Consider allocating a dedicated disk (preferably SSD)
+for Mesos replicated log files to ensure optimal storage performance.
+
+### `-native_log_zk_group_path`
+ZooKeeper path used for Mesos replicated log quorum discovery.
+
+See [code](https://github.com/apache/aurora/blob/rel/0.13.0/src/main/java/org/apache/aurora/scheduler/log/mesos/MesosLogStreamModule.java) for
+other available Mesos replicated log configuration options and default values.
+
+### Changing the Quorum Size
+Special care needs to be taken when changing the size of the Aurora scheduler quorum.
+Since Aurora uses a Mesos replicated log, similar steps need to be followed as when
+[changing the mesos quorum size](http://mesos.apache.org/documentation/latest/operational-guide).
+
+To prepare, increase `-native_log_quorum_size` on each existing scheduler and restart them.
+When updating from 3 to 5 schedulers, the quorum size would grow from 2 to 3.
+
+When starting the new schedulers, set `-native_log_quorum_size` to the new value. Failing to
+first increase the quorum size on running schedulers can in some cases result in corruption
+or truncating of the replicated log used by Aurora. In that case, see the documentation on
+[recovering from backup](../backup-restore/).
+
+
+## Backup Configuration
+
+Configuration options for the Aurora scheduler backup manager.
+
+### `-backup_interval`
+The interval on which the scheduler writes local storage backups.  The default is every hour.
+
+### `-backup_dir`
+Directory to write backups to.
+
+### `-max_saved_backups`
+Maximum number of backups to retain before deleting the oldest backup(s).
+
+
+## Process Logs
+
+### Log destination
+By default, Thermos will write process stdout/stderr to log files in the sandbox. Process object configuration
+allows specifying alternate log file destinations like streamed stdout/stderr or suppression of all log output.
+Default behavior can be configured for the entire cluster with the following flag (through the `-thermos_executor_flags`
+argument to the Aurora scheduler):
+
+    --runner-logger-destination=both
+
+The `both` setting will send logs to files and also stream them to the parent stdout/stderr outputs.
+
+See [Configuration Reference](../../reference/configuration/#logger) for all destination options.
+
+### Log rotation
+By default, Thermos will not rotate the stdout/stderr logs from child processes and they will grow
+without bound. An individual user may change this behavior via configuration on the Process object,
+but it may also be desirable to change the default configuration for the entire cluster.
+In order to enable rotation by default, the following flags can be applied to Thermos (through the
+`-thermos_executor_flags` argument to the Aurora scheduler):
+
+    --runner-logger-mode=rotate
+    --runner-rotate-log-size-mb=100
+    --runner-rotate-log-backups=10
+
+In the above example, each instance of the Thermos runner will rotate stderr/stdout logs once they
+reach 100 MiB in size and keep a maximum of 10 backups. If a user has provided a custom setting for
+their process, it will override these default settings.
+
+
+
+## Thermos Executor Wrapper
+
+If you need to do computation before starting the thermos executor (for example, setting a different
+`--announcer-hostname` parameter for every executor), then the thermos executor should be invoked
+inside a wrapper script. In such a case, the Aurora scheduler should be started with
+`-thermos_executor_path` pointing to the wrapper script and `-thermos_executor_resources`
+set to a comma-separated string of all the resources that should be copied into
+the sandbox (including the original thermos executor).
+
+For example, to wrap the executor inside a simple wrapper, the scheduler can be started with:
+
+    -thermos_executor_path=/path/to/wrapper.sh -thermos_executor_resources=/usr/share/aurora/bin/thermos_executor.pex
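+
+For illustration, a hypothetical wrapper (shown here in Python for concreteness; a shell script
+works just as well) that computes a per-host flag before handing off to the real executor:
+
+    #!/usr/bin/env python
+    # Hypothetical executor wrapper: compute a per-host flag, then exec the
+    # real thermos executor that was copied into the sandbox alongside it.
+    import os
+    import socket
+    import sys
+
+    EXECUTOR = './thermos_executor.pex'  # assumed sandbox-relative location
+
+    args = [EXECUTOR, '--announcer-hostname=%s' % socket.getfqdn()] + sys.argv[1:]
+    os.execv(EXECUTOR, args)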
+
+
+
+### Docker containers
+In order for Aurora to launch jobs using docker containers, a few extra configuration options
+must be set.  The [docker containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/)
+must be enabled on the mesos slaves by launching them with the `--containerizers=docker,mesos` option.
+
+By default, Aurora will configure Mesos to copy the file specified in `-thermos_executor_path`
+into the container's sandbox.  If using a wrapper script to launch the thermos executor,
+specify the path to the wrapper in that argument. In addition, the path to the executor pex itself
+must be included in the `-thermos_executor_resources` option. Doing so will ensure that both the
+wrapper script and executor are correctly copied into the sandbox. Finally, ensure the wrapper
+script does not access resources outside of the sandbox, as when the script is run from within a
+docker container those resources will not exist.
+
+A scheduler flag, `-global_container_mounts` allows mounting paths from the host (i.e., the slave)
+into all containers on that host. The format is a comma-separated list of `host_path:container_path[:mode]`
+tuples. For example `-global_container_mounts=/opt/secret_keys_dir:/mnt/secret_keys_dir:ro` mounts
+`/opt/secret_keys_dir` from the slaves into all launched containers. Valid modes are `ro` and `rw`.
+
+If you would like to run a container with a read-only filesystem, it may also be necessary to
+use the scheduler flag `-thermos_home_in_sandbox` in order to set HOME to the sandbox
+before the executor runs. This will make sure that the executor/runner PEX extraction happens
+inside of the sandbox instead of the container filesystem root.
+
+If you would like to supply your own parameters to `docker run` when launching jobs in docker
+containers, you may use the following flags:
+
+    -allow_docker_parameters
+    -default_docker_parameters
+
+`-allow_docker_parameters` controls whether or not users may pass their own configuration parameters
+through the job configuration files. If set to `false` (the default), the scheduler will reject
+jobs with custom parameters. *NOTE*: this setting should be used with caution as it allows any job
+owner to specify any parameters they wish, including those that may introduce security concerns
+(`privileged=true`, for example).
+
+`-default_docker_parameters` allows a cluster operator to specify a universal set of parameters that
+should be used for every container that does not have parameters explicitly configured at the job
+level. The argument accepts a multimap format:
+
+    -default_docker_parameters="read-only=true,tmpfs=/tmp,tmpfs=/run"
diff --git a/source/documentation/0.13.0/operations/installation.md b/source/documentation/0.13.0/operations/installation.md
new file mode 100644
index 0000000..8aef374
--- /dev/null
+++ b/source/documentation/0.13.0/operations/installation.md
@@ -0,0 +1,324 @@
+# Installing Aurora
+
+Source and binary distributions can be found on our
+[downloads](https://aurora.apache.org/downloads/) page.  Installing from binary packages is
+recommended for most.
+
+- [Installing the scheduler](#installing-the-scheduler)
+- [Installing worker components](#installing-worker-components)
+- [Installing the client](#installing-the-client)
+- [Installing Mesos](#installing-mesos)
+- [Troubleshooting](#troubleshooting)
+
+If our binary packages don't suit you, our package build toolchain makes it easy to build your
+own packages. See the [instructions](https://github.com/apache/aurora-packaging) to learn how.
+
+
+## Machine profiles
+
+Given that many of these components communicate over the network, there are numerous ways you could
+assemble them to create an Aurora cluster.  The simplest way is to think in terms of three machine
+profiles:
+
+### Coordinator
+**Components**: ZooKeeper, Aurora scheduler, Mesos master
+
+A small number of machines (typically 3 or 5) responsible for cluster orchestration.  In most cases
+it is fine to co-locate these components in anything but very large clusters (> 1000 machines).
+Beyond that point, operators will likely want to manage these services on separate machines.
+
+In practice, 5 coordinators have been shown to reliably manage clusters with tens of thousands of
+machines.
+
+### Worker
+**Components**: Aurora executor, Aurora observer, Mesos agent
+
+The bulk of the cluster, where services will actually run.
+
+### Client
+**Components**: Aurora client, Aurora admin client
+
+Any machines that users submit jobs from.
+
+
+## Installing the scheduler
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        sudo start mesos-master
+
+2. Install ZooKeeper
+
+        sudo apt-get install -y zookeeperd
+
+3. Install the Aurora scheduler
+
+        sudo add-apt-repository -y ppa:openjdk-r/ppa
+        sudo apt-get update
+        sudo apt-get install -y openjdk-8-jre-headless wget
+
+        sudo update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-scheduler_0.12.0_amd64.deb
+        sudo dpkg -i aurora-scheduler_0.12.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-master
+
+2. Install ZooKeeper
+
+        sudo rpm -Uvh https://archive.cloudera.com/cdh4/one-click-install/redhat/6/x86_64/cloudera-cdh-4-0.x86_64.rpm
+        sudo yum install -y java-1.8.0-openjdk-headless zookeeper-server
+
+        sudo service zookeeper-server init
+        sudo systemctl start zookeeper-server
+
+3. Install the Aurora scheduler
+
+        sudo yum install -y wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-scheduler-0.12.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-scheduler-0.12.0-1.el7.centos.aurora.x86_64.rpm
+
+### Finalizing
+By default, the scheduler will start in an uninitialized mode.  This is because external
+coordination is necessary to be certain operator error does not result in a quorum of schedulers
+starting up and believing their databases are empty when in fact they should be re-joining a
+cluster.
+
+Because of this, a fresh install of the scheduler will need intervention to start up.  First,
+stop the scheduler service.
+
+* Ubuntu: `sudo stop aurora-scheduler`
+* CentOS: `sudo systemctl stop aurora`
+
+Now initialize the database:
+
+    sudo -u aurora mkdir -p /var/lib/aurora/scheduler/db
+    sudo -u aurora mesos-log initialize --path=/var/lib/aurora/scheduler/db
+
+Now you can start the scheduler back up.
+
+* Ubuntu: `sudo start aurora-scheduler`
+* CentOS: `sudo systemctl start aurora`
+
+
+## Installing worker components
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        sudo start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo apt-get install -y python2.7 wget
+
+        # NOTE: This appears to be a missing dependency of the mesos deb package and is needed
+        # for the python mesos native bindings.
+        sudo apt-get -y install libcurl4-nss-dev
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-executor_0.12.0_amd64.deb
+        sudo dpkg -i aurora-executor_0.12.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo yum install -y python2 wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-executor-0.12.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-executor-0.12.0-1.el7.centos.aurora.x86_64.rpm
+
+### Configuration
+The executor typically does not require configuration.  If needed, arguments can
+be passed to the executor via a command line argument on the scheduler.
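+
+For example, recent scheduler versions expose a `-thermos_executor_flags` option for this purpose
+(the value shown is illustrative; check your scheduler's `-help` output to confirm the flag):
+
+    # On the scheduler command line (illustrative):
+    -thermos_executor_flags="--announcer-ensemble localhost:2181"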
+
+The observer needs to be configured to look at the correct mesos directory in order to find task
+sandboxes. You should first find the Mesos working directory by looking for the Mesos slave's
+`--work_dir` flag. You should see something like:
+
+        ps -eocmd | grep "mesos-slave" | grep -v grep | tr ' ' '\n' | grep "\--work_dir"
+        --work_dir=/var/lib/mesos
+
+If the flag is not set, you can view the default value like so:
+
+        mesos-slave --help
+        Usage: mesos-slave [options]
+
+          ...
+          --work_dir=VALUE      Directory path to place framework work directories
+                                (default: /tmp/mesos)
+          ...
+
+The value you find for `--work_dir`, `/var/lib/mesos` in this example, should match the Aurora
+observer value for `--mesos-root`.  You can look for that setting in a similar way on a worker
+node by grepping for `thermos_observer` and `--mesos-root`.  If the flag is not set, you can view
+the default value like so:
+
+        thermos_observer -h
+        Options:
+          ...
+          --mesos-root=MESOS_ROOT
+                                The mesos root directory to search for Thermos
+                                executor sandboxes [default: /var/lib/mesos]
+          ...
+
+In this case the default is `/var/lib/mesos` and we have a match. If there is no match, you can
+either adjust the mesos-slave start script(s) and restart the slave(s) or else adjust the
+Aurora observer start scripts and restart the observers.  To adjust the Aurora observer:
+
+#### Ubuntu Trusty
+
+    sudo sh -c 'echo "MESOS_ROOT=/tmp/mesos" >> /etc/default/thermos'
+
+NB: In Aurora releases up through 0.12.0, you'll also need to edit /etc/init/thermos.conf like so:
+
+    diff -C 1 /etc/init/thermos.conf.orig /etc/init/thermos.conf
+    *** /etc/init/thermos.conf.orig 2016-03-22 22:34:46.286199718 +0000
+    --- /etc/init/thermos.conf  2016-03-22 17:09:49.357689038 +0000
+    ***************
+    *** 24,25 ****
+    --- 24,26 ----
+          --port=${OBSERVER_PORT:-1338} \
+    +     --mesos-root=${MESOS_ROOT:-/var/lib/mesos} \
+          --log_to_disk=NONE \
+
+#### CentOS 7
+
+Make an edit to add the `--mesos-root` flag resulting in something like:
+
+    grep -A5 OBSERVER_ARGS /etc/sysconfig/thermos-observer
+    OBSERVER_ARGS=(
+      --port=1338
+      --mesos-root=/tmp/mesos
+      --log_to_disk=NONE
+      --log_to_stderr=google:INFO
+    )
+
+## Installing the client
+### Ubuntu Trusty
+
+    sudo apt-get install -y python2.7 wget
+
+    wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-tools_0.12.0_amd64.deb
+    sudo dpkg -i aurora-tools_0.12.0_amd64.deb
+
+### CentOS 7
+
+    sudo yum install -y python2 wget
+
+    wget -c https://apache.bintray.com/aurora/centos-7/aurora-tools-0.12.0-1.el7.centos.aurora.x86_64.rpm
+    sudo yum install -y aurora-tools-0.12.0-1.el7.centos.aurora.x86_64.rpm
+
+### Mac OS X
+
+    brew upgrade
+    brew install aurora-cli
+
+### Configuration
+Client configuration lives in a json file that describes the clusters available and how to reach
+them.  By default this file is at `/etc/aurora/clusters.json`.
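+
+A minimal sketch of such a file, assuming a ZooKeeper-discovered scheduler (values are
+illustrative; see the [client cluster configuration](../../reference/client-cluster-configuration/)
+reference for all properties):
+
+    [{
+      "name": "example",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler"
+    }]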
+
+Jobs may be submitted to the scheduler using the client, and are described with
+[job configurations](../../reference/configuration/) expressed in `.aurora` files.  Typically you will
+maintain a single job configuration file to describe one or more deployment environments (e.g.
+dev, test, prod) for a production job.
+
+
+## Installing Mesos
+Mesos uses a single package for the Mesos master and slave.  As a result, the package dependencies
+are identical for both.
+
+### Mesos on Ubuntu Trusty
+
+    sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF
+    DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]')
+    CODENAME=$(lsb_release -cs)
+
+    echo "deb http://repos.mesosphere.io/${DISTRO} ${CODENAME} main" | \
+      sudo tee /etc/apt/sources.list.d/mesosphere.list
+    sudo apt-get -y update
+
+    # Use `apt-cache showpkg mesos | grep [version]` to find the exact version.
+    sudo apt-get -y install mesos=0.25.0-0.2.70.ubuntu1404
+
+### Mesos on CentOS 7
+
+    sudo rpm -Uvh https://repos.mesosphere.io/el/7/noarch/RPMS/mesosphere-el-repo-7-1.noarch.rpm
+    sudo yum -y install mesos-0.25.0
+
+
+
+## Troubleshooting
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions here to help get you moving.
+
+### Replicated log not initialized
+
+#### Symptoms
+- Scheduler RPCs and web interface claim `Storage is not READY`
+- Scheduler log repeatedly prints messages like
+
+  ```
+  I1016 16:12:27.234133 26081 replica.cpp:638] Replica in EMPTY status
+  received a broadcasted recover request
+  I1016 16:12:27.234256 26084 recover.cpp:188] Received a recover response
+  from a replica in EMPTY status
+  ```
+
+#### Solution
+When you create a new cluster, you need to inform a quorum of schedulers that they are safe to
+consider their database to be empty by [initializing](#finalizing) the
+replicated log. This is done to prevent the scheduler from modifying the cluster state in the event
+of multiple simultaneous disk failures or, more likely, misconfiguration of the replicated log path.
+
+
+### Scheduler not registered
+
+#### Symptoms
+Scheduler log contains
+
+    Framework has not been registered within the tolerated delay.
+
+#### Solution
+Double-check that the scheduler is configured correctly to reach the Mesos master. If you are registering
+the master in ZooKeeper, make sure the command line argument to the master:
+
+    --zk=zk://$ZK_HOST:2181/mesos/master
+
+is the same as the one on the scheduler:
+
+    -mesos_master_address=zk://$ZK_HOST:2181/mesos/master
+
+
+### Scheduler not running
+
+#### Symptoms
+The scheduler process commits suicide regularly. This happens under error conditions, but
+also on purpose at regular intervals.
+
+#### Solution
+Aurora is meant to be run under supervision. You have to configure a supervisor like
+[Monit](http://mmonit.com/monit/) or [supervisord](http://supervisord.org/) to run the scheduler
+and restart it whenever it fails or exits on purpose.
+
+Aurora supports an active health checking protocol on its admin HTTP interface - if a `GET /health`
+times out or returns anything other than `200 OK` the scheduler process is unhealthy and should be
+restarted.
+
+For example, monit can be configured with
+
+    if failed port 8081 send "GET /health HTTP/1.0\r\n" expect "OK\n" with timeout 2 seconds for 10 cycles then restart
+
+assuming you set `-http_port=8081`.
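+
+For supervisord, a minimal program definition that restarts the scheduler on exit might look like
+this sketch (supervisord does not probe HTTP endpoints natively; the command path is illustrative
+and depends on how the scheduler was packaged):
+
+    [program:aurora-scheduler]
+    command=/usr/sbin/aurora-scheduler
+    autorestart=true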
diff --git a/source/documentation/0.13.0/operations/monitoring.md b/source/documentation/0.13.0/operations/monitoring.md
new file mode 100644
index 0000000..3cb2a79
--- /dev/null
+++ b/source/documentation/0.13.0/operations/monitoring.md
@@ -0,0 +1,181 @@
+# Monitoring your Aurora cluster
+
+Before you start running important services in your Aurora cluster, it's important to set up
+monitoring and alerting of Aurora itself.  Most of your monitoring can be against the scheduler,
+since it will give you a global view of what's going on.
+
+## Reading stats
+The scheduler exposes a *lot* of instrumentation data via its HTTP interface. You can get a quick
+peek at the first few of these in our vagrant image:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars | head'
+    async_tasks_completed 1004
+    attribute_store_fetch_all_events 15
+    attribute_store_fetch_all_events_per_sec 0.0
+    attribute_store_fetch_all_nanos_per_event 0.0
+    attribute_store_fetch_all_nanos_total 3048285
+    attribute_store_fetch_all_nanos_total_per_sec 0.0
+    attribute_store_fetch_one_events 3391
+    attribute_store_fetch_one_events_per_sec 0.0
+    attribute_store_fetch_one_nanos_per_event 0.0
+    attribute_store_fetch_one_nanos_total 454690753
+
+These values are served as `Content-Type: text/plain`, with each line containing a space-separated metric
+name and value. Values may be integers, doubles, or strings (note: strings are static, others
+may be dynamic).
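+
+This format is easy to consume with standard shell tools. For example, a single metric could be
+pulled out like so (run on the scheduler host; the metric name here is just an example):
+
+    $ curl -s localhost:8081/vars | awk '$1 == "jvm_uptime_secs" {print $2}'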
+
+If your monitoring infrastructure prefers JSON, the scheduler exports that as well:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars.json | python -mjson.tool | head'
+    {
+        "async_tasks_completed": 1009,
+        "attribute_store_fetch_all_events": 15,
+        "attribute_store_fetch_all_events_per_sec": 0.0,
+        "attribute_store_fetch_all_nanos_per_event": 0.0,
+        "attribute_store_fetch_all_nanos_total": 3048285,
+        "attribute_store_fetch_all_nanos_total_per_sec": 0.0,
+        "attribute_store_fetch_one_events": 3409,
+        "attribute_store_fetch_one_events_per_sec": 0.0,
+        "attribute_store_fetch_one_nanos_per_event": 0.0,
+
+This will be the same data as above, served with `Content-Type: application/json`.
+
+## Viewing live stat samples on the scheduler
+The scheduler uses the Twitter commons stats library, which keeps an internal time-series database
+of exported variables - nearly everything in `/vars` is available for instant graphing.  This is
+useful for debugging, but is not a replacement for an external monitoring system.
+
+You can view these graphs on a scheduler at `/graphview`.  It supports some composition and
+aggregation of values, which can be invaluable when triaging a problem.  For example, if you have
+the scheduler running in vagrant, check out these links:
+
+- [simple graph](http://192.168.33.7:8081/graphview?query=jvm_uptime_secs)
+- [complex composition](http://192.168.33.7:8081/graphview?query=rate\(scheduler_log_native_append_nanos_total\)%2Frate\(scheduler_log_native_append_events\)%2F1e6)
+
+### Counters and gauges
+Among numeric stats, there are two fundamental types of stats exported: _counters_ and _gauges_.
+Counters are guaranteed to be monotonically-increasing for the lifetime of a process, while gauges
+may decrease in value.  Aurora uses counters to represent things like the number of times an event
+has occurred, and gauges to capture things like the current length of a queue.  Counters are a
+natural fit for accurate composition into [rate ratios](http://en.wikipedia.org/wiki/Rate_ratio)
+(useful for sample-resistant latency calculation), while gauges are not.
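+
+Counters compose well because any two samples taken a known interval apart yield a rate. A rough
+shell sketch (the stat choice and interval are illustrative):
+
+    v1=$(curl -s localhost:8081/vars | awk '$1 == "scheduler_resource_offers" {print $2}')
+    sleep 60
+    v2=$(curl -s localhost:8081/vars | awk '$1 == "scheduler_resource_offers" {print $2}')
+    echo "$(( (v2 - v1) / 60 )) offers/sec over the last minute"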
+
+# Alerting
+
+## Quickstart
+If you are looking for just bare-minimum alerting to get something in place quickly, set up alerting
+on `framework_registered` and `task_store_LOST`. These will give you a decent picture of overall
+health.
+
+## A note on thresholds
+One of the most difficult things in monitoring is choosing alert thresholds. With many of these
+stats, there is no value we can offer as a threshold that will be guaranteed to work for you. It
+will depend on the size of your cluster, number of jobs, churn of tasks in the cluster, etc. We
+recommend you start with a strict value after viewing a small amount of collected data, and then
+adjust thresholds as you see fit. Feel free to ask us if you would like to validate that your alerts
+and thresholds make sense.
+
+## Important stats
+
+### `jvm_uptime_secs`
+Type: integer counter
+
+The number of seconds the JVM process has been running. Comes from
+[RuntimeMXBean#getUptime()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/RuntimeMXBean.html#getUptime\(\))
+
+Detecting resets (decreasing values) on this stat will tell you that the scheduler is failing to
+stay alive.
+
+Look at the scheduler logs to identify the reason the scheduler is exiting.
+
+### `system_load_avg`
+Type: double gauge
+
+The current load average of the system for the last minute. Comes from
+[OperatingSystemMXBean#getSystemLoadAverage()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/OperatingSystemMXBean.html?is-external=true#getSystemLoadAverage\(\)).
+
+A high sustained value suggests that the scheduler machine may be over-utilized.
+
+Use standard unix tools like `top` and `ps` to track down the offending process(es).
+
+### `process_cpu_cores_utilized`
+Type: double gauge
+
+The current number of CPU cores in use by the JVM process. This should not exceed the number of
+logical CPU cores on the machine. Derived from
+[OperatingSystemMXBean#getProcessCpuTime()](http://docs.oracle.com/javase/7/docs/jre/api/management/extension/com/sun/management/OperatingSystemMXBean.html)
+
+A high sustained value indicates that the scheduler is overworked. Due to current internal design
+limitations, if this value is sustained at `1`, there is a good chance the scheduler is under water.
+
+There are two main inputs that tend to drive this figure: task scheduling attempts and status
+updates from Mesos.  You may see activity in the scheduler logs to give an indication of where
+time is being spent.  Beyond that, it really takes good familiarity with the code to effectively
+triage this.  We suggest engaging with an Aurora developer.
+
+### `task_store_LOST`
+Type: integer gauge
+
+The number of tasks stored in the scheduler that are in the `LOST` state, and have been rescheduled.
+
+If this value is increasing at a high rate, it is a sign of trouble.
+
+There are many sources of `LOST` tasks in Mesos: the scheduler, master, slave, and executor can all
+trigger this.  The first step is to look in the scheduler logs for `LOST` to identify where the
+state changes are originating.
+
+### `scheduler_resource_offers`
+Type: integer counter
+
+The number of resource offers that the scheduler has received.
+
+For a healthy scheduler, this value must be increasing over time.
+
+Assuming the scheduler is up and otherwise healthy, you will want to check if the master thinks it
+is sending offers. You should also look at the master's web interface to see if it has a large
+number of outstanding offers that it is waiting to be returned.
+
+### `framework_registered`
+Type: binary integer counter
+
+Will be `1` for the leading scheduler that is registered with the Mesos master, and `0` for
+passive schedulers.
+
+A sustained period without a `1` (or where `sum() != 1`) warrants investigation.
+
+If there is no leading scheduler, look in the scheduler and master logs for why.  If there are
+multiple schedulers claiming leadership, this suggests a split brain and warrants filing a critical
+bug.
+
+### `rate(scheduler_log_native_append_nanos_total)/rate(scheduler_log_native_append_events)`
+Type: rate ratio of integer counters
+
+This composes two counters to compute a windowed figure for the latency of replicated log writes.
+
+A hike in this value suggests disk bandwidth contention.
+
+Look in scheduler logs for any reported oddness with saving to the replicated log. Also use
+standard tools like `vmstat` and `iotop` to identify whether the disk has become slow or
+over-utilized. We suggest using a dedicated disk for the replicated log to mitigate this.
+
+### `timed_out_tasks`
+Type: integer counter
+
+Tracks the number of times the scheduler has given up while waiting
+(for `-transient_task_state_timeout`) to hear back about a task that is in a transient state
+(e.g. `ASSIGNED`, `KILLING`), and has moved to `LOST` before rescheduling.
+
+This value is currently known to increase occasionally when the scheduler fails over
+([AURORA-740](https://issues.apache.org/jira/browse/AURORA-740)). However, any large spike in this
+value warrants investigation.
+
+The scheduler will log when it times out a task. You should trace the task ID of the timed out
+task into the master, slave, and/or executors to determine where the message was dropped.
+
+### `http_500_responses_events`
+Type: integer counter
+
+The total number of HTTP 500 status responses sent by the scheduler. Includes API and asset serving.
+
+An increase warrants investigation.
+
+Look in scheduler logs to identify why the scheduler returned a 500; there should be a stack trace.
diff --git a/source/documentation/0.13.0/operations/security.md b/source/documentation/0.13.0/operations/security.md
new file mode 100644
index 0000000..46e0b8a
--- /dev/null
+++ b/source/documentation/0.13.0/operations/security.md
@@ -0,0 +1,340 @@
+Securing your Aurora Cluster
+============================
+
+Aurora integrates with [Apache Shiro](http://shiro.apache.org/) to provide security
+controls for its API. In addition to providing some useful features out of the box, Shiro
+also allows Aurora cluster administrators to adapt the security system to their organization’s
+existing infrastructure. The announcer in the Aurora thermos executor also supports security
+controls for talking to ZooKeeper.
+
+
+- [Enabling Security](#enabling-security)
+- [Authentication](#authentication)
+	- [HTTP Basic Authentication](#http-basic-authentication)
+		- [Server Configuration](#server-configuration)
+		- [Client Configuration](#client-configuration)
+	- [HTTP SPNEGO Authentication (Kerberos)](#http-spnego-authentication-kerberos)
+		- [Server Configuration](#server-configuration-1)
+		- [Client Configuration](#client-configuration-1)
+- [Authorization](#authorization)
+	- [Using an INI file to define security controls](#using-an-ini-file-to-define-security-controls)
+		- [Caveats](#caveats)
+- [Implementing a Custom Realm](#implementing-a-custom-realm)
+	- [Packaging a realm module](#packaging-a-realm-module)
+- [Known Issues](#known-issues)
+- [Announcer Authentication](#announcer-authentication)
+    - [ZooKeeper authentication configuration](#zookeeper-authentication-configuration)
+    - [Executor settings](#executor-settings)
+
+# Enabling Security
+
+There are two major components of security:
+[authentication and authorization](http://en.wikipedia.org/wiki/Authentication#Authorization).  A
+cluster administrator may choose the approach used for each, and may also implement custom
+mechanisms for either.  Later sections describe the options available. To enable authentication
+for the announcer, see [Announcer Authentication](#announcer-authentication).
+
+
+# Authentication
+
+At a minimum, the scheduler must be configured with instructions for how to process
+authentication credentials.  There are currently two built-in authentication schemes -
+[HTTP Basic Authentication](http://en.wikipedia.org/wiki/Basic_access_authentication), and
+[SPNEGO](http://en.wikipedia.org/wiki/SPNEGO) (Kerberos).
+
+## HTTP Basic Authentication
+
+Basic Authentication is a very quick way to add *some* security.  It is supported
+by all major browsers and HTTP client libraries with minimal work.  However,
+before relying on Basic Authentication you should be aware of the [security
+considerations](http://tools.ietf.org/html/rfc2617#section-4).
+
+### Server Configuration
+
+At a minimum you need to set 3 command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_realm_modules=INI_AUTHNZ
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+[users]
+sally = apple, admin
+
+[roles]
+admin = *
+```
+
+The details of the security.ini file are explained below. Note that this file contains plaintext,
+unhashed passwords.
+
+### Client Configuration
+
+To configure the client for HTTP Basic authentication, add an entry to `~/.netrc` with your credentials:
+
+```
+% cat ~/.netrc
+# ...
+
+machine aurora.example.com
+login sally
+password apple
+
+# ...
+```
+
+No changes are required to `clusters.json`.
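+
+You can sanity-check the server-side setup directly with `curl` (whether a given endpoint
+challenges for credentials depends on your configuration; the host and credentials here reuse the
+examples above):
+
+```
+% curl -u sally:apple http://aurora.example.com:8081/scheduler
+```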
+
+## HTTP SPNEGO Authentication (Kerberos)
+
+### Server Configuration
+At a minimum you need to set 5 command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=NEGOTIATE
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+% cat path/to/security.ini
+[users]
+sally = _, admin
+
+[roles]
+admin = *
+```
+
+What's going on here? First, Aurora must be configured to request Kerberos credentials when presented with an
+unauthenticated request. This is achieved by setting
+
+```
+-http_authentication_mechanism=NEGOTIATE
+```
+
+Next, a Realm module must be configured to **authenticate** the current request using the Kerberos
+credentials that were requested. Aurora ships with a realm module that can do this:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN[,...]
+```
+
+The Kerberos5Realm requires a keytab file and a server principal name. The principal name will usually
+be in the form `HTTP/aurora.example.com@EXAMPLE.COM`.
+
+```
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+```
+
+The Kerberos5 realm module is authentication-only. For scheduler security to work you must also
+enable a realm module that provides an Authorizer implementation. For example, to do this using the
+IniShiroRealmModule:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+```
+
+You can then configure authorization using a security.ini file as described below
+(the password field is ignored). You must configure the realm module with the path to this file:
+
+```
+-shiro_ini_path=path/to/security.ini
+```
+
+### Client Configuration
+To use Kerberos on the client-side you must build Kerberos-enabled client binaries. Do this with
+
+```
+./pants binary src/main/python/apache/aurora/kerberos:kaurora
+./pants binary src/main/python/apache/aurora/kerberos:kaurora_admin
+```
+
+You must also configure each cluster where you've enabled Kerberos on the scheduler
+to use Kerberos authentication. Do this by setting `auth_mechanism` to `KERBEROS`
+in `clusters.json`.
+
+```
+% cat ~/.aurora/clusters.json
+{
+    "devcluser": {
+        "auth_mechanism": "KERBEROS",
+        ...
+    },
+    ...
+}
+```
+
+# Authorization
+Given a means to authenticate the entity a client claims to be, we need to define what privileges it has.
+
+## Using an INI file to define security controls
+
+The simplest security configuration for Aurora is an INI file on the scheduler.  For small
+clusters, or clusters where the users and access controls change relatively infrequently, this is
+likely the preferred approach.  However you may want to avoid this approach if access permissions
+are rapidly changing, or if your access control information already exists in another system.
+
+You can enable INI-based configuration with the following scheduler command line arguments:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_ini_path=path/to/security.ini
+```
+
+*Note:* As the argument name reveals, this is using Shiro’s
+[IniRealm](http://shiro.apache.org/configuration.html#Configuration-INIConfiguration) behind
+the scenes.
+
+The INI file will contain two sections - users and roles.  Here’s an example for what might
+be in security.ini:
+
+```
+[users]
+sally = apple, admin
+jim = 123456, accounting
+becky = letmein, webapp
+larry = 654321,accounting
+steve = password
+
+[roles]
+admin = *
+accounting = thrift.AuroraAdmin:setQuota
+webapp = thrift.AuroraSchedulerManager:*:webapp
+```
+
+The users section defines user credentials and the role(s) they are members of.  These lines
+are of the format `<user> = <password>[, <role>...]`.  As you probably noticed, the passwords are
+in plaintext and as a result read access to this file should be restricted.
+
+In this configuration, each user has different privileges for actions in the cluster because
+of the roles they are a part of:
+
+* admin is granted all privileges
+* accounting may adjust the amount of resource quota for any role
+* webapp represents a collection of jobs that represents a service, and its members may create and modify any jobs owned by it
+
+### Caveats
+You might find documentation on the Internet suggesting there are additional sections in `shiro.ini`,
+like `[main]` and `[urls]`. These are not supported by Aurora as it uses a different mechanism to configure
+those parts of Shiro. Think of Aurora's `security.ini` as a subset with only `[users]` and `[roles]` sections.
+
+## Implementing Delegated Authorization
+
+It is possible to leverage Shiro's `runAs` feature by implementing a custom Servlet Filter that provides
+the capability and passing its fully qualified class name to the command line argument
+`-shiro_after_auth_filter`. The filter is registered in the same filter chain as the Shiro auth filters,
+placed after them, which ensures the filter is invoked after the Shiro filters have had a chance to
+authenticate the request.
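+
+A minimal sketch of such a filter follows (the package, class, and header names are hypothetical,
+and the delegation policy shown is illustrative only; a real implementation must decide how to
+validate the delegated identity before trusting it):
+
+```java
+package com.example;
+
+import java.io.IOException;
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+
+import org.apache.shiro.SecurityUtils;
+import org.apache.shiro.subject.SimplePrincipalCollection;
+import org.apache.shiro.subject.Subject;
+
+public class DelegatingAuthFilter implements Filter {
+  @Override
+  public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
+      throws IOException, ServletException {
+    Subject subject = SecurityUtils.getSubject();
+    // Hypothetical header naming the identity to act as; validate it before trusting it.
+    String delegate = ((HttpServletRequest) request).getHeader("X-Aurora-Run-As");
+    if (subject.isAuthenticated() && delegate != null) {
+      subject.runAs(new SimplePrincipalCollection(delegate, "delegated"));
+    }
+    try {
+      chain.doFilter(request, response);
+    } finally {
+      if (subject.isRunAs()) {
+        subject.releaseRunAs();
+      }
+    }
+  }
+
+  @Override public void init(FilterConfig filterConfig) {}
+  @Override public void destroy() {}
+}
+```
+
+The filter would then be activated with `-shiro_after_auth_filter=com.example.DelegatingAuthFilter`.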
+
+# Implementing a Custom Realm
+
+Since Aurora’s security is backed by [Apache Shiro](https://shiro.apache.org), you can implement a
+custom [Realm](http://shiro.apache.org/realm.html) to define organization-specific security behavior.
+
+In addition to using Shiro's standard APIs to implement a Realm you can link against Aurora to
+access the type-safe Permissions Aurora uses. See the Javadoc for `org.apache.aurora.scheduler.spi`
+for more information.
+
+## Packaging a realm module
+Package your custom Realm(s) with a Guice module that exposes a `Set<Realm>` multibinding.
+
+```java
+package com.example;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.multibindings.Multibinder;
+import org.apache.shiro.realm.Realm;
+
+public class MyRealmModule extends AbstractModule {
+  @Override
+  public void configure() {
+    Realm myRealm = new MyRealm();
+
+    Multibinder.newSetBinder(binder(), Realm.class).addBinding().toInstance(myRealm);
+  }
+
+  static class MyRealm implements Realm {
+    // Realm implementation.
+  }
+}
+```
+
+To use your module in the scheduler, include it as a realm module based on its fully-qualified
+class name:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ,com.example.MyRealmModule
+```
+
+# Known Issues
+
+While the APIs and SPIs we ship with are stable as of 0.8.0, we are aware of several incremental
+improvements. Please follow, vote, or send patches.
+
+Relevant tickets:
+* [AURORA-343](https://issues.apache.org/jira/browse/AURORA-343): HTTPS support
+* [AURORA-1248](https://issues.apache.org/jira/browse/AURORA-1248): Client retries 4xx errors
+* [AURORA-1279](https://issues.apache.org/jira/browse/AURORA-1279): Remove kerberos-specific build targets
+* [AURORA-1291](https://issues.apache.org/jira/browse/AURORA-1291): Consider defining a JSON format in place of INI
+* [AURORA-1179](https://issues.apache.org/jira/browse/AURORA-1179): Support hashed passwords in security.ini
+* [AURORA-1295](https://issues.apache.org/jira/browse/AURORA-1295): Support security for the ReadOnlyScheduler service
+
+# Announcer Authentication
+The Thermos executor can be configured to authenticate with ZooKeeper and include
+an [ACL](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl)
+on the nodes it creates, which will specify
+the privileges of clients to perform different actions on these nodes.  This
+feature is enabled by specifying an ACL configuration file to the executor with the
+`--announcer-zookeeper-auth-config` command line argument.
+
+When this feature is _not_ enabled, nodes created by the executor will have 'world/all' permission
+(`ZOO_OPEN_ACL_UNSAFE`).  In most production environments, operators should specify an ACL and
+limit access.
+
+## ZooKeeper Authentication Configuration
+The configuration file must be formatted as JSON with the following schema:
+
+```json
+{
+  "auth": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>"
+    }
+  ],
+  "acl": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>",
+      "permissions": {
+        "read": <bool>,
+        "write": <bool>,
+        "create": <bool>,
+        "delete": <bool>,
+        "admin": <bool>
+      }
+    }
+  ]
+}
+```
+
+The `scheme` defines the encoding of the credential field.  Note that these fields are passed
+directly to ZooKeeper (except in the case of the _digest_ scheme, where the executor will hash and
+encode the credential appropriately before passing it to ZooKeeper). In addition to `acl`, a list
+of authentication credentials must be provided in `auth` to use for the connection.
+
+All properties of the `permissions` object will default to `false` if not provided.
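+
+A concrete example using the _digest_ scheme might look like the following (the credentials are
+illustrative):
+
+```json
+{
+  "auth": [
+    {"scheme": "digest", "credential": "aurora:secret"}
+  ],
+  "acl": [
+    {
+      "scheme": "digest",
+      "credential": "aurora:secret",
+      "permissions": {"read": true, "write": true, "create": true, "delete": true, "admin": true}
+    }
+  ]
+}
+```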
+
+## Executor settings
+To enable the executor to authenticate against ZooKeeper, `--announcer-zookeeper-auth-config` should be
+set to the path of the configuration file.
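+
+One way to supply this flag is through the scheduler's `-thermos_executor_flags` option (the flag
+composition and path shown are illustrative):
+
+```
+-thermos_executor_flags="--announcer-enable --announcer-zookeeper-auth-config=/etc/aurora/zk-auth.json"
+```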
\ No newline at end of file
diff --git a/source/documentation/0.13.0/operations/storage.md b/source/documentation/0.13.0/operations/storage.md
new file mode 100644
index 0000000..874dfe6
--- /dev/null
+++ b/source/documentation/0.13.0/operations/storage.md
@@ -0,0 +1,97 @@
+# Aurora Scheduler Storage
+
+- [Overview](#overview)
+- [Replicated Log Configuration](#replicated-log-configuration)
+- [Backup Configuration](#backup-configuration)
+- [Storage Semantics](#storage-semantics)
+  - [Reads, writes, modifications](#reads-writes-modifications)
+    - [Read lifecycle](#read-lifecycle)
+    - [Write lifecycle](#write-lifecycle)
+  - [Atomicity, consistency and isolation](#atomicity-consistency-and-isolation)
+  - [Population on restart](#population-on-restart)
+
+
+## Overview
+
+The Aurora scheduler maintains data that needs to be persisted to survive failovers and restarts.
+For example:
+
+* Task configurations and scheduled task instances
+* Job update configurations and update progress
+* Production resource quotas
+* Mesos resource offer host attributes
+
+Aurora solves its persistence needs by leveraging the Mesos implementation of a Paxos replicated
+log [[1]](https://ramcloud.stanford.edu/~ongaro/userstudy/paxos.pdf)
+[[2]](http://en.wikipedia.org/wiki/State_machine_replication) with a key-value
+[LevelDB](https://github.com/google/leveldb) storage as the persistence medium.
+
+Conceptually, it can be represented by the following major components:
+
+* Volatile storage: in-memory cache of all available data. Implemented via in-memory
+[H2 Database](http://www.h2database.com/html/main.html) and accessed via
+[MyBatis](http://mybatis.github.io/mybatis-3/).
+* Log manager: interface between Aurora storage and Mesos replicated log. The default schema format
+is [thrift](https://github.com/apache/thrift). Data is stored in serialized binary form.
+* Snapshot manager: all data is periodically persisted in the Mesos replicated log in a single snapshot.
+This helps establish periodic recovery checkpoints and speeds up volatile storage recovery on
+restart.
+* Backup manager: as a precaution, snapshots are periodically written out into backup files.
+This solves a [disaster recovery problem](../backup-restore/)
+in case of a complete loss or corruption of Mesos log files.
+
+![Storage hierarchy](../images/storage_hierarchy.png)
+
+
+## Storage Semantics
+
+This section covers implementation details of the Aurora storage system. Understanding them can
+sometimes be useful when investigating performance issues.
+
+### Reads, writes, modifications
+
+All services in Aurora access data via a set of predefined store interfaces (aka stores) logically
+grouped by the type of data they serve. Every interface defines a specific set of operations allowed
+on the data, thus abstracting out the storage access and the actual persistence implementation. The
+latter is especially important given the general immutability of persisted data. With the Mesos
+replicated log as the underlying persistence solution, data can be read and written easily but not
+modified. All modifications are simulated by saving new versions of modified objects. This feature
+and general performance considerations justify the existence of the volatile in-memory store.
+
+#### Read lifecycle
+
+There are two types of reads available in Aurora: consistent and weakly-consistent. The difference
+is explained [below](#atomicity-consistency-and-isolation).
+
+All reads are served from the volatile storage, making reads generally cheap operations from the
+performance standpoint. The majority of the volatile stores are represented by the
+in-memory H2 database. This allows for rich schema definitions, queries and relationships that
+key-value storage is unable to match.
+
+#### Write lifecycle
+
+Writes are more involved operations since, in addition to updating the volatile store, data has to
+be appended to the replicated log. Data is not available for reads until fully acknowledged by both
+the replicated log and volatile storage.
+
+### Atomicity, consistency and isolation
+
+Aurora uses [write-ahead logging](http://en.wikipedia.org/wiki/Write-ahead_logging) to ensure
+consistency between replicated and volatile storage. In Aurora, data is first written into the
+replicated log and only then updated in the volatile store.
+
+Aurora storage uses read-write locks to serialize data mutations and provide a consistent view of
+the available data. The `Storage` interface exposes 3 major types of operations:
+
+* `consistentRead` - access is serialized via a reader's lock and provides a consistent view on read
+* `weaklyConsistentRead` - access is lock-less. Delivers the best contention performance but may result
+in stale reads
+* `write` - access is fully serialized via a writer's lock. Operation success requires both
+volatile and replicated writes to succeed.
+
+The consistency of the volatile store is enforced via H2 transactional isolation.
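+
+As an illustrative sketch only (the real scheduler interfaces are richer than this), the shape of
+the access pattern described above can be pictured as:
+
+```java
+// Simplified sketch of the access pattern described above; not the actual Aurora API.
+interface StoreProvider {}
+
+@FunctionalInterface
+interface Work<T> {
+  T apply(StoreProvider stores);
+}
+
+interface Storage {
+  <T> T consistentRead(Work<T> work);        // serialized against writes via a reader's lock
+  <T> T weaklyConsistentRead(Work<T> work);  // lock-less; may observe stale data
+  <T> T write(Work<T> work);                 // writer's lock; replicated log + volatile store
+}
+```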
+
+### Population on restart
+
+Any time a scheduler restarts, it restores its volatile state from the most recent position recorded
+in the replicated log by restoring the snapshot and replaying individual log entries on top to fully
+recover the state up to the last write.
diff --git a/source/documentation/0.13.0/reference/client-cluster-configuration.md b/source/documentation/0.13.0/reference/client-cluster-configuration.md
new file mode 100644
index 0000000..8891f4d
--- /dev/null
+++ b/source/documentation/0.13.0/reference/client-cluster-configuration.md
@@ -0,0 +1,93 @@
+# Client Cluster Configuration
+
+A cluster configuration file is used by the Aurora client to describe the Aurora clusters with
+which it can communicate. Ultimately this allows client users to reference clusters with short names
+like us-east and eu.
+
+A cluster configuration is formatted as JSON.  The simplest cluster configuration is one that
+communicates with a single (non-leader-elected) scheduler.  For example:
+
+    [{
+      "name": "example",
+      "scheduler_uri": "http://localhost:55555",
+    }]
+
+
+A configuration for a leader-elected scheduler would contain something like:
+
+    [{
+      "name": "example",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler"
+    }]
+
+
+The following properties may be set:
+
+  **Property**             | **Type** | **Description**
+  :------------------------| :------- | :--------------
+   **name**                | String   | Cluster name (Required)
+   **slave_root**          | String   | Path to mesos slave work dir (Required)
+   **slave_run_directory** | String   | Name of mesos slave run dir (Required)
+   **zk**                  | String   | Hostname of ZooKeeper instance used to resolve Aurora schedulers.
+   **zk_port**             | Integer  | Port of ZooKeeper instance used to locate Aurora schedulers (Default: 2181)
+   **scheduler_zk_path**   | String   | ZooKeeper path under which scheduler instances are registered.
+   **scheduler_uri**       | String   | URI of Aurora scheduler instance.
+   **proxy_url**           | String   | Used by the client to format URLs for display.
+   **auth_mechanism**      | String   | The authentication mechanism to use when communicating with the scheduler. (Default: UNAUTHENTICATED)
+
+
+## Details
+
+### `name`
+
+The name of the Aurora cluster represented by this entry. This name will be the `cluster` portion of
+any job keys identifying jobs running within the cluster.
+
+### `slave_root`
+
+The path on the mesos slaves where executing tasks can be found. It is used in combination with the
+`slave_run_directory` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This value should match the value passed to `mesos-slave`
+as `--work_dir`.
+
+### `slave_run_directory`
+
+The name of the directory where the task run can be found. This is used in combination with the
+`slave_root` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This should almost always be set to `latest`.
+
+### `zk`
+
+The hostname of the ZooKeeper instance used to resolve the Aurora scheduler. Aurora uses ZooKeeper
+to elect a leader. The client will connect to this ZooKeeper instance to determine the current
+leader. This host should match the host passed to the scheduler as `-zk_endpoints`.
+
+### `zk_port`
+
+The port on which the ZooKeeper instance is running. If not set this will default to the standard
+ZooKeeper port of 2181. This port should match the port in the host passed to the scheduler as
+`-zk_endpoints`.
+
+### `scheduler_zk_path`
+
+The path on the ZooKeeper instance under which the Aurora serverset is registered. This value should
+match the value passed to the scheduler as `-serverset_path`.
+
+### `scheduler_uri`
+
+The URI of the scheduler. This would be used in place of the ZooKeeper related configuration above
+in circumstances where direct communication with a single scheduler is needed (e.g. testing
+environments). It is strongly advised to **never** use this property for production deploys.
+
+### `proxy_url`
+
+Instead of using the hostname of the leading scheduler as the base url, if `proxy_url` is set, its
+value will be used instead. In that scenario the value for `proxy_url` would be, for example, the
+URL of your VIP in a loadbalancer or a roundrobin DNS name.
+
+### `auth_mechanism`
+
+The identifier of an authentication mechanism that the client should use when communicating with the
+scheduler. Support for values other than `UNAUTHENTICATED` requires a matching scheduler-side
+[security configuration](../../operations/security/).
diff --git a/source/documentation/0.13.0/reference/client-commands.md b/source/documentation/0.13.0/reference/client-commands.md
new file mode 100644
index 0000000..1323e39
--- /dev/null
+++ b/source/documentation/0.13.0/reference/client-commands.md
@@ -0,0 +1,326 @@
+Aurora Client Commands
+======================
+
+- [Introduction](#introduction)
+- [Cluster Configuration](#cluster-configuration)
+- [Job Keys](#job-keys)
+- [Modifying Aurora Client Commands](#modifying-aurora-client-commands)
+- [Regular Jobs](#regular-jobs)
+    - [Creating and Running a Job](#creating-and-running-a-job)
+    - [Running a Command On a Running Job](#running-a-command-on-a-running-job)
+    - [Killing a Job](#killing-a-job)
+    - [Adding Instances](#adding-instances)
+    - [Updating a Job](#updating-a-job)
+        - [Coordinated job updates](#coordinated-job-updates)
+    - [Renaming a Job](#renaming-a-job)
+    - [Restarting Jobs](#restarting-jobs)
+- [Cron Jobs](#cron-jobs)
+- [Comparing Jobs](#comparing-jobs)
+- [Viewing/Examining Jobs](#viewingexamining-jobs)
+    - [Listing Jobs](#listing-jobs)
+    - [Inspecting a Job](#inspecting-a-job)
+    - [Versions](#versions)
+    - [Checking Your Quota](#checking-your-quota)
+    - [Finding a Job on Web UI](#finding-a-job-on-web-ui)
+    - [Getting Job Status](#getting-job-status)
+    - [Opening the Web UI](#opening-the-web-ui)
+    - [SSHing to a Specific Task Machine](#sshing-to-a-specific-task-machine)
+    - [Templating Command Arguments](#templating-command-arguments)
+
+Introduction
+------------
+
+Once you have written an `.aurora` configuration file that describes
+your Job and its parameters and functionality, you interact with Aurora
+using Aurora Client commands. This document describes all of these commands
+and how and when to use them. All Aurora Client commands start with
+`aurora`, followed by the name of the specific command and its
+arguments.
+
+*Job keys* are a very common argument to Aurora commands, as well as the
+gateway to useful information about a Job. Before using Aurora, you
+should read the next section which describes them in detail. The section
+after that briefly describes how you can modify the behavior of certain
+Aurora Client commands, linking to a detailed document about how to do
+that.
+
+This is followed by the Regular Jobs section, which describes the basic
+Client commands for creating, running, and manipulating Aurora Jobs.
+After that are sections on Comparing Jobs and Viewing/Examining Jobs. In
+other words, various commands for getting information and metadata about
+Aurora Jobs.
+
+Cluster Configuration
+---------------------
+
+The client must be able to find a configuration file that specifies available clusters. This file
+declares shorthand names for clusters, which are in turn referenced by job configuration files
+and client commands.
+
+The client will load at most two configuration files, making both of their defined clusters
+available. The first is intended to be a system-installed cluster, using the path specified in
+the environment variable `AURORA_CONFIG_ROOT`, defaulting to `/etc/aurora/clusters.json` if the
+environment variable is not set. The second is a user-installed file, located at
+`~/.aurora/clusters.json`.
+
+For more details on cluster configuration see the
+[Client Cluster Configuration](../client-cluster-configuration/) documentation.
+
+Job Keys
+--------
+
+A job key is a unique system-wide identifier for an Aurora-managed
+Job, for example `cluster1/web-team/test/experiment204`. It is a 4-tuple
+consisting of, in order, *cluster*, *role*, *environment*, and
+*jobname*, separated by slashes. Cluster is the name of an Aurora
+cluster. Role is the Unix service account under which the Job
+runs. Environment is a namespace component like `devel`, `test`,
+`prod`, or `stagingN`. Jobname is the Job's name.
+
+The combination of all four values uniquely specifies the Job. If any
+one value is different from that of another job key, the two job keys
+refer to different Jobs. For example, job key
+`cluster1/tyg/prod/workhorse` is different from
+`cluster1/tyg/prod/workcamel` is different from
+`cluster2/tyg/prod/workhorse` is different from
+`cluster2/foo/prod/workhorse` is different from
+`cluster1/tyg/test/workhorse`.
+
+Role names are user accounts existing on the slave machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
+Modifying Aurora Client Commands
+--------------------------------
+
+For certain Aurora Client commands, you can define hook methods that run
+either before or after an action that takes place during the command's
+execution, as well as based on whether the action finished successfully or failed
+during execution. Basically, a hook is code that lets you extend the
+command's actions. The hook executes on the client side, specifically on
+the machine executing Aurora commands.
+
+Hooks can be associated with these Aurora Client commands:
+
+  - `job create`
+  - `job kill`
+  - `job restart`
+
+The process for writing and activating them is complex enough
+that we explain it in a devoted document, [Hooks for Aurora Client API](../client-hooks/).
+
+Regular Jobs
+------------
+
+This section covers Aurora commands related to running, killing,
+renaming, updating, and restarting a basic Aurora Job.
+
+### Creating and Running a Job
+
+    aurora job create <job key> <configuration file>
+
+Creates and then runs a Job with the specified job key based on a `.aurora` configuration file.
+The configuration file may also contain and activate hook definitions.
+
+### Running a Command On a Running Job
+
+    aurora task run CLUSTER/ROLE/ENV/NAME[/INSTANCES] <cmd>
+
+Runs a shell command on all machines currently hosting shards of a
+single Job.
+
+`run` supports the same command line wildcards used to populate a Job's
+commands; i.e. anything in the `{{mesos.*}}` and `{{thermos.*}}`
+namespaces.
+
+### Killing a Job
+
+    aurora job killall CLUSTER/ROLE/ENV/NAME
+
+Kills all Tasks associated with the specified Job, blocking until all
+are terminated. Defaults to killing all instances in the Job.
+
+The `<configuration file>` argument for `kill` is optional. Use it only
+if it contains hook definitions and activations that affect the
+kill command.
+
+### Adding Instances
+
+    aurora job add CLUSTER/ROLE/ENV/NAME/INSTANCE <count>
+
+Adds `<count>` instances to the existing job. The configuration of the new instances is derived from
+an active job instance pointed to by the `/INSTANCE` part of the job specification. This command is
+a simpler way to scale out an existing job when an instance with the desired task configuration
+already exists. Use `aurora update start` to add instances with a new (updated) configuration.
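+
+For example, to add two instances cloned from instance 1 of a job (the job key is illustrative):
+
+    aurora job add devcluster/www-data/prod/hello/1 2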
+
+### Updating a Job
+
+You can manage job updates using the `aurora update` command.  Please see
+[the Job Update documentation](../../features/job-updates/) for more details.
+
+
+### Renaming a Job
+
+Renaming is a tricky operation as downstream clients must be informed of
+the new name. A conservative approach
+to renaming suitable for production services is:
+
+1.  Modify the Aurora configuration file to change the role,
+    environment, and/or name as appropriate to the standardized naming
+    scheme.
+2.  Check that only these naming components have changed
+    with `aurora diff`.
+
+        aurora job diff CLUSTER/ROLE/ENV/NAME <job_configuration>
+
+3.  Create the (identical) job at the new key. You may need to request a
+    temporary quota increase.
+
+        aurora job create CLUSTER/ROLE/ENV/NEW_NAME <job_configuration>
+
+4.  Migrate all clients over to the new job key. Update all links and
+    dashboards. Ensure that both job keys run identical versions of the
+    code while in this state.
+5.  After verifying that all clients have successfully moved over, kill
+    the old job.
+
+        aurora job killall CLUSTER/ROLE/ENV/NAME
+
+6.  If you received a temporary quota increase, be sure to let the
+    powers that be know you no longer need the additional capacity.
+
+### Restarting Jobs
+
+`restart` restarts all shards of the Job identified by the given job key:
+
+    aurora job restart CLUSTER/ROLE/ENV/NAME[/INSTANCES]
+
+Restarts are controlled on the client side, so aborting
+the `job restart` command halts the restart operation.
+
+**Note**: `job restart` only applies its command line arguments and does not use, nor is it
+affected by, `update.config`. Restarting
+does ***not*** involve a configuration change. To update the
+configuration, use `update.config`.
+
+The `--config` argument for restart is optional. Use it only
+if it contains hook definitions and activations that affect the
+`job restart` command.
+
+Cron Jobs
+---------
+
+You can manage cron jobs using the `aurora cron` command.  Please see
+[the Cron Jobs Feature](../../features/cron-jobs/) for more details.
+
+Comparing Jobs
+--------------
+
+    aurora job diff CLUSTER/ROLE/ENV/NAME <job configuration>
+
+Compares a job configuration against a running job. By default the diff
+is determined using `diff`, though you may choose an alternate
+diff program by specifying the `DIFF_VIEWER` environment variable.
+
+Viewing/Examining Jobs
+----------------------
+
+Above we discussed creating, killing, and updating Jobs. Here we discuss
+how to view and examine Jobs.
+
+### Listing Jobs
+
+    aurora config list <job configuration>
+
+Lists all Jobs defined in the named configuration file.
+
+### Inspecting a Job
+
+    aurora job inspect CLUSTER/ROLE/ENV/NAME <job configuration>
+
+`inspect` verifies that its specified job can be parsed from a
+configuration file, and displays the parsed configuration.
+
+### Checking Your Quota
+
+    aurora quota get CLUSTER/ROLE
+
+Prints the production quota allocated to the role at the given
+cluster. Only non-[dedicated](../../features/constraints/#dedicated-attribute)
+[production](../configuration/#job-objects) jobs consume quota.
+
+### Finding a Job on Web UI
+
+When you create a job, part of the output response contains a URL that goes
+to the job's scheduler UI page. For example:
+
+    vagrant@precise64:~$ aurora job create devcluster/www-data/prod/hello /vagrant/examples/jobs/hello_world.aurora
+    INFO] Creating job hello
+    INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+    INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+You can go to the scheduler UI page for this job via `http://precise64:8081/scheduler/www-data/prod/hello`.
+You can go to the overall scheduler UI page by truncating that URL at `scheduler`: `http://precise64:8081/scheduler`.
+
+Once you click through to a role page, you see Jobs arranged
+separately by pending jobs, active jobs and finished jobs.
+Jobs are arranged by role, typically a service account for
+production jobs and user accounts for test or development jobs.
+
+### Getting Job Status
+
+    aurora job status <job_key>
+
+Returns the status of recent tasks associated with the Job specified by
+`job_key` in its supplied cluster. Typically this includes
+a mix of active tasks (running or assigned) and inactive tasks
+(successful, failed, and lost).
+
+### Opening the Web UI
+
+Use the Job's web UI scheduler URL or the `aurora job status` command to find out on which
+machines individual tasks are scheduled. You can open the web UI via the
+`open` command if invoked from your machine:
+
+    aurora job open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+If only the cluster is specified, it goes directly to that cluster's
+scheduler main page. If the role is specified, it goes to the top-level
+role page. If the full job key is specified, it goes directly to the job
+page where you can inspect individual tasks.
+
+### SSHing to a Specific Task Machine
+
+    aurora task ssh <job_key> <shard number>
+
+You can have the Aurora client ssh directly to the machine that has been
+assigned a particular Job/shard number. This may be useful for quickly
+diagnosing issues such as performance issues or abnormal behavior on a
+particular machine.
+
+### Templating Command Arguments
+
+    aurora task run [-e] [-t THREADS] <job_key> -- <<command-line>>
+
+Given a job specification, run the supplied command on all hosts and
+return the output. You may use the standard Mustache templating rules:
+
+- `{{thermos.ports[name]}}` substitutes the specific named port of the
+  task assigned to this machine
+- `{{mesos.instance}}` substitutes the shard id of the job's task
+  assigned to this machine
+- `{{thermos.task_id}}` substitutes the task id of the job's task
+  assigned to this machine
+
+For example, the following type of pattern can be a powerful diagnostic
+tool:
+
+    aurora task run -t5 cluster1/tyg/devel/seizure -- \
+      'curl -s -m1 localhost:{{thermos.ports[http]}}/vars | grep uptime'
+
+By default, the command runs in the Task's sandbox. The `-e` option can
+run the command in the executor's sandbox. This is mostly useful for
+Aurora administrators.
+
+You can parallelize the runs by using the `-t` option.
diff --git a/source/documentation/0.13.0/reference/client-hooks.md b/source/documentation/0.13.0/reference/client-hooks.md
new file mode 100644
index 0000000..ee64193
--- /dev/null
+++ b/source/documentation/0.13.0/reference/client-hooks.md
@@ -0,0 +1,228 @@
+# Hooks for Aurora Client API
+
+You can execute hook methods around Aurora API Client methods when they are called by the Aurora Command Line commands.
+
+Explaining how hooks work is a bit tricky because of some indirection about what they apply to. Basically, a hook is code that executes when a particular Aurora Client API method runs, letting you extend the method's actions. The hook executes on the client side, specifically on the machine executing Aurora commands.
+
+The catch is that hooks are associated with Aurora Client API methods, which users don't directly call. Instead, users call Aurora Command Line commands, which call Client API methods during their execution. Since which hooks run depends on which Client API methods get called, you will need to know which Command Line commands call which API methods. Later on, there is a table showing the various associations.
+
+**Terminology Note**: From now on, "method(s)" refer to Client API methods, and "command(s)" refer to Command Line commands.
+
+- [Hook Types](#hook-types)
+- [Execution Order](#execution-order)
+- [Hookable Methods](#hookable-methods)
+- [Activating and Using Hooks](#activating-and-using-hooks)
+- [.aurora Config File Settings](#aurora-config-file-settings)
+- [Command Line](#command-line)
+- [Hooks Protocol](#hooks-protocol)
+  - [pre_ Methods](#pre_-methods)
+  - [err_ Methods](#err_-methods)
+  - [post_ Methods](#post_-methods)
+- [Generic Hooks](#generic-hooks)
+- [Hooks Process Checklist](#hooks-process-checklist)
+
+
+## Hook Types
+
+Hooks have three basic types, differing by when they run with respect to their associated method.
+
+`pre_<method_name>`: When its associated method is called, the `pre_` hook executes first, then the called method. If the `pre_` hook fails, the method never runs. Later code that expected the method to succeed may be affected by this, which can result in terminating the Aurora client.
+
+Note that a `pre_` hook can error-trap internally so it does not
+return `False`. Designers/contributors of new `pre_` hooks should
+consider whether or not to error-trap them. You can error trap at the
+highest level very generally and always pass the `pre_` hook by
+returning `True`. For example:
+
+    def pre_create(...):
+      do_something()  # if do_something fails with an exception, the create_job is not attempted!
+      return True
+
+    # However...
+    def pre_create(...):
+      try:
+        do_something()  # may cause exception
+      except Exception:  # generic error trap will catch it
+        pass  # and ignore the exception
+      return True  # create_job will run in any case!
+
+`post_<method_name>`: A `post_` hook executes after its associated method successfully finishes running. If it fails, the already executed method is unaffected. A `post_` hook's error is trapped, and any later operations are unaffected.
+
+`err_<method_name>`: Executes only when its associated method returns a status other than OK or throws an exception. If an `err_` hook fails, the already executed method is unaffected. An `err_` hook's error is trapped, and any later operations are unaffected.
+
+## Execution Order
+
+A command with `pre_`, `post_`, and `err_` hooks defined and activated for its called method executes in the following order when the method successfully executes:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and successfully finishes
+6. `post_` method hook runs
+7. Command code executes
+8. Command execution ends
+
+The following is what happens when, for the same command and hooks, the method associated with the command suffers an error and does not successfully finish executing:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and fails
+6. `err_` method hook runs
+7. Command Code executes (if `err_` method does not end the command execution)
+8. Command execution ends
+
+Note that the `post_` and `err_` hooks for the same method can never both run for a single execution of that method.
+
+## Hookable Methods
+
+You can associate `pre_`, `post_`, and `err_` hooks with the following methods. Since you do not directly interact with the methods, but rather the Aurora Command Line commands that call them, for each method we also list the command(s) that can call the method. Note that a different method or methods may be called by a command depending on how the command's other code executes. Similarly, multiple commands can call the same method. We also list the methods' argument signatures, which are used by their associated hooks. <a name="Chart"></a>
+
+  Aurora Client API Method | Client API Method Argument Signature | Aurora Command Line Command
+  -------------------------| ------------------------------------- | ---------------------------
+  ```create_job``` | ```self```, ```config``` | ```job create```, ```runtask```
+  ```restart``` | ```self```, ```job_key```, ```shards```, ```update_config```, ```health_check_interval_seconds``` | ```job restart```
+  ```kill_job``` | ```self```, ```job_key```, ```shards=None``` |  ```job kill```
+  ```start_cronjob``` | ```self```, ```job_key``` | ```cron start```
+  ```start_job_update``` | ```self```, ```config```, ```instances=None``` | ```update start```
+
+Some specific examples:
+
+* `pre_create_job` executes when a `create_job` method is called, and before the `create_job` method itself executes.
+
+* `post_cancel_update` executes after a `cancel_update` method has successfully finished running.
+
+* `err_kill_job` executes when the `kill_job` method is called, but doesn't successfully finish running.
+
+## Activating and Using Hooks
+
+By default, hooks are inactive. If you do not want to use hooks, you do not need to make any changes to your code. If you do want to use hooks, you will need to alter your `.aurora` config file to activate them both for the configuration as a whole as well as for individual `Job`s. And, of course, you will need to define in your config file what happens when a particular hook executes.
+
+## .aurora Config File Settings
+
+You can define a top-level `hooks` variable in any `.aurora` config file. `hooks` is a list of all objects that define hooks used by `Job`s defined in that config file. If you do not want to define any hooks for a configuration, `hooks` is optional.
+
+    hooks = [Object_with_defined_hooks1, Object_with_defined_hooks2]
+
+Be careful when assembling a config file using `include` on multiple smaller config files. If there are multiple files that assign a value to `hooks`, only the last assignment made will stick. For example, if `x.aurora` has `hooks = [a, b, c]` and `y.aurora` has `hooks = [d, e, f]` and `z.aurora` has, in this order, `include x.aurora` and `include y.aurora`, the `hooks` value will be `[d, e, f]`.
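+
+A sketch of that scenario using the `include` directive:
+
+    # z.aurora
+    include('x.aurora')   # x.aurora assigns hooks = [a, b, c]
+    include('y.aurora')   # y.aurora assigns hooks = [d, e, f]
+    # hooks is now [d, e, f]; the assignment from x.aurora has been overwritten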
+
+Also, for any `Job` that you want to use hooks with, its `Job` definition in the `.aurora` config file must set an `enable_hooks` flag to `True` (it defaults to `False`). By default, hooks are disabled and you must enable them for `Job`s of your choice.
+
+To summarize, to use hooks for a particular job, you must both activate hooks for your config file as a whole, and for that job. Activating hooks only for individual jobs won't work, nor will only activating hooks for your config file as a whole. You must also specify the hooks' defining object in the `hooks` variable.
+
+Recall that `.aurora` config files are written in Pystachio. So the following turns on hooks for production jobs at cluster1 and cluster2, but leaves them off for similar jobs with a defined user role. Of course, you also need to list the objects that define the hooks in your config file's `hooks` variable.
+
+    jobs = [
+            Job(enable_hooks = True, cluster = c, env = 'prod') for c in ('cluster1', 'cluster2')
+           ]
+    jobs.extend(
+       Job(cluster = c, env = 'prod', role = getpass.getuser()) for c in ('cluster1', 'cluster2'))
+       # Hooks disabled for these jobs
+
+## Command Line
+
+All Aurora Command Line commands now accept an `.aurora` config file as an optional parameter (some, of course, accept it as a required parameter). Whenever a command has a `.aurora` file parameter, any hooks specified and activated in the `.aurora` file can be used. For example:
+
+    aurora job restart cluster1/role/env/app myapp.aurora
+
+The command activates any hooks specified and activated in `myapp.aurora`. For the `restart` command, that is the only thing the `myapp.aurora` parameter does. So, if the command was the following, since there is no `.aurora` config file to specify any hooks, no hooks on the `restart` command can run.
+
+    aurora job restart cluster1/role/env/app
+
+## Hooks Protocol
+
+Any object defined in the `.aurora` config file can define hook methods. You should define your hook methods within a class, and then use the class name as a value in the `hooks` list in your config file.
+
+Note that you can define other methods in the class that its hook methods can call; not all of a hook's logic has to be in the hook's own definition.
+
+The following example defines a class containing a `pre_kill_job` hook definition that calls another method defined in the class.
+
+    # Defines a method pre_kill_job
+    class KillConfirmer(object):
+      def confirm(self, msg):
+        return raw_input(msg).lower() == 'yes'
+
+      def pre_kill_job(self, job_key, shards=None):
+        shards = ('shards %s' % shards) if shards is not None else 'all shards'
+        return self.confirm('Are you sure you want to kill %s (%s)? (yes/no): '
+                            % (job_key, shards))
+
+### pre_ Methods
+
+`pre_` methods have the signature:
+
+    pre_<API method name>(self, <associated method's signature>)
+
+`pre_` methods have the same signature as their associated method, with the addition of `self` as the first parameter. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `pre_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+If a `pre_` method returns `False`, the API method call aborts.
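+
+For example, a minimal sketch of a `pre_restart` hook that uses the `*`/`**` syntax to accept whatever arguments `restart` was called with:
+
+    class RestartAuditor(object):
+      def pre_restart(self, job_key, *args, **kw):
+        # job_key is restart's first parameter; any remaining positional and
+        # keyword arguments are collected unchanged into args and kw.
+        print('Restart requested for %s' % job_key)
+        return True  # returning False would abort the restart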
+
+### err_ Methods
+
+`err_` methods have the signature:
+
+    err_<API method name>(self, exc, <associated method's signature>)
+
+`err_` methods have the same signature as their associated method, with the addition of a first parameter `self` and a second parameter `exc`. `exc` is either a result with responseCode other than `ResponseCode.OK` or an `Exception`. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `err`_ methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+`err_` method return codes are ignored.
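+
+A sketch of an `err_` hook for `kill_job`, following the chart's argument signature for that method:
+
+    class KillReporter(object):
+      def err_kill_job(self, exc, job_key, shards=None):
+        # exc is either a non-OK result or the Exception that was raised.
+        print('Kill of %s failed: %s' % (job_key, exc))
+        # The return value is ignored.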
+
+### post_ Methods
+
+`post_` methods have the signature:
+
+    post_<API method name>(self, result, <associated method signature>)
+
+`post_` method parameters are `self`, then `result`, followed by the same parameter signature as their associated method. `result` is the result of the associated method call. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `post_` methods, you can use the `*` and `**` syntax to designate that all unspecified arguments are passed in a list to the `*`ed parameter and all unspecified named arguments with values are passed as name/value pairs to the `**`ed parameter.
+
+`post_` method return codes are ignored.
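+
+And a corresponding sketch of a `post_` hook for `kill_job`:
+
+    class KillLogger(object):
+      def post_kill_job(self, result, job_key, shards=None):
+        # result is the value returned by the successful kill_job call.
+        print('Killed %s (shards: %s)' % (job_key, shards if shards is not None else 'all'))
+        # The return value is ignored.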
+
+## Generic Hooks
+
+There are seven Aurora API methods which any of the three hook types can attach to. Thus, there are 21 possible hook/method combinations for a single `.aurora` config file. Say that you define `pre_` and `post_` hooks for the `restart` method. That leaves 19 undefined hook/method combinations: `err_restart`, plus the 3 `pre_`, `post_`, and `err_` hooks for each of the other 6 hookable methods. You can define what happens when any of these otherwise undefined 19 hooks execute via a generic hook, whose signature is:
+
+    generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw)
+
+where:
+
+* `hook_config` is a named tuple of `config` (the Pystachio `config` object) and `job_key`.
+
+* `event` is one of `pre`, `err`, or `post`, indicating which type of hook the generic hook is standing in for. For example, assume no specific hooks were defined for the `restart` API command. If `generic_hook` is defined and activated, and `restart` is called, `generic_hook` will effectively run as `pre_restart`, `post_restart`, and `err_restart`. You can use a selection statement on this value so that `generic_hook` will act differently based on whether it is standing in for a `pre_`, `post_`, or `err_` hook.
+
+* `method_name` is the Client API method name whose execution is causing this execution of the `generic_hook`.
+
+* `*args`, `**kw` are the API method arguments and keyword arguments respectively.
+* `result_or_err` is a tri-state parameter taking one of these three values:
+  1. `None` for `pre_` hooks
+  2. `result` for `post_` hooks
+  3. `exc` for `err_` hooks
+
+Example:
+
+    # Overrides the standard do-nothing generic_hook by adding a log writing operation.
+    from twitter.common import log
+
+    class Logger(object):
+      '''Adds to the log every time a hookable API method is called.'''
+      def generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw):
+        log.info('%s: %s_%s of %s'
+                 % (self.__class__.__name__, event, method_name, hook_config.job_key))
+
+## Hooks Process Checklist
+
+1. In your `.aurora` config file, add a `hooks` variable. Note that you may want to define a `.aurora` file only for hook definitions and then include this file in multiple other config files that you want to use the same hooks.
+
+        hooks = []
+
+2. In the `hooks` variable, list all objects that define hooks used by `Job`s defined in this config:
+
+        hooks = [Object_hook_definer1, Object_hook_definer2]
+
+3. For each job that uses hooks in this config file, add `enable_hooks = True` to the `Job` definition. Note that this is necessary even if you only want to use the generic hook.
+
+4. Write your `pre_`, `post_`, and `err_` hook definitions as part of an object definition in your `.aurora` config file.
+
+5. If desired, write your `generic_hook` definition as part of an object definition in your `.aurora` config file. Remember, the object must be listed as a member of `hooks`.
+
+6. If your Aurora command line command does not otherwise take an `.aurora` config file argument, add the appropriate `.aurora` file as an argument in order to define and activate the configuration's hooks.
diff --git a/source/documentation/0.13.0/reference/configuration-best-practices.md b/source/documentation/0.13.0/reference/configuration-best-practices.md
new file mode 100644
index 0000000..9ce7fcb
--- /dev/null
+++ b/source/documentation/0.13.0/reference/configuration-best-practices.md
@@ -0,0 +1,187 @@
+Aurora Configuration Best Practices
+===================================
+
+Use As Few .aurora Files As Possible
+------------------------------------
+
+When creating your `.aurora` configuration, try to keep all versions of
+a particular job within the same `.aurora` file. For example, if you
+have separate jobs for `cluster1`, `cluster1` staging, `cluster1`
+testing, and `cluster2`, keep them as close together as possible.
+
+Constructs shared across multiple jobs owned by your team (e.g.
+team-level defaults or structural templates) can be split into separate
+`.aurora` files and included via the `include` directive.
+
+
+Avoid Boilerplate
+------------------
+
+If you see repetition or find yourself copying and pasting any parts of
+your configuration, it's likely an opportunity for templating. Take the
+example below:
+
+`redundant.aurora` contains:
+
+    download = Process(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'rm -rf Python-2.7.3 && tar xjf Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    build = Process(
+      name = 'build',
+      cmdline = 'pushd Python-2.7.3 && ./configure && make && popd',
+      max_failures = 1)
+
+    email = Process(
+      name = 'email',
+      cmdline = 'echo Success | mail feynman@tmc.com',
+      max_failures = 5,
+      min_duration = 1)
+
+    build_python = Task(
+      name = 'build_python',
+      processes = [download, unpack, build, email],
+      constraints = [Constraint(order = ['download', 'unpack', 'build', 'email'])])
+
+As you'll notice, there's a lot of repetition in the `Process`
+definitions. For example, almost every process sets a `max_failures`
+limit of 5 and a `min_duration` of 1. This is an opportunity for factoring
+them into a common process template.
+
+Furthermore, the Python version is repeated everywhere. This can be
+bound via structural templating as described in the [Advanced Binding](../configuration-templating/#AdvancedBinding)
+section.
+
+`less_redundant.aurora` contains:
+
+    class Python(Struct):
+      version = Required(String)
+      base = Default(String, 'Python-{{version}}')
+      package = Default(String, '{{base}}.tar.bz2')
+
+    ReliableProcess = Process(
+      max_failures = 5,
+      min_duration = 1)
+
+    download = ReliableProcess(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/{{python.version}}/{{python.package}}')
+
+    unpack = ReliableProcess(
+      name = 'unpack',
+      cmdline = 'rm -rf {{python.base}} && tar xjf {{python.package}}')
+
+    build = ReliableProcess(
+      name = 'build',
+      cmdline = 'pushd {{python.base}} && ./configure && make && popd',
+      max_failures = 1)
+
+    email = ReliableProcess(
+      name = 'email',
+      cmdline = 'echo Success | mail {{role}}@foocorp.com')
+
+    build_python = SequentialTask(
+      name = 'build_python',
+      processes = [download, unpack, build, email]).bind(python = Python(version = "2.7.3"))
+
+
+Thermos Uses bash, But Thermos Is Not bash
+-------------------------------------------
+
+#### Bad
+
+Many tiny Processes make for harder-to-manage configurations.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'rcp user@my_machine:my_application .'
+    )
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+#### Good
+
+Each `cmdline` runs in a bash subshell, so you have the full power of
+bash. Chaining commands with `&&` or `||` is almost always the right
+thing to do.
+
+Also, for Tasks that are simply a list of processes that run one after
+another, consider using the `SequentialTask` helper, which applies a
+linear ordering constraint for you.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'rcp user@my_machine:my_application . && unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+
+Rarely Use Functions In Your Configurations
+-------------------------------------------
+
+90% of the time you define a function in a `.aurora` file, you're
+probably Doing It Wrong(TM).
+
+#### Bad
+
+    def get_my_task(name, user, cpu, ram, disk):
+      return Task(
+        name = name,
+        user = user,
+        processes = [STAGE_PROCESS, RUN_PROCESS],
+        constraints = order(STAGE_PROCESS, RUN_PROCESS),
+        resources = Resources(cpu = cpu, ram = ram, disk = disk)
+      )
+
+    task_one = get_my_task('task_one', 'feynman', 1.0, 32*MB, 1*GB)
+    task_two = get_my_task('task_two', 'feynman', 2.0, 64*MB, 1*GB)
+
+#### Good
+
+This one is more idiomatic. Forced keyword arguments prevent accidents,
+e.g. constructing a task with `32*MB` when you mean 32 MB of ram and not
+disk. Less proliferation of task-construction techniques makes for an
+easier-to-read, quicker-to-understand, and more composable
+configuration.
+
+    TASK_TEMPLATE = SequentialTask(
+      user = 'wickman',
+      processes = [STAGE_PROCESS, RUN_PROCESS],
+    )
+
+    task_one = TASK_TEMPLATE(
+      name = 'task_one',
+      resources = Resources(cpu = 1.0, ram = 32*MB, disk = 1*GB)
+    )
+
+    task_two = TASK_TEMPLATE(
+      name = 'task_two',
+      resources = Resources(cpu = 2.0, ram = 64*MB, disk = 1*GB)
+    )
diff --git a/source/documentation/0.13.0/reference/configuration-templating.md b/source/documentation/0.13.0/reference/configuration-templating.md
new file mode 100644
index 0000000..16d5c9d
--- /dev/null
+++ b/source/documentation/0.13.0/reference/configuration-templating.md
@@ -0,0 +1,306 @@
+Aurora Configuration Templating
+===============================
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a templating library called
+*Pystachio*, a powerful tool for configuration specification and reuse.
+
+[Aurora Configuration Reference](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the *Aurora+Thermos Configuration
+Reference* without `import` statements - the Aurora config loader
+injects them automatically. Other than that, the `.aurora` format
+works like any other Python script.
+
+
+Templating 1: Binding in Pystachio
+----------------------------------
+
+Pystachio uses the visually distinctive {{}} to indicate template
+variables. These are often called "mustache variables" after the
+similarly appearing variables in the Mustache templating system and
+because the curly braces resemble mustaches.
+
+If you are familiar with the Mustache system, templates in Pystachio
+have significant differences. They have no nesting, joining, or
+inheritance semantics. On the other hand, when evaluated, templates
+are evaluated iteratively, so this affords some level of indirection.
+
+Let's start with the simplest template: text with one
+variable, in this case `name`:
+
+    Hello {{name}}
+
+If we evaluate this as is, we'd get back:
+
+    Hello
+
+If a template variable doesn't have a value, when evaluated it's
+replaced with nothing. If we add a binding to give it a value:
+
+    { "name" : "Tom" }
+
+We'd get back:
+
+    Hello Tom
+
+Every Pystachio object has an associated `.bind` method that can bind
+values to {{}} variables. Bindings are not immediately evaluated.
+Instead, they are evaluated only when the interpolated value of the
+object is necessary, e.g. for performing equality or serializing a
+message over the wire.
+
+Objects with and without mustache templated variables behave
+differently:
+
+    >>> Float(1.5)
+    Float(1.5)
+
+    >>> Float('{{x}}.5')
+    Float({{x}}.5)
+
+    >>> Float('{{x}}.5').bind(x = 1)
+    Float(1.5)
+
+    >>> Float('{{x}}.5').bind(x = 1) == Float(1.5)
+    True
+
+    >>> contextual_object = String('{{metavar{{number}}}}').bind(
+    ... metavar1 = "first", metavar2 = "second")
+
+    >>> contextual_object
+    String({{metavar{{number}}}})
+
+    >>> contextual_object.bind(number = 1)
+    String(first)
+
+    >>> contextual_object.bind(number = 2)
+    String(second)
+
+You usually bind simple key to value pairs, but you can also bind three
+other objects: lists, dictionaries, and structurals. These will be
+described in detail later.
+
+
+### Structurals in Pystachio / Aurora
+
+Most Aurora/Thermos users don't ever (knowingly) interact with `String`,
+`Float`, or `Integer` Pystashio objects directly. Instead they interact
+with derived structural (`Struct`) objects that are collections of
+fundamental and structural objects. The structural object components are
+called *attributes*. Aurora's most used structural objects are `Job`,
+`Task`, and `Process`:
+
+    class Process(Struct):
+      cmdline = Required(String)
+      name = Required(String)
+      max_failures = Default(Integer, 1)
+      daemon = Default(Boolean, False)
+      ephemeral = Default(Boolean, False)
+      min_duration = Default(Integer, 5)
+      final = Default(Boolean, False)
+
+Construct default objects by following the object's type with (). If you
+want an attribute to have a value different from its default, include
+the attribute name and value inside the parentheses.
+
+    >>> Process()
+    Process(daemon=False, max_failures=1, ephemeral=False,
+      min_duration=5, final=False)
+
+Attribute values can be template variables, which then receive specific
+values when creating the object.
+
+    >>> Process(cmdline = 'echo {{message}}')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo {{message}}, final=False)
+
+    >>> Process(cmdline = 'echo {{message}}').bind(message = 'hello world')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+A powerful binding property is that all of an object's children inherit its
+bindings:
+
+    >>> List(Process)([
+    ... Process(name = '{{prefix}}_one'),
+    ... Process(name = '{{prefix}}_two')
+    ... ]).bind(prefix = 'hello')
+    ProcessList(
+      Process(daemon=False, name=hello_one, max_failures=1, ephemeral=False, min_duration=5, final=False),
+      Process(daemon=False, name=hello_two, max_failures=1, ephemeral=False, min_duration=5, final=False)
+      )
+
+Remember that an Aurora Job contains Tasks which contain Processes. A
+Job level binding is inherited by its Tasks and all their Processes.
+Similarly a Task level binding is available to that Task and its
+Processes but is *not* visible at the Job level (inheritance is a
+one-way street.)
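+
+A minimal sketch of this inheritance in an `.aurora` config (the names are illustrative): a binding applied at the Job level is visible to the Process at the bottom of the hierarchy.
+
+    greet = Process(name = 'greet', cmdline = 'echo {{greeting}}')
+
+    greet_task = Task(
+      name = 'greet',
+      processes = [greet],
+      resources = Resources(cpu = 0.1, ram = 16*MB, disk = 16*MB))
+
+    greet_job = Job(
+      cluster = 'cluster1',
+      role = 'myteam',
+      environment = 'test',
+      task = greet_task).bind(greeting = 'hello from the job level')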
+
+#### Mustaches Within Structurals
+
+When you define a `Struct` schema, one powerful, but confusing, feature
+is that all of that structure's attributes are Mustache variables within
+the enclosing scope *once they have been populated*.
+
+For example, when `Process` is defined above, all its attributes, such as
+{{`name`}}, {{`cmdline`}}, and {{`max_failures`}}, are immediately
+defined as Mustache variables, implicitly bound into the `Process`, and
+inherited by all child objects once they are defined.
+
+Thus, you can do the following:
+
+    >>> Process(name = "installer", cmdline = "echo {{name}} is running")
+    Process(daemon=False, name=installer, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo installer is running, final=False)
+
+WARNING: This binding only takes place in one direction. For example,
+the following does NOT work and does not set the `Process` `name`
+attribute's value.
+
+    >>> Process().bind(name = "installer")
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5, final=False)
+
+The following is also not possible and results in an infinite loop that
+attempts to resolve `Process.name`.
+
+    >>> Process(name = '{{name}}').bind(name = 'installer')
+
+Do not confuse Structural attributes with bound Mustache variables.
+Attributes are implicitly converted to Mustache variables but not vice
+versa.
+
+### Templating 2: Structurals Are Factories
+
+#### A Second Way of Templating
+
+A second templating method is as powerful as the aforementioned and
+often confused with it. This method is due to the automatic conversion of
+Struct attributes to Mustache variables as described above.
+
+Suppose you create a Process object:
+
+    >>> p = Process(name = "process_one", cmdline = "echo hello world")
+
+    >>> p
+    Process(daemon=False, name=process_one, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+This `Process` object, "`p`", can be used wherever a `Process` object is
+needed. It can also be reused by changing the value(s) of its
+attribute(s). Here we change its `name` attribute from `process_one` to
+`process_two`.
+
+    >>> p(name = "process_two")
+    Process(daemon=False, name=process_two, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+Template creation is a common use for this technique:
+
+    >>> Daemon = Process(daemon = True)
+    >>> logrotate = Daemon(name = 'logrotate', cmdline = './logrotate conf/logrotate.conf')
+    >>> mysql = Daemon(name = 'mysql', cmdline = 'bin/mysqld --safe-mode')
+
+### Advanced Binding
+
+As described above, `.bind()` binds simple strings or numbers to
+Mustache variables. In addition to Structural types formed by combining
+atomic types, Pystachio has two container types, `List` and `Map`, which
+can also be bound via `.bind()`.
+
+#### Bind Syntax
+
+The `bind()` function can take Python dictionaries or `kwargs`
+interchangeably (when `**kwargs` appears in a function definition,
+`kwargs` receives a Python dictionary containing all keyword arguments
+after the formal parameter list).
+
+    >>> String('{{foo}}').bind(foo = 'bar') == String('{{foo}}').bind({'foo': 'bar'})
+    True
+
+Bindings done "closer" to the object in question take precedence:
+
+    >>> p = Process(name = '{{context}}_process')
+    >>> t = Task().bind(context = 'global')
+    >>> t(processes = [p, p.bind(context = 'local')])
+    Task(processes=ProcessList(
+      Process(daemon=False, name=global_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5),
+      Process(daemon=False, name=local_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5)
+    ))
+
+#### Binding Complex Objects
+
+##### Lists
+
+    >>> fibonacci = List(Integer)([1, 1, 2, 3, 5, 8, 13])
+    >>> String('{{fib[4]}}').bind(fib = fibonacci)
+    String(5)
+
+##### Maps
+
+    >>> first_names = Map(String, String)({'Kent': 'Clark', 'Wayne': 'Bruce', 'Prince': 'Diana'})
+    >>> String('{{first[Kent]}}').bind(first = first_names)
+    String(Clark)
+
+##### Structurals
+
+    >>> String('{{p.cmdline}}').bind(p = Process(cmdline = "echo hello world"))
+    String(echo hello world)
+
+### Structural Binding
+
+Use structural templates when binding more than two or three individual
+values at the Job or Task level. For two or three values or fewer, standard
+key-to-string binding is sufficient.
+
+Structural binding is a very powerful pattern and is most useful in
+Aurora/Thermos for doing Structural configuration. For example, you can
+define a job profile. The following profile uses `HDFS`, the Hadoop
+Distributed File System, to designate a file's location. `HDFS` does
+not come with Aurora, so you'll need to either install it separately
+or change the way the dataset is designated.
+
+    class Profile(Struct):
+      version = Required(String)
+      environment = Required(String)
+      dataset = Default(String, 'hdfs://home/aurora/data/{{environment}}')
+
+    PRODUCTION = Profile(version = 'live', environment = 'prod')
+    DEVEL = Profile(version = 'latest',
+                    environment = 'devel',
+                    dataset = 'hdfs://home/aurora/data/test')
+    TEST = Profile(version = 'latest', environment = 'test')
+
+    JOB_TEMPLATE = Job(
+      name = 'application',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = '{{profile.environment}}',
+      task = SequentialTask(
+        name = 'task',
+        resources = Resources(cpu = 2, ram = 4*GB, disk = 8*GB),
+        processes = [
+          Process(name = 'main',
+                  cmdline = 'java -jar application.jar -hdfsPath {{profile.dataset}}')
+        ]
+       )
+     )
+
+    jobs = [
+      JOB_TEMPLATE(instances = 100).bind(profile = PRODUCTION),
+      JOB_TEMPLATE.bind(profile = DEVEL),
+      JOB_TEMPLATE.bind(profile = TEST),
+     ]
+
+In this case, a custom structural "Profile" is created to self-document
+the configuration to some degree. This also allows some schema
+"type-checking", and for default self-substitution, e.g. in
+`Profile.dataset` above.
+
+So rather than a `.bind()` with a half-dozen substituted variables, you
+can bind a single object that has sensible defaults stored in a single
+place.
diff --git a/source/documentation/0.13.0/reference/configuration-tutorial.md b/source/documentation/0.13.0/reference/configuration-tutorial.md
new file mode 100644
index 0000000..b6d49b3
--- /dev/null
+++ b/source/documentation/0.13.0/reference/configuration-tutorial.md
@@ -0,0 +1,511 @@
+Aurora Configuration Tutorial
+=============================
+
+How to write Aurora configuration files, including feature descriptions
+and best practices. When writing a configuration file, make use of
+`aurora job inspect`. It takes the same job key and configuration file
+arguments as `aurora job create` or `aurora update start`. It first ensures the
+configuration parses, then outputs it in human-readable form.
+
+You should read this after going through the general [Aurora Tutorial](../../getting-started/tutorial/).
+
+- [The Basics](#the-basics)
+	- [Use Bottom-To-Top Object Ordering](#use-bottom-to-top-object-ordering)
+- [An Example Configuration File](#an-example-configuration-file)
+- [Defining Process Objects](#defining-process-objects)
+- [Getting Your Code Into The Sandbox](#getting-your-code-into-the-sandbox)
+- [Defining Task Objects](#defining-task-objects)
+	- [SequentialTask: Running Processes in Parallel or Sequentially](#sequentialtask-running-processes-in-parallel-or-sequentially)
+	- [SimpleTask](#simpletask)
+	- [Combining tasks](#combining-tasks)
+- [Defining Job Objects](#defining-job-objects)
+- [The jobs List](#the-jobs-list)
+- [Basic Examples](#basic-examples)
+
+
+The Basics
+----------
+
+To run a job on Aurora, you must specify a configuration file that tells
+Aurora what it needs to know to schedule the job, what Mesos needs to
+run the tasks the job is made up of, and what Thermos needs to run the
+processes that make up the tasks. This file must have
+a `.aurora` suffix.
+
+A configuration file defines a collection of objects, along with parameter
+values for their attributes. An Aurora configuration file contains the
+following three types of objects:
+
+- Job
+- Task
+- Process
+
+A configuration also specifies a list of `Job` objects assigned
+to the variable `jobs`.
+
+- jobs (list of defined Jobs to run)
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a type-checked dictionary
+templating library called *Pystachio*, a powerful tool for
+configuration specification and reuse. Pystachio objects are tailored
+via {{}} surrounded templates.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the [*Aurora Configuration
+Reference*](../configuration/), without `import` statements - the
+Aurora config loader injects them automatically. Other than that, an `.aurora`
+file works like any other Python script.
+
+[*Aurora Configuration Reference*](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+### Use Bottom-To-Top Object Ordering
+
+A well-structured configuration starts with structural templates (if
+any). Structural templates encapsulate in their attributes all the
+differences between Jobs in the configuration that are not directly
+manipulated at the `Job` level, but typically at the `Process` or `Task`
+level. For example, if certain processes are invoked with slightly
+different settings or input.
+
+After structural templates, define, in order, `Process`es, `Task`s, and
+`Job`s.
+
+Structural template names should be *UpperCamelCased* and their
+instantiations are typically *UPPER\_SNAKE\_CASED*. `Process`, `Task`,
+and `Job` names are typically *lower\_snake\_cased*. Indentation is typically 2
+spaces.
+
+An Example Configuration File
+-----------------------------
+
+The following is a typical configuration file. Don't worry if there are
+parts you don't understand yet, but you may want to refer back to this
+as you read about its individual parts. Note that names surrounded by
+curly braces {{}} are template variables, which the system replaces with
+bound values for the variables.
+
+    # --- templates here ---
+	class Profile(Struct):
+	  package_version = Default(String, 'live')
+	  java_binary = Default(String, '/usr/lib/jvm/java-1.7.0-openjdk/bin/java')
+	  extra_jvm_options = Default(String, '')
+	  parent_environment = Default(String, 'prod')
+	  parent_serverset = Default(String,
+                                 '/foocorp/service/bird/{{parent_environment}}/bird')
+
+	# --- processes here ---
+	main = Process(
+	  name = 'application',
+	  cmdline = '{{profile.java_binary}} -server -Xmx1792m '
+	            '{{profile.extra_jvm_options}} '
+	            '-jar application.jar '
+	            '-upstreamService {{profile.parent_serverset}}'
+	)
+
+	# --- tasks ---
+	base_task = SequentialTask(
+	  name = 'application',
+	  processes = [
+	    Process(
+	      name = 'fetch',
+	      cmdline = 'curl -O '
+	                'https://packages.foocorp.com/{{profile.package_version}}/application.jar'),
+	  ]
+	)
+
+        # not always necessary but often useful to have separate task
+        # resource classes
+        staging_task = base_task(resources =
+                         Resources(cpu = 1.0,
+                                   ram = 2048*MB,
+                                   disk = 1*GB))
+	production_task = base_task(resources =
+                            Resources(cpu = 4.0,
+                                      ram = 2560*MB,
+                                      disk = 10*GB))
+
+	# --- job template ---
+	job_template = Job(
+	  name = 'application',
+	  role = 'myteam',
+	  contact = 'myteam-team@foocorp.com',
+	  instances = 20,
+	  service = True,
+	  task = production_task
+	)
+
+	# -- profile instantiations (if any) ---
+	PRODUCTION = Profile()
+	STAGING = Profile(
+	  extra_jvm_options = '-Xloggc:gc.log',
+	  parent_environment = 'staging'
+	)
+
+	# -- job instantiations --
+	jobs = [
+          job_template(cluster = 'cluster1', environment = 'prod')
+	               .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster2', environment = 'prod')
+	                .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster1',
+                        environment = 'staging',
+			service = False,
+			task = staging_task,
+			instances = 2)
+			.bind(profile = STAGING),
+	]
+
+## Defining Process Objects
+
+Processes are handled by the Thermos system. A process is a single
+executable step run as part of an Aurora task; it consists of a
+bash-executable statement.
+
+The key (and required) `Process` attributes are:
+
+-   `name`: Any string which is a valid Unix filename (no slashes,
+    NULLs, or leading periods). The `name` value must be unique relative
+    to other Processes in a `Task`.
+-   `cmdline`: A command line run in a bash subshell, so you can use
+    bash scripts. Nothing is supplied for command-line arguments,
+    so `$*` is unspecified.
+
+Many tiny processes make managing configurations more difficult. For
+example, the following is a bad way to define processes.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip'
+    )
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+Since `cmdline` runs in a bash subshell, you can chain commands
+with `&&` or `||`.
+
+When defining a `Task` that is just a list of Processes run in a
+particular order, use `SequentialTask`, as described in the [*Defining*
+`Task` *Objects*](#defining-task-objects) section. The following simplifies and combines the
+above multiple `Process` definitions into just two.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip && '
+                'unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+`Process` also has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#process-objects).
+
+
+## Getting Your Code Into The Sandbox
+
+When using Aurora, you need to get your executable code into its "sandbox", specifically
+the Task sandbox where the code executes for the Processes that make up that Task.
+
+Each Task has a sandbox created when the Task starts and garbage
+collected when it finishes. All of a Task's processes run in its
+sandbox, so processes can share state by using a shared current
+working directory.
+
+Typically, you save this code somewhere. You then need to define a Process
+in your `.aurora` configuration file that fetches the code from that somewhere
+to where the slave can see it. For a public cloud, that can be anywhere public on
+the Internet, such as S3. For private cloud internal storage, you need to put it
+on an accessible HDFS cluster or similar storage.
+
+The template for this Process is:
+
+    <name> = Process(
+      name = '<name>',
+      cmdline = '<command to copy and extract code archive into current working directory>'
+    )
+
+Note: Be sure the extracted code archive has an executable.
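+
+A concrete sketch of such a fetch Process, assuming a hypothetical archive URL:
+
+    fetch_app = Process(
+      name = 'fetch_app',
+      # download the archive into the sandbox and unpack it there
+      cmdline = 'curl -O https://packages.example.com/app.tar.gz && tar xf app.tar.gz')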
+
+## Defining Task Objects
+
+Tasks are handled by Mesos. A task is a collection of processes that
+runs in a shared sandbox. It's the fundamental unit Aurora uses to
+schedule the datacenter; essentially what Aurora does is find places
+in the cluster to run tasks.
+
+The key (and required) parts of a Task are:
+
+-   `name`: A string giving the Task's name. By default, if a Task is
+    not given a name, it inherits the first name in its Process list.
+
+-   `processes`: An unordered list of Process objects bound to the Task.
+    The value of the optional `constraints` attribute affects the
+    contents as a whole. Currently, the only constraint, `order`, determines if
+    the processes run in parallel or sequentially.
+
+-   `resources`: A `Resource` object defining the Task's resource
+    footprint. A `Resource` object has three attributes:
+    -   `cpu`: A Float, the fractional number of cores the Task requires.
+    -   `ram`: An Integer, RAM bytes the Task requires.
+    -   `disk`: An Integer, disk bytes the Task requires.
+A basic Task definition looks like:
+
+    Task(
+        name="hello_world",
+        processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+        resources=Resources(cpu = 1.0,
+                            ram = 1*GB,
+                            disk = 1*GB))
+
+A Task has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#task-object)
+
+
+### SequentialTask: Running Processes in Parallel or Sequentially
+
+By default, a Task with several Processes runs them in parallel. There
+are two ways to run Processes sequentially:
+
+-   Include an `order` constraint in the Task definition's `constraints`
+    attribute whose arguments specify the processes' run order:
+
+        Task( ... processes=[process1, process2, process3],
+	          constraints = order(process1, process2, process3), ...)
+
+-   Use `SequentialTask` instead of `Task`; it automatically runs
+    processes in the order specified in the `processes` attribute. No
+    `constraint` parameter is needed:
+
+        SequentialTask( ... processes=[process1, process2, process3] ...)
+
+### SimpleTask
+
+For quickly creating simple tasks, use the `SimpleTask` helper. It
+creates a basic task from a provided name and command line using a
+default set of resources. For example, in a `.aurora` configuration
+file:
+
+    SimpleTask(name="hello_world", command="echo hello world")
+
+is equivalent to
+
+    Task(name="hello_world",
+         processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+         resources=Resources(cpu = 1.0,
+                             ram = 1*GB,
+                             disk = 1*GB))
+
+The simplest idiomatic Job configuration thus becomes:
+
+    import os
+    hello_world_job = Job(
+      task=SimpleTask(name="hello_world", command="echo hello world"),
+      role=os.getenv('USER'),
+      cluster="cluster1")
+
+When written to `hello_world.aurora`, you invoke it with a simple
+`aurora job create cluster1/$USER/test/hello_world hello_world.aurora`.
+
+### Combining tasks
+
+`Tasks.concat` (synonym: `concat_tasks`) and
+`Tasks.combine` (synonym: `combine_tasks`) merge multiple Task definitions
+into a single Task. It may be easier to define complex Jobs
+as smaller constituent Tasks. But since a Job only includes a single
+Task, the subtasks must be combined before using them in a Job.
+Smaller Tasks can also be reused between Jobs, instead of having to
+repeat their definition for multiple Jobs.
+
+With both methods, the merged Task takes the first Task's name. The
+difference between the two is the resulting Task's process ordering.
+
+-   `Tasks.combine` runs its subtasks' processes in no particular order.
+    The new Task's resource consumption is the sum of all its subtasks'
+    consumption.
+
+-   `Tasks.concat` runs its subtasks in the order supplied, with each
+    subtask's processes run serially between tasks. It is analogous to
+    the `order` constraint helper, except at the Task level instead of
+    the Process level. The new Task's resource consumption is the
+    maximum value specified by any subtask for each Resource attribute
+    (cpu, ram and disk).
+
+For example, given the following:
+
+    setup_task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper],
+      # It is important to note that {{Tasks.concat}} has
+      # no effect on the ordering of the processes within a task;
+      # hence the necessity of the {{order}} statement below
+      # (otherwise, the order in which {{download_interpreter}}
+      # and {{update_zookeeper}} run will be non-deterministic)
+      constraints=order(download_interpreter, update_zookeeper),
+      ...
+    )
+
+    run_task = SequentialTask(
+      ...
+      processes=[download_application, start_application],
+      ...
+    )
+
+    combined_task = Tasks.concat(setup_task, run_task)
+
+The `Tasks.concat` command merges the two Tasks into a single Task and
+ensures all processes in `setup_task` run before the processes
+in `run_task`. Conceptually, the task is reduced to:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper,
+                        download_application, start_application),
+      ...
+    )
+
+In the case of `Tasks.combine`, the two schedules run in parallel:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper) +
+                        order(download_application, start_application),
+      ...
+    )
+
+In the latter case, each of the two sequences may operate in parallel.
+Of course, this may not be the intended behavior (for example, if
+the `start_application` Process implicitly relies
+upon `download_interpreter`). Make sure you understand the difference
+between using one or the other.
+
+## Defining Job Objects
+
+A job is a group of identical tasks that Aurora can run in a Mesos cluster.
+
+A `Job` object is defined by the values of several attributes, some
+required and some optional. The required attributes are:
+
+-   `task`: Task object to bind to this job. Note that a Job can
+    only take a single Task.
+
+-   `role`: Job's role account; in other words, the user account to run
+    the job as on a Mesos cluster machine. A common value is
+    `os.getenv('USER')`, which uses a Python function to get the name of the
+    user who submits the job request. The other common value is the service
+    account that runs the job, e.g. `www-data`.
+
+-   `environment`: Job's environment, typical values
+    are `devel`, `test`, or `prod`.
+
+-   `cluster`: Aurora cluster to schedule the job in, defined in
+    `/etc/aurora/clusters.json` or `~/.clusters.json`. You can specify
+    jobs where the only difference is the `cluster`, then at run time
+    only run the Job whose job key includes your desired cluster's name.
+
+You usually see a `name` parameter. By default, `name` inherits its
+value from the Job's associated Task object, but you can override this
+default. For these four parameters, a Job definition might look like:
+
+    foo_job = Job( name = 'foo', cluster = 'cluster1',
+              role = os.getenv('USER'), environment = 'prod',
+              task = foo_task)
+
+In addition to the required attributes, there are several optional
+attributes. Details can be found in the [Aurora Configuration Reference](../configuration/#job-objects).
+
+
+## The jobs List
+
+At the end of your `.aurora` file, you need to specify a list of the
+file's defined Jobs. For example, the following exports the jobs `job1`,
+`job2`, and `job3`.
+
+    jobs = [job1, job2, job3]
+
+This allows the aurora client to invoke commands on those jobs, such as
+starting, updating, or killing them.
+
+
+
+Basic Examples
+==============
+
+These are provided to give a basic understanding of simple Aurora jobs.
+
+### hello_world.aurora
+
+Put the following in a file named `hello_world.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    import os
+    hello_world_process = Process(name = 'hello_world', cmdline = 'echo hello world')
+
+    hello_world_task = Task(
+      resources = Resources(cpu = 0.1, ram = 16 * MB, disk = 16 * MB),
+      processes = [hello_world_process])
+
+    hello_world_job = Job(
+      cluster = 'cluster1',
+      role = os.getenv('USER'),
+      task = hello_world_task)
+
+    jobs = [hello_world_job]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world hello_world.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world
+
+### Environment Tailoring
+
+Put the following in a file named `hello_world_productionized.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    include('hello_world.aurora')
+
+    production_resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 2 * GB)
+    staging_resources = Resources(cpu = 0.1, ram = 32 * MB, disk = 512 * MB)
+    hello_world_template = hello_world_job(
+        name = "hello_world-{{cluster}}",
+        task = hello_world_task(resources = production_resources))
+
+    jobs = [
+      # production jobs
+      hello_world_template(cluster = 'cluster1', instances = 25),
+      hello_world_template(cluster = 'cluster2', instances = 15),
+
+      # staging jobs
+      hello_world_template(
+        cluster = 'local',
+        instances = 1,
+        task = hello_world_task(resources = staging_resources)),
+    ]
+
+Then issue the following commands to create and kill the job, using your own values for the job key
+
+    aurora job create cluster1/$USER/test/hello_world-cluster1 hello_world_productionized.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world-cluster1
diff --git a/source/documentation/0.13.0/reference/configuration.md b/source/documentation/0.13.0/reference/configuration.md
new file mode 100644
index 0000000..af3813d
--- /dev/null
+++ b/source/documentation/0.13.0/reference/configuration.md
@@ -0,0 +1,573 @@
+Aurora Configuration Reference
+==============================
+
+Don't know where to start? The Aurora configuration schema is very
+powerful, and configurations can become quite complex for advanced use
+cases.
+
+For examples of simple configurations to get something up and running
+quickly, check out the [Tutorial](../../getting-started/tutorial/). When you feel comfortable with the basics, move
+on to the [Configuration Tutorial](../configuration-tutorial/) for more in-depth coverage of
+configuration design.
+
+- [Process Schema](#process-schema)
+    - [Process Objects](#process-objects)
+- [Task Schema](#task-schema)
+    - [Task Object](#task-object)
+    - [Constraint Object](#constraint-object)
+    - [Resource Object](#resource-object)
+- [Job Schema](#job-schema)
+    - [Job Objects](#job-objects)
+    - [UpdateConfig Objects](#updateconfig-objects)
+    - [HealthCheckConfig Objects](#healthcheckconfig-objects)
+    - [Announcer Objects](#announcer-objects)
+    - [Container Objects](#container)
+    - [LifecycleConfig Objects](#lifecycleconfig-objects)
+- [Specifying Scheduling Constraints](#specifying-scheduling-constraints)
+- [Template Namespaces](#template-namespaces)
+    - [mesos Namespace](#mesos-namespace)
+    - [thermos Namespace](#thermos-namespace)
+
+
+Process Schema
+==============
+
+Process objects consist of required `name` and `cmdline` attributes. You can customize Process
+behavior with its optional attributes. Remember, Processes are handled by Thermos.
+
+### Process Objects
+
+  **Attribute Name**  | **Type**    | **Description**
+  ------------------- | :---------: | ---------------------------------
+   **name**           | String      | Process name (Required)
+   **cmdline**        | String      | Command line (Required)
+   **max_failures**   | Integer     | Maximum process failures (Default: 1)
+   **daemon**         | Boolean     | When True, this is a daemon process. (Default: False)
+   **ephemeral**      | Boolean     | When True, this is an ephemeral process. (Default: False)
+   **min_duration**   | Integer     | Minimum duration between process restarts in seconds. (Default: 15)
+   **final**          | Boolean     | When True, this process is a finalizing one that should run last. (Default: False)
+   **logger**         | Logger      | Struct defining the log behavior for the process. (Default: Empty)
+
+#### name
+
+The name is any valid UNIX filename string (specifically no
+slashes, NULLs or leading periods). Within a Task object, each Process name
+must be unique.
+
+#### cmdline
+
+The command line run by the process. The command line is invoked in a bash
+subshell, so it can involve full-blown bash scripts. However, nothing is
+supplied for command-line arguments, so `$*` is unspecified.
+
+#### max_failures
+
+The maximum number of failures (non-zero exit statuses) this process can
+have before being marked permanently failed and not retried. If a
+process permanently fails, Thermos looks at the failure limit of the task
+containing the process (usually 1) to determine if the task has
+failed as well.
+
+Setting `max_failures` to 0 makes the process retry
+indefinitely until it achieves a successful (zero) exit status.
+It retries at most once every `min_duration` seconds to prevent
+an effective denial of service attack on the coordinating Thermos scheduler.
+
+#### daemon
+
+By default, Thermos processes are non-daemon. If `daemon` is set to True, a
+successful (zero) exit status does not prevent future process runs.
+Instead, the process reinvokes after `min_duration` seconds.
+However, the maximum failure limit still applies. A combination of
+`daemon=True` and `max_failures=0` causes a process to retry
+indefinitely regardless of exit status. This should be avoided
+for very short-lived processes because of the accumulation of
+checkpointed state for each process run. When running in Mesos
+specifically, `max_failures` is capped at 100.
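+
+As a sketch, a long-running server process that should be restarted whenever it exits, cleanly or not (the command line is illustrative):
+
+    cache = Process(
+      name = 'cache',
+      cmdline = './run_cache_server.sh',  # hypothetical long-running server
+      daemon = True,                      # reinvoke even after a clean exit
+      max_failures = 0,                   # and retry failures indefinitely
+      min_duration = 5)                   # waiting at least 5 seconds between runs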
+
+#### ephemeral
+
+By default, Thermos processes are non-ephemeral. If `ephemeral` is set to
+True, the process' status is not used to determine if its containing task
+has completed. For example, consider a task with a non-ephemeral
+webserver process and an ephemeral logsaver process
+that periodically checkpoints its log files to a centralized data store.
+The task is considered finished once the webserver process has
+completed, regardless of the logsaver's current status.
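+
+A sketch of that webserver/logsaver pairing (both command lines are illustrative):
+
+    webserver = Process(
+      name = 'webserver',
+      cmdline = './run_server.sh')
+
+    logsaver = Process(
+      name = 'logsaver',
+      cmdline = './checkpoint_logs.sh',  # hypothetical periodic checkpointer
+      daemon = True,       # rerun after each checkpointing pass
+      ephemeral = True)    # its status never blocks task completion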
+
+#### min_duration
+
+Processes may succeed or fail multiple times during a single task's
+duration. Each of these is called a *process run*. `min_duration` is
+the minimum number of seconds the scheduler waits before running the
+same process.
+
+#### final
+
+Processes can be grouped into two classes: ordinary processes and
+finalizing processes. By default, Thermos processes are ordinary. They
+run as long as the task is considered healthy (i.e., no failure
+limits have been reached.) But once all regular Thermos processes
+finish or the task reaches a certain failure threshold, it
+moves into a "finalization" stage and runs all finalizing
+processes. These are typically processes necessary for cleaning up the
+task, such as log checkpointers, or perhaps e-mail notifications that
+the task completed.
+
+Finalizing processes may not depend upon ordinary processes or
+vice-versa; however, finalizing processes may depend upon other
+finalizing processes and otherwise run as a typical process
+schedule.
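+
+A sketch of a finalizing process that sends a completion notice (the address is a placeholder):
+
+    notify = Process(
+      name = 'notify',
+      cmdline = 'echo "task finished" | mail team@example.com',
+      final = True)  # runs in the finalization stage, after ordinary processes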
+
+#### logger
+
+The default behavior of Thermos is to store stderr/stdout logs in files which grow without bound.
+In the event that you have large log volume, you may want to configure Thermos to automatically rotate logs
+after they grow to a certain size, which can prevent your job from using more than its allocated
+disk space.
+
+A Logger union consists of a destination enum, a mode enum, and a rotation policy.
+Use `destination` to set where the process logs should be sent. The default
+option is `file`. It's also possible to specify `console` to output logs
+to stdout/stderr, `none` to suppress any log output, or `both` to send logs to files and
+to console output. When using `none` or `console`, the rotation attributes are ignored.
+Rotation policies only apply to loggers whose mode is `rotate`. The acceptable values
+for the LoggerMode enum are `standard` and `rotate`. The rotation policy applies to both
+stderr and stdout.
+
+By default, all processes use the `standard` LoggerMode.
+
+  **Attribute Name**  | **Type**          | **Description**
+  ------------------- | :---------------: | ---------------------------------
+   **destination**    | LoggerDestination | Destination of logs. (Default: `file`)
+   **mode**           | LoggerMode        | Mode of the logger. (Default: `standard`)
+   **rotate**         | RotatePolicy      | An optional rotation policy.
+
+A RotatePolicy describes log rotation behavior for when `mode` is set to `rotate`. It is ignored
+otherwise.
+
+  **Attribute Name**  | **Type**     | **Description**
+  ------------------- | :----------: | ---------------------------------
+   **log_size**       | Integer      | Maximum size (in bytes) of an individual log file. (Default: 100 MiB)
+   **backups**        | Integer      | The maximum number of backups to retain. (Default: 5)
+
+An example process configuration is as follows:
+
+        process = Process(
+          name='process',
+          logger=Logger(
+            destination=LoggerDestination('both'),
+            mode=LoggerMode('rotate'),
+            rotate=RotatePolicy(log_size=5*MB, backups=5)
+          )
+        )
+
+Task Schema
+===========
+
+Tasks fundamentally consist of a `name` and a list of Process objects stored as the
+value of the `processes` attribute. Processes can be further constrained with
+`constraints`. By default, `name`'s value inherits from the first Process in the
+`processes` list, so for simple `Task` objects with one Process, `name`
+can be omitted. In Mesos, `resources` is also required.
+
+### Task Object
+
+   **param**               | **type**                         | **description**
+   ---------               | :---------:                      | ---------------
+   ```name```              | String                           | Task name (Required) (Default: ```processes[0].name```)
+   ```processes```         | List of ```Process``` objects    | List of ```Process``` objects bound to this task. (Required)
+   ```constraints```       | List of ```Constraint``` objects | List of ```Constraint``` objects constraining processes.
+   ```resources```         | ```Resource``` object            | Resource footprint. (Required)
+   ```max_failures```      | Integer                          | Maximum process failures before being considered failed (Default: 1)
+   ```max_concurrency```   | Integer                          | Maximum number of concurrent processes (Default: 0, unlimited concurrency.)
+   ```finalization_wait``` | Integer                          | Amount of time allocated for finalizing processes, in seconds. (Default: 30)
+
+#### name
+`name` is a string denoting the name of this task. It defaults to the name of the first Process in
+the list of Processes associated with the `processes` attribute.
+
+#### processes
+
+`processes` is an unordered list of `Process` objects. To constrain the order
+in which they run, use `constraints`.
+
+##### constraints
+
+A list of `Constraint` objects. Currently it supports only one type,
+the `order` constraint. `order` is a list of process names
+that should run in the order given. For example,
+
+        process = Process(cmdline = "echo hello {{name}}")
+        task = Task(name = "echoes",
+                    processes = [process(name = "jim"), process(name = "bob")],
+                    constraints = [Constraint(order = ["jim", "bob"])])
+
+Constraints can be supplied ad-hoc and in duplicate. Not all
+Processes need be constrained; however, Tasks with cycles are
+rejected by the Thermos scheduler.
+
+Use the `order` function as shorthand to generate `Constraint` lists.
+The following:
+
+        order(process1, process2)
+
+is shorthand for
+
+        [Constraint(order = [process1.name(), process2.name()])]
+
+The `order` function accepts Process name strings `('foo', 'bar')` or the processes
+themselves, e.g. `foo=Process(name='foo', ...)`, `bar=Process(name='bar', ...)`,
+`constraints=order(foo, bar)`.
+
+#### resources
+
+Takes a `Resource` object, which specifies the amounts of CPU, memory, and disk space resources
+to allocate to the Task.
+
+#### max_failures
+
+`max_failures` is the number of failed processes needed for the `Task` to be
+marked as failed.
+
+For example, assume a Task has two Processes and a `max_failures` value of `2`:
+
+        template = Process(max_failures=10)
+        task = Task(
+          name = "fail",
+          processes = [
+             template(name = "failing", cmdline = "exit 1"),
+             template(name = "succeeding", cmdline = "exit 0")
+          ],
+          max_failures=2)
+
+The `failing` Process could fail 10 times before being marked as permanently
+failed, and the `succeeding` Process could succeed on the first run. However,
+the Task would still succeed: there would be 10 failed process runs yet only
+one permanently failed Process, and both Processes would have to fail
+permanently for the Task to fail.
+
+#### max_concurrency
+
+For Tasks with a number of expensive but otherwise independent
+processes, you may want to limit the amount of concurrency
+the Thermos scheduler provides rather than artificially constraining
+it via `order` constraints. For example, a test framework may
+generate a task with 100 test run processes but want to run it on
+a machine with only 4 cores. You can limit the amount of parallelism to
+4 by setting `max_concurrency=4` in your task configuration.
+
+For example, the following task spawns 180 Processes ("mappers")
+to compute individual elements of a 180 degree sine table, all dependent
+upon one final Process ("reducer") to tabulate the results:
+
+    def make_mapper(id):
+      return Process(
+        name = "mapper%03d" % id,
+        cmdline = "echo 'scale=50;s(%d*4*a(1)/180)' | bc -l > temp.sine_table.%03d" % (id, id))
+
+    def make_reducer():
+      return Process(
+        name = "reducer",
+        cmdline = "cat temp.* | nl > sine_table.txt && rm -f temp.*")
+
+    processes = map(make_mapper, range(180))
+
+    task = Task(
+      name = "mapreduce",
+      processes = processes + [make_reducer()],
+      constraints = [Constraint(order = [mapper.name(), 'reducer']) for mapper
+                     in processes],
+      max_concurrency = 8)
+
+#### finalization_wait
+
+Process execution is organized into three active stages: `ACTIVE`,
+`CLEANING`, and `FINALIZING`. The `ACTIVE` stage is when ordinary processes run.
+This stage lasts as long as Processes are running and the Task is healthy.
+The moment either all Processes have finished successfully or the Task has reached a
+maximum Process failure limit, it goes into the `CLEANING` stage and sends
+SIGTERMs to all currently running Processes and their process trees.
+Once all Processes have terminated, the Task goes into the `FINALIZING` stage
+and invokes the schedule of all Processes with the "final" attribute set to True.
+
+This whole process, from the end of the `ACTIVE` stage to the end of the
+`FINALIZING` stage, must happen within `finalization_wait` seconds. If it does not
+finish during that time, all remaining Processes are sent SIGKILLs
+(or if they depend upon uncompleted Processes, are
+never invoked.)
+
+When running on Aurora, the `finalization_wait` is capped at 60 seconds.
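+
+For illustration, a sketch of a task with one finalizing Process; the server
+command and the `gzip` log-compression command are hypothetical stand-ins:
+
+        main = Process(name = 'server', cmdline = './run_server.sh')
+        # Hypothetical cleanup step; final=True marks it as a finalizing process.
+        log_zipper = Process(name = 'log_zipper', cmdline = 'gzip -r .logs', final = True)
+
+        task = Task(
+          name = 'server',
+          processes = [main, log_zipper],
+          resources = Resources(cpu = 1.0, ram = 128*MB, disk = 128*MB),
+          finalization_wait = 60)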
+
+### Constraint Object
+
+Current constraint objects support only a single ordering constraint, `order`,
+which specifies that its processes run sequentially in the order given. By
+default, all processes run in parallel when bound to a `Task` without
+ordering constraints.
+
+   param | type           | description
+   ----- | :----:         | -----------
+   order | List of String | List of processes by name (String) that should be run serially.
+
+### Resource Object
+
+Specifies the amount of CPU, RAM, and disk resources the task needs. See the
+[Resource Isolation document](../../features/resource-isolation/) for suggested values and to understand how
+resources are allocated.
+
+  param      | type    | description
+  -----      | :----:  | -----------
+  ```cpu```  | Float   | Fractional number of cores required by the task.
+  ```ram```  | Integer | Bytes of RAM required by the task.
+  ```disk``` | Integer | Bytes of disk required by the task.
+
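+For example, a sketch (assuming a `Resources` constructor for this object, and
+`GB` as a unit helper alongside the `MB` seen in earlier examples):
+
+        resources = Resources(cpu = 2.0, ram = 2*GB, disk = 4*GB)
+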
+
+Job Schema
+==========
+
+### Job Objects
+
+   name | type | description
+   ------ | :-------: | -------
+  ```task``` | Task | The Task object to bind to this job. Required.
+  ```name``` | String | Job name. (Default: inherited from the task attribute's name)
+  ```role``` | String | Job role account. Required.
+  ```cluster``` | String | Cluster in which this job is scheduled. Required.
+   ```environment``` | String | Job environment, default ```devel```. Must be one of ```prod```, ```devel```, ```test``` or ```staging<number>```.
+  ```contact``` | String | Best email address to reach the owner of the job. For production jobs, this is usually a team mailing list.
+  ```instances```| Integer | Number of instances (sometimes referred to as replicas or shards) of the task to create. (Default: 1)
+   ```cron_schedule``` | String | Cron schedule in cron format. May only be used with non-service jobs. See [Cron Jobs](../../features/cron-jobs/) for more information. Default: None (not a cron job.)
+  ```cron_collision_policy``` | String | Policy to use when a cron job is triggered while a previous run is still active. ```KILL_EXISTING```: kill the previous run and schedule the new run. ```CANCEL_NEW```: let the previous run continue and cancel the new run. (Default: KILL_EXISTING)
+  ```update_config``` | ```UpdateConfig``` object | Parameters for controlling the rate and policy of rolling updates.
+  ```constraints``` | dict | Scheduling constraints for the tasks. See the section on the [constraint specification language](#specifying-scheduling-constraints)
+  ```service``` | Boolean | If True, restart tasks regardless of success or failure. (Default: False)
+  ```max_task_failures``` | Integer | Maximum number of failures after which the task is considered to have failed (Default: 1) Set to -1 to allow for infinite failures
+  ```priority``` | Integer | Preemption priority to give the task (Default 0). Tasks with higher priorities may preempt tasks at lower priorities.
+  ```production``` | Boolean |  Whether or not this is a production task that may [preempt](../../features/multitenancy/#preemption) other tasks (Default: False). Production job role must have the appropriate [quota](../../features/multitenancy/#preemption).
+  ```health_check_config``` | ```HealthCheckConfig``` object | Parameters for controlling a task's health checks. HTTP health check is only used if a health port was assigned with a command line wildcard.
+  ```container``` | ```Container``` object | An optional container to run all processes inside of.
+  ```lifecycle``` | ```LifecycleConfig``` object | An optional task lifecycle configuration that dictates commands to be executed on startup/teardown.  HTTP lifecycle is enabled by default if the "health" port is requested.  See [LifecycleConfig Objects](#lifecycleconfig-objects) for more information.
+  ```tier``` | String | Task tier type. The default scheduler tier configuration allows for 3 tiers: `revocable`, `preemptible`, and `preferred`. The `revocable` tier requires the task to run with Mesos revocable resources. Setting the task's tier to `preemptible` allows for the possibility of that task being preempted by other tasks when cluster is running low on resources. The `preferred` tier prevents the task from using revocable resources and from being preempted. Since it is possible that a cluster is configured with a custom tier configuration, users should consult their cluster administrator to be informed of the tiers supported by the cluster. Attempts to schedule jobs with an unsupported tier will be rejected by the scheduler.
+
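+A minimal example Job, shown as a sketch: the role, cluster name, contact
+address and the `task` variable are placeholders for values from your own
+configuration.
+
+        jobs = [Job(
+          task = task,                    # a Task object defined elsewhere
+          role = 'www-data',              # hypothetical role
+          cluster = 'example',            # hypothetical cluster name
+          environment = 'devel',
+          contact = 'team@example.com',   # hypothetical contact address
+          instances = 4,
+          service = True)]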
+
+### UpdateConfig Objects
+
+Parameters for controlling the rate and policy of rolling updates.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+| ```watch_secs```             | Integer  | Minimum number of seconds a shard must remain in ```RUNNING``` state before considered a success (Default: 45)
+| ```max_per_shard_failures``` | Integer  | Maximum number of restarts per shard during update. Increments total failure count when this limit is exceeded. (Default: 0)
+| ```max_total_failures```     | Integer  | Maximum number of shard failures to be tolerated in total during an update. Cannot be greater than or equal to the total number of tasks in a job. (Default: 0)
+| ```rollback_on_failure```    | boolean  | When False, prevents auto rollback of a failed update (Default: True)
+| ```wait_for_batch_completion```| boolean | When True, all threads from a given batch will be blocked from picking up new instances until the entire batch is updated. This essentially simulates the legacy sequential updater algorithm. (Default: False)
+| ```pulse_interval_secs```    | Integer  |  Indicates a [coordinated update](../../features/job-updates/#coordinated-job-updates). If no pulses are received within the provided interval the update will be blocked. Beta-updater only. Will fail on submission when used with client updater. (Default: None)
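+
+For example, a sketch of a conservative update policy using the parameters above:
+
+        update_config = UpdateConfig(
+          batch_size = 3,                # update three shards per iteration
+          watch_secs = 60,               # a shard must stay RUNNING for 60s to count as healthy
+          max_per_shard_failures = 2)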
+
+### HealthCheckConfig Objects
+
+*Note: ```endpoint```, ```expected_response``` and ```expected_response_code``` are deprecated from ```HealthCheckConfig``` and must be defined in ```HttpHealthChecker```.*
+
+Parameters for controlling a task's health checks via HTTP or a shell command.
+
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```health_checker```           | HealthCheckerConfig | Configure what kind of health check to use.
+| ```initial_interval_secs```    | Integer   | Initial delay for performing a health check. (Default: 15)
+| ```interval_secs```            | Integer   | Interval on which to check the task's health. (Default: 10)
+| ```max_consecutive_failures``` | Integer   | Maximum number of consecutive failures that will be tolerated before considering a task unhealthy (Default: 0)
+| ```timeout_secs```             | Integer   | Health check timeout. (Default: 1)
+
+### HealthCheckerConfig Objects
+| param                          | type                | description
+| -------                        | :-------:           | --------
+| ```http```                     | HttpHealthChecker  | Configure health check to use HTTP. (Default)
+| ```shell```                    | ShellHealthChecker | Configure health check via a shell command.
+
+### HttpHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```endpoint```                 | String    | HTTP endpoint to check (Default: /health)
+| ```expected_response```        | String    | If not empty, fail the HTTP health check if the response differs. Case insensitive. (Default: ok)
+| ```expected_response_code```   | Integer   | If not zero, fail the HTTP health check if the response code differs. (Default: 0)
+
+### ShellHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```shell_command```            | String    | An alternative to HTTP health checking. Specifies a shell command that will be executed. Any non-zero exit status will be interpreted as a health check failure.
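+
+For example, a sketch of a shell-based health check; the `nc` probe command is a
+hypothetical stand-in for whatever liveness check suits your task:
+
+        health_check_config = HealthCheckConfig(
+          health_checker = HealthCheckerConfig(
+            # Hypothetical probe: succeed if something is listening on the http port.
+            shell = ShellHealthChecker(shell_command = 'nc -z localhost {{thermos.ports[http]}}')),
+          interval_secs = 30,
+          max_consecutive_failures = 3)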
+
+
+### Announcer Objects
+
+If the `announce` field in the Job configuration is set, each task will be
+registered in the ServerSet `/aurora/role/environment/jobname` in the
+zookeeper ensemble configured by the executor (which can optionally be overridden by specifying
+the `zk_path` parameter).  If no Announcer object is specified,
+no announcement will take place.  For more information about ServerSets, see the [Service Discovery](../../features/service-discovery/)
+documentation.
+
+By default, the hostname in the registered endpoints will be the `--hostname` parameter
+that is passed to the mesos slave. To override the hostname value, the executor can be started
+with `--announcer-hostname=<overridden_value>`. If you decide to use `--announcer-hostname` and
+the overridden value needs to change for every executor, then the executor has to be started inside a wrapper, see [Executor Wrapper](../../operations/configuration/#thermos-executor-wrapper).
+
+For example, if you want the hostname in the endpoint to be an IP address instead of the hostname,
+the `--hostname` parameter to the mesos slave can be set to the machine IP or the executor can
+be started with `--announcer-hostname=<host_ip>` while wrapping the executor inside a script.
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```primary_port```             | String    | Which named port to register as the primary endpoint in the ServerSet (Default: `http`)
+| ```portmap```                  | dict      | A mapping of additional endpoints to be announced in the ServerSet (Default: `{ 'aurora': '{{primary_port}}' }`)
+| ```zk_path```                  | String    | Zookeeper serverset path override (executor must be started with the `--announcer-allow-custom-serverset-path` parameter)
+
+#### Port aliasing with the Announcer `portmap`
+
+The primary endpoint registered in the ServerSet is the one allocated to the port
+specified by the `primary_port` in the `Announcer` object, by default
+the `http` port.  This port can be referenced from anywhere within a configuration
+as `{{thermos.ports[http]}}`.
+
+Without the port map, each named port would be allocated a unique port number.
+The `portmap` allows two different named ports to be aliased together.  The default
+`portmap` aliases the `aurora` port (i.e. `{{thermos.ports[aurora]}}`) to
+the `http` port.  Even though the two ports can be referenced independently,
+only one port is allocated by Mesos.  Any port referenced in a `Process` object
+but which is not in the portmap will be allocated dynamically by Mesos and announced as well.
+
+It is possible to use the portmap to alias names to static port numbers, e.g.
+`{'http': 80, 'https': 443, 'aurora': 'http'}`.  In this case, referencing
+`{{thermos.ports[aurora]}}` would look up `{{thermos.ports[http]}}` then
+find a static port 80.  No port would be requested of or allocated by Mesos.
+
+Static ports should be used cautiously as Aurora does nothing to prevent two
+tasks with the same static port allocations from being co-scheduled.
+External constraints such as slave attributes should be used to enforce such
+guarantees should they be needed.
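+
+Putting this together, a sketch of an Announcer using the static-port aliasing
+described above:
+
+        announce = Announcer(
+          primary_port = 'http',
+          # 'aurora' aliases to 'http'; 'http' maps to static port 80 (see the caution above).
+          portmap = {'http': 80, 'aurora': 'http'})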
+
+### Container Objects
+
+*Note: The only container type currently supported is "docker".  Docker support is currently EXPERIMENTAL.*
+*Note: In order to correctly execute processes inside a job, the Docker container must have python 2.7 installed.*
+
+*Note: For a private docker registry, mesos mandates that the docker credential file be named `.dockercfg`, even though docker may create a credential file with a different name on various platforms. Also, the `.dockercfg` file needs to be copied into the sandbox using the `-thermos_executor_resources` flag, specified while starting Aurora.*
+
+Describes the container the job's processes will run inside.
+
+  param          | type           | description
+  -----          | :----:         | -----------
+  ```docker```   | Docker         | A docker container to use.
+
+### Docker Object
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```image```      | String          | The name of the docker image to execute.  If the image does not exist locally it will be pulled with ```docker pull```.
+  ```parameters``` | List(Parameter) | Additional parameters to pass to the docker containerizer.
+
+### Docker Parameter Object
+
+Docker CLI parameters. This needs to be enabled by the scheduler `allow_docker_parameters` option.
+See [Docker Command Line Reference](https://docs.docker.com/reference/commandline/run/) for valid parameters.
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```name```       | String          | The name of the docker parameter. E.g. volume
+  ```value```      | String          | The value of the parameter. E.g. /usr/local/bin:/usr/bin:rw
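+
+For example, a sketch of a Docker container specification; the image name and the
+volume parameter value are illustrative only, taken from the tables above:
+
+        container = Container(
+          docker = Docker(
+            image = 'python:2.7',   # the container must have Python 2.7, per the note above
+            parameters = [Parameter(name = 'volume', value = '/usr/local/bin:/usr/bin:rw')]))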
+
+### LifecycleConfig Objects
+
+*Note: The only lifecycle configuration supported is the HTTP lifecycle via the HttpLifecycleConfig.*
+
+  param          | type                | description
+  -----          | :----:              | -----------
+  ```http```     | HttpLifecycleConfig | Configure the lifecycle manager to send lifecycle commands to the task via HTTP.
+
+### HttpLifecycleConfig Objects
+
+  param          | type            | description
+  -----          | :----:          | -----------
+  ```port```     | String          | The named port to which POST commands are sent. (Default: health)
+  ```graceful_shutdown_endpoint``` | String | Endpoint to hit to indicate that a task should gracefully shutdown. (Default: /quitquitquit)
+  ```shutdown_endpoint``` | String | Endpoint to hit to give a task its final warning before being killed. (Default: /abortabortabort)
+
+#### graceful_shutdown_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint to request that the task gracefully shut itself down.  This is a
+courtesy call before the `shutdown_endpoint` is invoked a fixed amount of
+time later.
+
+#### shutdown_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint as a final warning before the task is shut down.  If the task
+does not shut down on its own after this, it will be forcefully killed.
+
+Specifying Scheduling Constraints
+=================================
+
+In the `Job` object there is a map `constraints` from String to String
+allowing the user to tailor the schedulability of tasks within the job.
+
+The constraint map's key is the attribute name on which we
+constrain Tasks within our Job; the value is how we constrain them.
+There are two types of constraints: *limit constraints* and *value
+constraints*.
+
+| constraint    | description
+| ------------- | --------------
+| Limit         | A string that specifies a limit for a constraint. Starts with <code>'limit:</code> followed by an Integer and closing single quote, such as ```'limit:1'```.
+| Value         | A string that specifies a value for a constraint. To include a list of values, separate the values using commas. To negate the values of a constraint, start with a ```!```.
+
+Further details can be found in the [Scheduling Constraints](../../features/constraints/) feature
+description.
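+
+For example, a sketch of a limit constraint ensuring at most one instance of a
+job per host; the `host` attribute is commonly available, but the attributes
+that exist depend on your cluster:
+
+        constraints = {'host': 'limit:1'}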
+
+
+Template Namespaces
+===================
+
+Currently, a few Pystachio namespaces have special semantics. Using them
+in your configuration allows you to tailor application behavior
+through environment introspection or to interact in special ways with the
+Aurora client or Aurora-provided services.
+
+### mesos Namespace
+
+The `mesos` namespace contains variables which relate to the `mesos` slave
+which launched the task. The `instance` variable can be used
+to distinguish between Task replicas.
+
+| variable name     | type       | description
+| --------------- | :--------: | -------------
+| ```instance```    | Integer    | The instance number of the created task. A job with 5 replicas has instance numbers 0, 1, 2, 3, and 4.
+| ```hostname``` | String | The instance hostname that the task was launched on.
+
+Please note, there is no uniqueness guarantee for `instance` in the presence of
+network partitions. If that is required, it should be baked in at the application
+level using a distributed coordination service such as Zookeeper.
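+
+For example, a sketch of a Process that shards its work by instance number; the
+`process_input.py` script is a hypothetical stand-in:
+
+        process = Process(
+          name = 'shard_worker',
+          # {{mesos.instance}} and {{mesos.hostname}} come from the table above.
+          cmdline = 'python process_input.py --shard {{mesos.instance}} --host {{mesos.hostname}}')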
+
+### thermos Namespace
+
+The `thermos` namespace contains variables that work directly on the
+Thermos platform in addition to Aurora. This namespace is fully
+compatible with Tasks invoked via the `thermos` CLI.
+
+| variable      | type                     | description                        |
+| :----------:  | ---------                | ------------                       |
+| ```ports```   | map of string to Integer | A map of names to port numbers     |
+| ```task_id``` | string                   | The task ID assigned to this task. |
+
+The `thermos.ports` namespace is automatically populated by Aurora when
+invoking tasks on Mesos. When running the `thermos` command directly,
+these ports must be explicitly mapped with the `-P` option.
+
+For example, if `{{thermos.ports[http]}}` is specified in a `Process`
+configuration, it is automatically extracted and auto-populated by
+Aurora, but must be specified with, for example, `thermos -P http:12345`
+to map `http` to port 12345 when running via the CLI.
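+
+For example, a sketch of a Process bound to the `http` named port; the
+SimpleHTTPServer one-liner is illustrative only:
+
+        http_server = Process(
+          name = 'http_server',
+          # The {{thermos.ports[http]}} binding is filled in by Aurora (or via -P on the CLI).
+          cmdline = 'python -m SimpleHTTPServer {{thermos.ports[http]}}')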
+
diff --git a/source/documentation/0.13.0/reference/scheduler-configuration.md b/source/documentation/0.13.0/reference/scheduler-configuration.md
new file mode 100644
index 0000000..4199591
--- /dev/null
+++ b/source/documentation/0.13.0/reference/scheduler-configuration.md
@@ -0,0 +1,224 @@
+# Scheduler Configuration Reference
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+A list of the available options can be seen by running `aurora-scheduler -help`.
+
+Please refer to the [Operator Configuration Guide](../../operations/configuration/) for details on how
+to properly set the most important options.
+
+```
+$ aurora-scheduler -help
+-------------------------------------------------------------------------
+-h or -help to print this help message
+
+Required flags:
+-backup_dir [not null]
+	Directory to store backups under. Will be created if it does not exist.
+-cluster_name [not null]
+	Name to identify the cluster being served.
+-framework_authentication_file
+	Properties file which contains framework credentials to authenticate with Mesosmaster. Must contain the properties 'aurora_authentication_principal' and 'aurora_authentication_secret'.
+-mesos_master_address [not null]
+	Address for the mesos master, can be a socket address or zookeeper path.
+-mesos_role
+	The Mesos role this framework will register as. The default is to left this empty, and the framework will register without any role and only receive unreserved resources in offer.
+-serverset_path [not null, must be non-empty]
+	ZooKeeper ServerSet path to register at.
+-shiro_after_auth_filter
+	Fully qualified class name of the servlet filter to be applied after the shiro auth filters are applied.
+-thermos_executor_path
+	Path to the thermos executor entry point.
+-tier_config [file must be readable]
+	Configuration file defining supported task tiers, task traits and behaviors.
+-zk_digest_credentials
+	user:password to use when authenticating with ZooKeeper.
+-zk_endpoints [must have at least 1 item]
+	Endpoint specification for the ZooKeeper servers.
+
+Optional flags:
+-allow_docker_parameters (default false)
+	Allow to pass docker container parameters in the job.
+-allowed_container_types (default [MESOS])
+	Container types that are allowed to be used by jobs.
+-async_slot_stat_update_interval (default (1, mins))
+	Interval on which to try to update open slot stats.
+-async_task_stat_update_interval (default (1, hrs))
+	Interval on which to try to update resource consumption stats.
+-async_worker_threads (default 8)
+	The number of worker threads to process async task operations with.
+-backup_interval (default (1, hrs))
+	Minimum interval on which to write a storage backup.
+-cron_scheduler_num_threads (default 100)
+	Number of threads to use for the cron scheduler thread pool.
+-cron_start_initial_backoff (default (1, secs))
+	Initial backoff delay while waiting for a previous cron run to be killed.
+-cron_start_max_backoff (default (1, mins))
+	Max backoff delay while waiting for a previous cron run to be killed.
+-cron_timezone (default GMT)
+	TimeZone to use for cron predictions.
+-custom_executor_config [file must exist, file must be readable]
+	Path to custom executor settings configuration file.
+-db_lock_timeout (default (1, mins))
+	H2 table lock timeout
+-db_row_gc_interval (default (2, hrs))
+	Interval on which to scan the database for unused row references.
+-default_docker_parameters (default {})
+	Default docker parameters for any job that does not explicitly declare parameters.
+-dlog_max_entry_size (default (512, KB))
+	Specifies the maximum entry size to append to the log. Larger entries will be split across entry Frames.
+-dlog_shutdown_grace_period (default (2, secs))
+	Specifies the maximum time to wait for scheduled checkpoint and snapshot actions to complete before forcibly shutting down.
+-dlog_snapshot_interval (default (1, hrs))
+	Specifies the frequency at which snapshots of local storage are taken and written to the log.
+-enable_cors_for
+	List of domains for which CORS support should be enabled.
+-enable_h2_console (default false)
+	Enable H2 DB management console.
+-enable_preemptor (default true)
+	Enable the preemptor and preemption
+-executor_user (default root)
+	User to start the executor. Defaults to "root". Set this to an unprivileged user if the mesos master was started with "--no-root_submissions". If set to anything other than "root", the executor will ignore the "role" setting for jobs since it can't use setuid() anymore. This means that all your jobs will run under the specified user and the user has to exist on the mesos slaves.
+-first_schedule_delay (default (1, ms))
+	Initial amount of time to wait before first attempting to schedule a PENDING task.
+-flapping_task_threshold (default (5, mins))
+	A task that repeatedly runs for less than this time is considered to be flapping.
+-framework_announce_principal (default false)
+	When 'framework_authentication_file' flag is set, the FrameworkInfo registered with the mesos master will also contain the principal. This is necessary if you intend to use mesos authorization via mesos ACLs. The default will change in a future release.
+-framework_failover_timeout (default (21, days))
+	Time after which a framework is considered deleted.  SHOULD BE VERY HIGH.
+-global_container_mounts (default [])
+	A comma separated list of mount points (in host:container form) to mount into all (non-mesos) containers.
+-history_max_per_job_threshold (default 100)
+	Maximum number of terminated tasks to retain in a job history.
+-history_min_retention_threshold (default (1, hrs))
+	Minimum guaranteed time for task history retention before any pruning is attempted.
+-history_prune_threshold (default (2, days))
+	Time after which the scheduler will prune terminated task history.
+-hostname
+	The hostname to advertise in ZooKeeper instead of the locally-resolved hostname.
+-http_authentication_mechanism (default NONE)
+	HTTP Authentication mechanism to use.
+-http_port (default 0)
+	The port to start an HTTP server on.  Default value will choose a random port.
+-initial_flapping_task_delay (default (30, secs))
+	Initial amount of time to wait before attempting to schedule a flapping task.
+-initial_schedule_penalty (default (1, secs))
+	Initial amount of time to wait before attempting to schedule a task that has failed to schedule.
+-initial_task_kill_retry_interval (default (5, secs))
+	When killing a task, retry after this delay if mesos has not responded, backing off up to transient_task_state_timeout
+-job_update_history_per_job_threshold (default 10)
+	Maximum number of completed job updates to retain in a job update history.
+-job_update_history_pruning_interval (default (15, mins))
+	Job update history pruning interval.
+-job_update_history_pruning_threshold (default (30, days))
+	Time after which the scheduler will prune completed job update history.
+-kerberos_debug (default false)
+	Produce additional Kerberos debugging output.
+-kerberos_server_keytab
+	Path to the server keytab.
+-kerberos_server_principal
+	Kerberos server principal to use, usually of the form HTTP/aurora.example.com@EXAMPLE.COM
+-max_flapping_task_delay (default (5, mins))
+	Maximum delay between attempts to schedule a flapping task.
+-max_leading_duration (default (1, days))
+	After leading for this duration, the scheduler should commit suicide.
+-max_registration_delay (default (1, mins))
+	Max allowable delay to allow the driver to register before aborting
+-max_reschedule_task_delay_on_startup (default (30, secs))
+	Upper bound of random delay for pending task rescheduling on scheduler startup.
+-max_saved_backups (default 48)
+	Maximum number of backups to retain before deleting the oldest backups.
+-max_schedule_attempts_per_sec (default 40.0)
+	Maximum number of scheduling attempts to make per second.
+-max_schedule_penalty (default (1, mins))
+	Maximum delay between attempts to schedule a PENDING tasks.
+-max_status_update_batch_size (default 1000) [must be > 0]
+	The maximum number of status updates that can be processed in a batch.
+-max_tasks_per_job (default 4000) [must be > 0]
+	Maximum number of allowed tasks in a single job.
+-max_update_instance_failures (default 20000) [must be > 0]
+	Upper limit on the number of failures allowed during a job update. This helps cap potentially unbounded entries into storage.
+-min_offer_hold_time (default (5, mins))
+	Minimum amount of time to hold a resource offer before declining.
+-native_log_election_retries (default 20)
+	The maximum number of attempts to obtain a new log writer.
+-native_log_election_timeout (default (15, secs))
+	The timeout for a single attempt to obtain a new log writer.
+-native_log_file_path
+	Path to a file to store the native log data in.  If the parent directory doesnot exist it will be created.
+-native_log_quorum_size (default 1)
+	The size of the quorum required for all log mutations.
+-native_log_read_timeout (default (5, secs))
+	The timeout for doing log reads.
+-native_log_write_timeout (default (3, secs))
+	The timeout for doing log appends and truncations.
+-native_log_zk_group_path
+	A zookeeper node for use by the native log to track the master coordinator.
+-offer_hold_jitter_window (default (1, mins))
+	Maximum amount of random jitter to add to the offer hold time window.
+-offer_reservation_duration (default (3, mins))
+	Time to reserve a slave's offers while trying to satisfy a task preempting another.
+-populate_discovery_info (default false)
+	If true, Aurora populates DiscoveryInfo field of Mesos TaskInfo.
+-preemption_delay (default (3, mins))
+	Time interval after which a pending task becomes eligible to preempt other tasks
+-preemption_slot_hold_time (default (5, mins))
+	Time to hold a preemption slot found before it is discarded.
+-preemption_slot_search_interval (default (1, mins))
+	Time interval between pending task preemption slot searches.
+-receive_revocable_resources (default false)
+	Allows receiving revocable resource offers from Mesos.
+-reconciliation_explicit_interval (default (60, mins))
+	Interval on which scheduler will ask Mesos for status updates of all non-terminal tasks known to scheduler.
+-reconciliation_implicit_interval (default (60, mins))
+	Interval on which scheduler will ask Mesos for status updates of all non-terminal tasks known to Mesos.
+-reconciliation_initial_delay (default (1, mins))
+	Initial amount of time to delay task reconciliation after scheduler start up.
+-reconciliation_schedule_spread (default (30, mins))
+	Difference between explicit and implicit reconciliation intervals intended to create a non-overlapping task reconciliation schedule.
+-require_docker_use_executor (default true)
+	If false, Docker tasks may run without an executor (EXPERIMENTAL)
+-shiro_ini_path
+	Path to shiro.ini for authentication and authorization configuration.
+-shiro_realm_modules (default [org.apache.aurora.scheduler.app.MoreModules$1@2d3379b4])
+	Guice modules for configuring Shiro Realms.
+-sla_non_prod_metrics (default [])
+	Metric categories collected for non production tasks.
+-sla_prod_metrics (default [JOB_UPTIMES, PLATFORM_UPTIME, MEDIANS])
+	Metric categories collected for production tasks.
+-sla_stat_refresh_interval (default (1, mins))
+	The SLA stat refresh interval.
+-slow_query_log_threshold (default (25, ms))
+	Log all queries that take at least this long to execute.
+-stat_retention_period (default (1, hrs))
+	Time for a stat to be retained in memory before expiring.
+-stat_sampling_interval (default (1, secs))
+	Statistic value sampling interval.
+-thermos_executor_cpu (default 0.25)
+	The number of CPU cores to allocate for each instance of the executor.
+-thermos_executor_flags
+	Extra arguments to be passed to the thermos executor
+-thermos_executor_ram (default (128, MB))
+	The amount of RAM to allocate for each instance of the executor.
+-thermos_executor_resources (default [])
+	A comma separated list of additional resources to copy into the sandbox.Note: if thermos_executor_path is not the thermos_executor.pex file itself, this must include it.
+-thermos_home_in_sandbox (default false)
+	If true, changes HOME to the sandbox before running the executor. This primarily has the effect of causing the executor and runner to extract themselves into the sandbox.
+-thermos_observer_root (default /var/run/thermos)
+	Path to the thermos observer root (by default /var/run/thermos.)
+-transient_task_state_timeout (default (5, mins))
+	The amount of time after which to treat a task stuck in a transient state as LOST.
+-use_beta_db_task_store (default false)
+	Whether to use the experimental database-backed task store.
+-viz_job_url_prefix (default )
+	URL prefix for job container stats.
+-zk_chroot_path
+	chroot path to use for the ZooKeeper connections
+-zk_in_proc (default false)
+	Launches an embedded zookeeper server for local testing causing -zk_endpoints to be ignored if specified.
+-zk_session_timeout (default (4, secs))
+	The ZooKeeper session timeout.
+-------------------------------------------------------------------------
+```
diff --git a/source/documentation/0.13.0/reference/scheduler-endpoints.md b/source/documentation/0.13.0/reference/scheduler-endpoints.md
new file mode 100644
index 0000000..d302e90
--- /dev/null
+++ b/source/documentation/0.13.0/reference/scheduler-endpoints.md
@@ -0,0 +1,19 @@
+# HTTP endpoints
+
+There are a number of HTTP endpoints that the Aurora scheduler exposes. These allow various
+operational tasks to be performed on the scheduler. Below is the list of all such endpoints
+and a brief explanation of what they do.
+
+## Leader health
+The /leaderhealth endpoint enables performing health checks on the scheduler instances in order
+to forward requests to the leading scheduler. This is typically used by a load balancer such as
+HAProxy or AWS ELB.
+
+When an HTTP GET request is issued on this endpoint, it responds as follows:
+
+- If the instance that received the GET request is the leading scheduler, an HTTP status code of
+  200 (OK) is returned.
+- If the instance that received the GET request is not the leading scheduler but a leader does
+  exist, an HTTP status code of 503 (SERVICE_UNAVAILABLE) is returned.
+- If no leader currently exists or the leader is unknown, an HTTP status code of 502
+  (BAD_GATEWAY) is returned.
\ No newline at end of file
diff --git a/source/documentation/0.13.0/reference/task-lifecycle.md b/source/documentation/0.13.0/reference/task-lifecycle.md
new file mode 100644
index 0000000..1477364
--- /dev/null
+++ b/source/documentation/0.13.0/reference/task-lifecycle.md
@@ -0,0 +1,146 @@
+# Task Lifecycle
+
+When Aurora reads a configuration file and finds a `Job` definition, it:
+
+1.  Evaluates the `Job` definition.
+2.  Splits the `Job` into its constituent `Task`s.
+3.  Sends those `Task`s to the scheduler.
+4.  The scheduler puts the `Task`s into `PENDING` state, starting each
+    `Task`'s life cycle.
+
+
+![Life of a task](../images/lifeofatask.png)
+
+Please note, a couple of task states described below are missing from
+this state diagram.
+
+
+## PENDING to RUNNING states
+
+When a `Task` is in the `PENDING` state, the scheduler constantly
+searches for machines satisfying that `Task`'s resource request
+requirements (RAM, disk space, CPU time) while maintaining configuration
+constraints such as "a `Task` must run on machines dedicated to a
+particular role" or attribute limit constraints such as "at most 2
+`Task`s from the same `Job` may run on each rack". When the scheduler
+finds a suitable match, it assigns the `Task` to a machine and puts the
+`Task` into the `ASSIGNED` state.
+
+From the `ASSIGNED` state, the scheduler sends an RPC to the slave
+machine containing `Task` configuration, which the slave uses to spawn
+an executor responsible for the `Task`'s lifecycle. When the scheduler
+receives an acknowledgment that the machine has accepted the `Task`,
+the `Task` goes into `STARTING` state.
+
+`STARTING` state initializes a `Task` sandbox. When the sandbox is fully
+initialized, Thermos begins to invoke `Process`es. Also, the slave
+machine sends an update to the scheduler that the `Task` is
+in `RUNNING` state.
+
+
+
+## RUNNING to terminal states
+
+There are various ways that an active `Task` can transition into a terminal
+state. By definition, it can never leave this state. However, depending on
+the nature of the termination and the originating `Job` definition
+(e.g. `service`, `max_task_failures`), a replacement `Task` might be
+scheduled.
+
+### Natural Termination: FINISHED, FAILED
+
+A `RUNNING` `Task` can terminate without direct user interaction. For
+example, it may be a finite computation that finishes, even something as
+simple as `echo hello world`, or it could be an exceptional condition in
+a long-lived service. If the `Task` is successful (its underlying
+processes have succeeded with exit status `0` or finished without
+reaching failure limits) it moves into `FINISHED` state. If it finished
+after reaching a set of failure limits, it goes into `FAILED` state.
+
+A terminated `Task` which is subject to rescheduling will be temporarily
+`THROTTLED` if it is considered to be flapping. A task is flapping if its
+previous invocation was terminated after less than 5 minutes (the scheduler
+default). The time penalty a task has to remain in the `THROTTLED` state
+before it is eligible for rescheduling increases with each consecutive
+failure.
+
+### Forceful Termination: KILLING, RESTARTING
+
+You can terminate a `Task` by issuing an `aurora job kill` command, which
+moves it into `KILLING` state. The scheduler then sends the slave a
+request to terminate the `Task`. If the scheduler receives a successful
+response, it moves the Task into `KILLED` state and never restarts it.
+
+If a `Task` is forced into the `RESTARTING` state via the `aurora job restart`
+command, the scheduler kills the underlying task but in parallel schedules
+an identical replacement for it.
+
+In any case, the responsible executor on the slave follows an escalation
+sequence when killing a running task:
+
+  1. If a `HttpLifecycleConfig` is not present, skip to (4).
+  2. Send a POST to the `graceful_shutdown_endpoint` and wait 5 seconds.
+  3. Send a POST to the `shutdown_endpoint` and wait 5 seconds.
+  4. Send SIGTERM (`kill`) and wait at most `finalization_wait` seconds.
+  5. Send SIGKILL (`kill -9`).
+
+If the executor notices that all `Process`es in a `Task` have aborted
+during this sequence, it will not proceed with subsequent steps.
+Note that graceful shutdown is best-effort, and due to the many
+inevitable realities of distributed systems, it may not be performed.
+
+### Unexpected Termination: LOST
+
+If a `Task` stays in a transient task state for too long (such as `ASSIGNED`
+or `STARTING`), the scheduler forces it into `LOST` state, creating a new
+`Task` in its place that's sent into `PENDING` state.
+
+In addition, if the Mesos core tells the scheduler that a slave has
+become unhealthy (or outright disappeared), the `Task`s assigned to that
+slave go into `LOST` state and new `Task`s are created in their place.
+From `PENDING` state, there is no guarantee a `Task` will be reassigned
+to the same machine unless job constraints explicitly force it there.
+
+### Giving Priority to Production Tasks: PREEMPTING
+
+Sometimes a Task needs to be interrupted, such as when a non-production
+Task's resources are needed by a higher priority production Task. This
+type of interruption is called *preemption*. When this happens in
+Aurora, the non-production Task is killed and moved into
+the `PREEMPTING` state when both of the following are true:
+
+- The task being killed is a non-production task.
+- The other task is a `PENDING` production task that hasn't been
+  scheduled due to a lack of resources.
+
+The scheduler UI shows the non-production task was preempted in favor of
+the production task. At some point, tasks in `PREEMPTING` move to `KILLED`.
+
+Note that non-production tasks consuming many resources are likely to be
+preempted in favor of production tasks.
+
+### Making Room for Maintenance: DRAINING
+
+Cluster operators can set a slave into maintenance mode. This transitions
+all `Task`s running on that slave into `DRAINING` and eventually to `KILLED`.
+Drained `Task`s will be restarted on other slaves for which no maintenance
+has been announced yet.
+
+
+
+## State Reconciliation
+
+Due to the many inevitable realities of distributed systems, there might
+be a mismatch of perceived and actual cluster state (e.g. a machine returns
+from a `netsplit` but the scheduler has already marked all its `Task`s as
+`LOST` and rescheduled them).
+
+Aurora regularly runs a state reconciliation process in order to detect
+and correct such issues (e.g. by killing the errant `RUNNING` tasks).
+By default, the proper detection of all failure scenarios and inconsistencies
+may take up to an hour.
+
+To emphasize this point: there is no uniqueness guarantee for a single
+instance of a job in the presence of network partitions. If the `Task`
+requires that, it should be baked in at the application level using a
+distributed coordination service such as Zookeeper.
diff --git a/source/documentation/0.14.0/additional-resources/presentations.md b/source/documentation/0.14.0/additional-resources/presentations.md
new file mode 100644
index 0000000..0211318
--- /dev/null
+++ b/source/documentation/0.14.0/additional-resources/presentations.md
@@ -0,0 +1,80 @@
+# Apache Aurora Presentations
+Video and slides from presentations and panel discussions about Apache Aurora.
+
+_(Listed in date descending order)_
+
+<table>
+
+	<tr>
+		<td><img src="/documentation/0.14.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png" alt="Mesos and Aurora on a Small Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=q5iIqhaCJ_o">Mesos &amp; Aurora on a Small Scale (Video)</a></strong>
+		<p>Presented by Florian Pfeiffer</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.14.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png" alt="SLA Aware Maintenance for Operators Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=tZ0-SISvCis">SLA Aware Maintenance for Operators (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.14.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png" alt="Shipping Code with Aurora Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=y1hi7K1lPkk">Shipping Code with Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.14.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png" alt="Twitter Production Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=nNrh-gdu9m4">Twitter’s Production Scale: Mesos and Aurora Operations (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.14.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png" alt="From Monolith to Microservices with Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=yXkOgnyK4Hw">From Monolith to Microservices w/ Aurora (Video)</a></strong>
+		<p>Presented by Thanos Baskous, Tony Dong, Dobromir Montauk</p>
+		<p>April 30, 2015 at <a href="http://www.meetup.com/Bay-Area-Apache-Aurora-Users-Group/events/221219480/">Bay Area Apache Aurora Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.14.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png" alt="Aurora + Mesos in Practice at Twitter Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=1XYJGX_qZVU">Aurora + Mesos in Practice at Twitter (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 07, 2015 at <a href="http://www.bigeng.io/aurora-mesos-in-practice-at-twitter">Bigcommerce TechTalk</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.14.0/images/presentations/02_28_2015_apache_aurora_thumb.png" alt="Apache Auroraの始めかた Slideshow Thumbnail" /></td>
+		<td><strong><a href="http://www.slideshare.net/zembutsu/apache-aurora-introduction-and-tutorial-osc15tk">Apache Auroraの始めかた (Slides)</a></strong>
+		<p>Presented by Masahito Zembutsu</p>
+		<p>February 28, 2015 at <a href="http://www.ospn.jp/osc2015-spring/">Open Source Conference 2015 Tokyo Spring</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.14.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png" alt="Apache Aurora Adopters Panel Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=2Jsj0zFdRlg">Apache Aurora Adopters Panel (Video)</a></strong>
+		<p>Panelists Ben Staffin, Josh Adams, Bill Farner, Berk Demir</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.14.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png" alt="Operating Apache Aurora and Mesos at Twitter Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=E4lxX6epM_U">Operating Apache Aurora and Mesos at Twitter (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.14.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png" alt="Apache Aurora and Mesos at TellApart" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=ZZXtXLvTXAE">Apache Aurora and Mesos at TellApart (Video)</a></strong>
+		<p>Presented by Steve Niemitz</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.14.0/images/presentations/08_21_2014_past_present_future_thumb.png" alt="Past, Present, and Future of the Aurora Scheduler Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=Dsc5CPhKs4o">Past, Present, and Future of the Aurora Scheduler (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 21, 2014 at <a href="http://events.linuxfoundation.org/events/archive/2014/mesoscon">#MesosCon 2014</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.14.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png" alt="Introduction to Apache Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=asd_h6VzaJc">Introduction to Apache Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 25, 2014 at <a href="https://www.eventbrite.com/e/aurora-and-mesosframeworksmeetup-tickets-10850994617">Aurora and Mesos Frameworks Meetup</a></p></td>
+	</tr>
+</table>
diff --git a/source/documentation/0.14.0/additional-resources/tools.md b/source/documentation/0.14.0/additional-resources/tools.md
new file mode 100644
index 0000000..044bad7
--- /dev/null
+++ b/source/documentation/0.14.0/additional-resources/tools.md
@@ -0,0 +1,21 @@
+# Tools
+
+Various tools integrate with Aurora. Is there a tool missing? Let us know, or submit a patch to add it!
+
+* Load-balancing technology used to direct traffic to services running on Aurora:
+  - [synapse](https://github.com/airbnb/synapse) based on HAProxy
+  - [aurproxy](https://github.com/tellapart/aurproxy) based on nginx
+  - [jobhopper](https://github.com/benley/aurora-jobhopper) performs HTTP redirects for easy developer and administrator access
+
+* RPC libraries that integrate with Aurora's [service discovery mechanism](../../features/service-discovery/):
+  - [linkerd](https://linkerd.io/) RPC proxy
+  - [finagle](https://twitter.github.io/finagle) (Scala)
+  - [scales](https://github.com/steveniemitz/scales) (Python)
+
+* Monitoring:
+  - [collectd-aurora](https://github.com/zircote/collectd-aurora) for cluster monitoring using collectd
+  - [Prometheus Aurora exporter](https://github.com/tommyulfsparre/aurora_exporter) for cluster monitoring using Prometheus
+  - [Prometheus service discovery integration](http://prometheus.io/docs/operating/configuration/#zookeeper-serverset-sd-configurations-serverset_sd_config) for discovering and monitoring services running on Aurora
+
+* Packaging and deployment:
+  - [aurora-packaging](https://github.com/apache/aurora-packaging), the source of the official Aurora packages
diff --git a/source/documentation/0.14.0/contributing.md b/source/documentation/0.14.0/contributing.md
new file mode 100644
index 0000000..0a9b9d4
--- /dev/null
+++ b/source/documentation/0.14.0/contributing.md
@@ -0,0 +1,91 @@
+## Get the Source Code
+
+First things first, you'll need the source! The Aurora source is available from Apache git:
+
+    git clone https://gitbox.apache.org/repos/asf/aurora
+
+## Read the Style Guides
+Aurora's codebase is primarily Java and Python and conforms to the Twitter Commons styleguides for
+both languages.
+
+- [Java Style Guide](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/styleguide.md)
+- [Python Style Guide](https://github.com/twitter/commons/blob/master/src/python/twitter/common/styleguide.md)
+
+## Find Something to Do
+
+There are issues in [Jira](https://issues.apache.org/jira/browse/AURORA) with the
+["newbie" label](https://issues.apache.org/jira/issues/?jql=project%20%3D%20AURORA%20AND%20labels%20%3D%20newbie%20and%20resolution%3Dunresolved)
+that are good starting places for new Aurora contributors; pick one of these and dive in! Once
+you've got a patch, the next step is to post a review.
+
+## Getting your ReviewBoard Account
+
+Go to https://reviews.apache.org and create an account.
+
+## Setting up your ReviewBoard Environment
+
+Run `./rbt status`. The first time this runs, it will bootstrap and you will be asked to log in.
+Subsequent runs will cache your login credentials.
+
+## Submitting a Patch for Review
+
+Post a review with `rbt`, fill out the fields in your browser and hit Publish.
+
+    ./rbt post -o
+
+If you're unsure about who to add as a reviewer, you can default to adding Bill Farner (wfarner) and
+Joshua Cohen (jcohen). They will take care of finding an appropriate reviewer for the patch.
+
+Once you've done this, you probably want to mark the associated Jira issue as Reviewable.
+
+## Updating an Existing Review
+
+Incorporate review feedback, make some more commits, update your existing review, fill out the
+fields in your browser and hit Publish.
+
+    ./rbt post -o -r <RB_ID>
+
+## Getting Your Review Merged
+
+If you're not an Aurora committer, one of the committers will merge your change in as described
+below. Generally, the last reviewer to give the review a 'Ship It!' will be responsible.
+
+### Merging Your Own Review (Committers)
+
+Once you have shipits from the right committers, merge your changes in a single commit and mark
+the review as submitted. The typical workflow is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>  # Verify the automatically-generated commit message looks sane,
+                            # editing if necessary.
+    git show master         # Verify everything looks sane
+    git push origin master
+    ./rbt close <RB_ID>
+
+Note that even if you're developing using feature branches you will not use `git merge` - each
+commit will be an atomic change accompanied by a ReviewBoard entry.
+
+### Merging Someone Else's Review
+
+Sometimes you'll need to merge someone else's RB. The typical workflow for this is
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>
+    git show master  # Verify everything looks sane, author is correct
+    git push origin master
+
+Note for committers: while we generally use the commit message generated by `./rbt patch`, some
+changes are often required:
+
+1. Ensure the commit message does not exceed 100 characters per line.
+2. Remove the "Testing Done" section. It's generally redundant (can be seen by checking the linked
+  review) or entirely irrelevant to the commit itself.
+
+## Cleaning Up
+
+Your patch has landed, congratulations! The last thing you'll want to do before moving on to your
+next fix is to clean up your Jira and ReviewBoard: the former should be marked as
+"Resolved" while the latter should be marked as "Submitted".
diff --git a/source/documentation/0.14.0/development/client.md b/source/documentation/0.14.0/development/client.md
new file mode 100644
index 0000000..079c471
--- /dev/null
+++ b/source/documentation/0.14.0/development/client.md
@@ -0,0 +1,81 @@
+Developing the Aurora Client
+============================
+
+The client is written in Python, and uses the
+[Pants](http://pantsbuild.github.io/python-readme.html) build tool.
+
+
+Building and Testing
+--------------------
+
+Building and testing the client code are both done using Pants. The relevant targets to know about
+are:
+
+   * Build a client executable: `./pants binary src/main/python/apache/aurora/client:aurora`
+   * Test client code: `./pants test src/test/python/apache/aurora/client/cli:cli`
+
+If you want to build a source distribution of the client, you need to run `./build-support/release/make-python-sdists`.
+
+
+Running/Debugging
+------------------
+
+For manually testing client changes against a cluster, we use [Vagrant](https://www.vagrantup.com/).
+To start a virtual cluster, you need to install Vagrant, and then run `vagrant up` from the root of
+the aurora workspace. This will create a vagrant host named "devcluster", with a Mesos master, a set
+of Mesos agents, and an Aurora scheduler.
+
+If you have a change you would like to test in your local cluster, you'll need to rebuild the client:
+
+    vagrant ssh -c 'aurorabuild client'
+
+Once this completes, the `aurora` command will reflect your changes.
+
+
+Running/Debugging in PyCharm
+-----------------------------
+
+It's possible to use PyCharm to run and debug both the client and client tests in an IDE. In order
+to do this, first run:
+
+    build-support/python/make-pycharm-virtualenv
+
+This script will configure a virtualenv with all of our Python requirements. Once the script
+completes it will emit instructions for configuring PyCharm:
+
+    Your PyCharm environment is now set up.  You can open the project root
+    directory with PyCharm.
+
+    Once the project is loaded:
+      - open project settings
+      - click 'Project Interpreter'
+      - click the cog in the upper-right corner
+      - click 'Add Local'
+      - select 'build-support/python/pycharm.venv/bin/python'
+      - click 'OK'
+
+### Running/Debugging Tests
+After following these instructions, you should now be able to run/debug tests directly from the IDE
+by right-clicking on a test (or test class) and choosing to run or debug:
+
+[![Debug Client Test](../images/debug-client-test.png)](../images/debug-client-test.png)
+
+If you've set a breakpoint, you can see the run will now stop and let you debug:
+
+[![Debugging Client Test](../images/debugging-client-test.png)](../images/debugging-client-test.png)
+
+### Running/Debugging the Client
+Actually running and debugging the client is unfortunately a bit more complex. You'll need to create
+a Run configuration:
+
+* Go to Run → Edit Configurations
+* Click the + icon to add a new configuration.
+* Choose python and name the configuration 'client'.
+* Set the script path to `/your/path/to/aurora/src/main/python/apache/aurora/client/cli/client.py`
+* Set the script parameters to the command you want to run (e.g. `job status <job key>`)
+* Expand the Environment section and click the ellipsis to add a new environment variable
+* Click the + at the bottom to add a new variable named AURORA_CONFIG_ROOT whose value is the
+  path where your cluster configuration can be found. For example, to talk to the scheduler
+  running in the vagrant image, it would be set to `/your/path/to/aurora/examples/vagrant` (this
+  is the directory where our example clusters.json is found).
+* You should now be able to run and debug this configuration!
diff --git a/source/documentation/0.14.0/development/committers-guide.md b/source/documentation/0.14.0/development/committers-guide.md
new file mode 100644
index 0000000..7974f32
--- /dev/null
+++ b/source/documentation/0.14.0/development/committers-guide.md
@@ -0,0 +1,86 @@
+Committer's Guide
+=================
+
+Information for official Apache Aurora committers.
+
+Setting up your email account
+-----------------------------
+Once your Apache ID has been set up you can configure your account, add ssh keys, and set up an
+email forwarding address at
+
+    http://id.apache.org
+
+Additional instructions for setting up your new committer email can be found at
+
+    http://www.apache.org/dev/user-email.html
+
+The recommended setup is to configure all services (mailing lists, JIRA, ReviewBoard) to send
+emails to your @apache.org email address.
+
+
+Creating a gpg key for releases
+-------------------------------
+In order to create a release candidate you will need a gpg key published to an external key server
+and that key will need to be added to our KEYS file as well.
+
+1. Create a key:
+
+               gpg --gen-key
+
+2. Add your gpg key to the Apache Aurora KEYS file:
+
+               git clone https://gitbox.apache.org/repos/asf/aurora.git
+               (gpg --list-sigs <KEY ID> && gpg --armor --export <KEY ID>) >> KEYS
+               git add KEYS && git commit -m "Adding gpg key for <APACHE ID>"
+               ./rbt post -o -g
+
+3. Publish the key to an external key server:
+
+               gpg --keyserver pgp.mit.edu --send-keys <KEY ID>
+
+4. Upload the updated KEYS file to the Apache Aurora svn dist locations listed below:
+
+               https://dist.apache.org/repos/dist/dev/aurora/KEYS
+               https://dist.apache.org/repos/dist/release/aurora/KEYS
+
+5. Add your key to git config for use with the release scripts:
+
+               git config --global user.signingkey <KEY ID>
+
+
+Creating a release
+------------------
+The following will guide you through the steps to create a release candidate, vote, and finally an
+official Apache Aurora release. Before starting, your gpg key should be in the KEYS file and you
+must have access to commit to the dist.a.o repositories.
+
+1. Ensure that all issues resolved for this release candidate are tagged with the correct Fix
+Version in JIRA; the changelog script will use this to generate the CHANGELOG in step #2.
+
+2. Create a release candidate. This will automatically update the CHANGELOG and commit it, create a
+branch and update the current version within the trunk. To create a minor version update and publish
+it run
+
+               ./build-support/release/release-candidate -l m -p
+
+3. Update, if necessary, the draft email created from the `release-candidate` script in step #2 and
+send the [VOTE] email to the dev@ mailing list. You can verify the release signature and checksums
+by running
+
+               ./build-support/release/verify-release-candidate
+
+4. Wait for the vote to complete. If the vote fails, close it by replying to the initial [VOTE]
+email sent in step #3, editing the subject to [RESULT][VOTE] ... and noting the failure reason
+(example [here](http://markmail.org/message/d4d6xtvj7vgwi76f)). Address any issues, then go back to
+step #1 and run again, this time using the -r flag to increment the release candidate
+version. This will automatically clean up the release candidate rc0 branch and source distribution.
+
+               ./build-support/release/release-candidate -l m -r 1 -p
+
+5. Once the vote has successfully passed, create the release
+
+               ./build-support/release/release
+
+6. Update the draft email created from the `release` script in step #5 to include the Apache IDs for
+all binding votes and send the [RESULT][VOTE] email to the dev@ mailing list.
+
diff --git a/source/documentation/0.14.0/development/db-migration.md b/source/documentation/0.14.0/development/db-migration.md
new file mode 100644
index 0000000..0cb7bcf
--- /dev/null
+++ b/source/documentation/0.14.0/development/db-migration.md
@@ -0,0 +1,34 @@
+DB Migrations
+=============
+
+Changes to the DB schema should be made in the form of migrations. This ensures that all changes
+are applied correctly after a DB dump from a previous version is restored.
+
+DB migrations are managed through a system built on top of
+[MyBatis Migrations](http://www.mybatis.org/migrations/). The migrations are run automatically when
+a snapshot is restored; no manual interaction is required from cluster operators.
+
+Upgrades
+--------
+When adding or altering tables or changing data, in addition to making the change in
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql), a new
+migration class should be created under the org.apache.aurora.scheduler.storage.db.migration
+package. The class should implement the [MigrationScript](https://github.com/mybatis/migrations/blob/master/src/main/java/org/apache/ibatis/migration/MigrationScript.java)
+interface (see [V001_TestMigration](https://github.com/apache/aurora/blob/rel/0.14.0/src/test/java/org/apache/aurora/scheduler/storage/db/testmigration/V001_TestMigration.java)
+as an example). The upgrade and downgrade scripts are defined in this class. When restoring a
+snapshot the list of migrations on the classpath is compared to the list of applied changes in the
+DB. Any changes that have not yet been applied are executed and their downgrade script is stored
+alongside the changelog entry in the database to facilitate downgrades in the event of a rollback.
+
+Downgrades
+----------
+If, while running migrations, a rollback is detected, i.e. a change exists in the DB changelog that
+does not exist on the classpath, the downgrade script associated with each affected change is
+applied.
+
+Baselines
+---------
+After enough time has passed (at least 1 official release), it should be safe to baseline migrations
+if desired. This can be accomplished by ensuring the changes from migrations have been applied to
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql) and then
+removing the corresponding migration classes and adding a migration to remove the changelog entries.
\ No newline at end of file
diff --git a/source/documentation/0.14.0/development/design-documents.md b/source/documentation/0.14.0/development/design-documents.md
new file mode 100644
index 0000000..5ada27f
--- /dev/null
+++ b/source/documentation/0.14.0/development/design-documents.md
@@ -0,0 +1,21 @@
+Design Documents
+================
+
+Since its inception as an Apache project, larger feature additions to the
+Aurora code base are discussed in the form of design documents. Design documents
+are living documents until a consensus has been reached to implement a feature
+in the proposed form.
+
+Current and past documents:
+
+* [Command Hooks for the Aurora Client](../design/command-hooks/)
+* [GPU Resources in Aurora](https://docs.google.com/document/d/1J9SIswRMpVKQpnlvJAMAJtKfPP7ZARFknuyXl-2aZ-M/edit)
+* [Health Checks for Updates](https://docs.google.com/document/d/1ZdgW8S4xMhvKW7iQUX99xZm10NXSxEWR0a-21FP5d94/edit)
+* [JobUpdateDiff thrift API](https://docs.google.com/document/d/1Fc_YhhV7fc4D9Xv6gJzpfooxbK4YWZcvzw6Bd3qVTL8/edit)
+* [REST API RFC](https://docs.google.com/document/d/11_lAsYIRlD5ETRzF2eSd3oa8LXAHYFD8rSetspYXaf4/edit)
+* [Revocable Mesos offers in Aurora](https://docs.google.com/document/d/1r1WCHgmPJp5wbrqSZLsgtxPNj3sULfHrSFmxp2GyPTo/edit)
+* [Supporting the Mesos Universal Containerizer](https://docs.google.com/document/d/111T09NBF2zjjl7HE95xglsDpRdKoZqhCRM5hHmOfTLA/edit?usp=sharing)
+* [Tier Management In Apache Aurora](https://docs.google.com/document/d/1erszT-HsWf1zCIfhbqHlsotHxWUvDyI2xUwNQQQxLgs/edit?usp=sharing)
+* [Ubiquitous Jobs](https://docs.google.com/document/d/12hr6GnUZU3mc7xsWRzMi3nQILGB-3vyUxvbG-6YmvdE/edit)
+
+Design documents can be found in the Aurora issue tracker via the query [`project = AURORA AND text ~ "docs.google.com" ORDER BY created`](https://issues.apache.org/jira/browse/AURORA-1528?jql=project%20%3D%20AURORA%20AND%20text%20~%20%22docs.google.com%22%20ORDER%20BY%20created).
diff --git a/source/documentation/0.14.0/development/design/command-hooks.md b/source/documentation/0.14.0/development/design/command-hooks.md
new file mode 100644
index 0000000..3f3f70f
--- /dev/null
+++ b/source/documentation/0.14.0/development/design/command-hooks.md
@@ -0,0 +1,102 @@
+# Command Hooks for the Aurora Client
+
+## Introduction/Motivation
+
+We've got hooks in the client that surround API calls. These are
+pretty awkward, because they don't correlate with user actions. For
+example, suppose we wanted a policy that said users weren't allowed to
+kill all instances of a production job at once.
+
+Right now, all that we could hook would be the "killJob" api call. But
+kill (at least in newer versions of the client) normally runs in
+batches. If a user called killall, what we would see on the API level
+is a series of "killJob" calls, each of which specified a batch of
+instances. We wouldn't be able to distinguish between really killing
+all instances of a job (which is forbidden under this policy), and
+carefully killing in batches (which is permitted). In each case, the
+hook would just see a series of API calls, and couldn't find out what
+the actual command being executed was!
+
+For most policy enforcement, what we really want to be able to do is
+look at and vet the commands that a user is performing, not the API
+calls that the client uses to implement those commands.
+
+So I propose that we add a new kind of hooks, which surround noun/verb
+commands. A hook will register itself to handle a collection of (noun,
+verb) pairs. Whenever any of those noun/verb commands are invoked, the
+hook's methods will be called around the execution of the verb. A
+pre-hook will have the ability to reject a command, preventing the
+verb from being executed.
+
+## Registering Hooks
+
+These hooks will be registered via configuration plugins. A configuration plugin
+can register hooks using an API. Hooks registered this way are, effectively,
+hardwired into the client executable.
+
+The order of execution of hooks is unspecified: they may be called in
+any order. There is no way to guarantee that one hook will execute
+before some other hook.
+
+
+### Global Hooks
+
+Hooks registered by the Python call are called _global_ hooks,
+because they will run for all configurations, whether or not they
+specify any hooks in the configuration file.
+
+In the implementation, hooks are registered in the module
+`apache.aurora.client.cli.command_hooks`, using the class
+`GlobalCommandHookRegistry`. A global hook can be registered by calling
+`GlobalCommandHookRegistry.register_command_hook` in a configuration plugin.
+
+### The API
+
+    class CommandHook(object):
+      @property
+      def name(self):
+        """Returns a name for the hook."""
+
+      def get_nouns(self):
+        """Return the nouns that have verbs that should invoke this hook."""
+
+      def get_verbs(self, noun):
+        """Return the verbs for a particular noun that should invoke his hook."""
+
+      @abstractmethod
+      def pre_command(self, noun, verb, context, commandline):
+        """Execute a hook before invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        Returns: True if the command should be allowed to proceed; False if the command
+        should be rejected.
+        """
+
+      def post_command(self, noun, verb, context, commandline, result):
+        """Execute a hook after invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        * result: the result code returned by the verb.
+        Returns: nothing
+        """
+
+    class GlobalCommandHookRegistry(object):
+      @classmethod
+      def register_command_hook(cls, hook):
+        pass
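+
+For example, a configuration plugin might provide a hook like the following (the hook name and the
+policy it enforces are purely illustrative):
+
+    class ProdKillallHook(CommandHook):
+      """Illustrative hook: refuses to kill all instances of a production job at once."""
+
+      @property
+      def name(self):
+        return 'prod_killall_guard'
+
+      def get_nouns(self):
+        return ['job']
+
+      def get_verbs(self, noun):
+        return ['killall'] if noun == 'job' else []
+
+      def pre_command(self, noun, verb, context, commandline):
+        # Reject the command if it targets a production environment; allow it otherwise.
+        return not any('/prod/' in arg for arg in commandline)
+
+      def post_command(self, noun, verb, context, commandline, result):
+        pass
+
+    # Registering the hook from the plugin makes it a global hook:
+    GlobalCommandHookRegistry.register_command_hook(ProdKillallHook())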
+
+### Skipping Hooks
+
+To skip a hook, a user uses a command-line option, `--skip-hooks`. The option can either
+specify specific hooks to skip, or "all":
+
+* `aurora --skip-hooks=all job create east/bozo/devel/myjob` will create a job
+  without running any hooks.
+* `aurora --skip-hooks=test,iq create east/bozo/devel/myjob` will create a job,
+  and will skip only the hooks named "test" and "iq".
diff --git a/source/documentation/0.14.0/development/scheduler.md b/source/documentation/0.14.0/development/scheduler.md
new file mode 100644
index 0000000..fc0836a
--- /dev/null
+++ b/source/documentation/0.14.0/development/scheduler.md
@@ -0,0 +1,118 @@
+Developing the Aurora Scheduler
+===============================
+
+The Aurora scheduler is written in Java code and built with [Gradle](http://gradle.org).
+
+
+Prerequisite
+============
+
+When using Apache Aurora checked out from the source repository or the binary
+distribution, the Gradle wrapper and JavaScript dependencies are provided.
+However, you need to manually install them when using the source release
+downloads:
+
+1. Install Gradle following the instructions on the [Gradle web site](http://gradle.org)
+2. From the root directory of the Apache Aurora project generate the Gradle
+wrapper by running:
+
+    gradle wrapper
+
+
+Getting Started
+===============
+
+You will need Java 8 installed and on your `PATH` or unzipped somewhere with `JAVA_HOME` set. Then
+
+    ./gradlew tasks
+
+will bootstrap the build system and show available tasks. This can take a while the first time you
+run it but subsequent runs will be much faster due to cached artifacts.
+
+Running the Tests
+-----------------
+Aurora has a comprehensive unit test suite. To run the tests use
+
+    ./gradlew build
+
+Gradle will only re-run tests when their dependencies have changed. To force a re-run of all
+tests use
+
+    ./gradlew clean build
+
+Running the build with code quality checks
+------------------------------------------
+To speed up development iteration, the plain gradle commands will not run static analysis tools.
+However, you should run these before posting a review diff, and **always** run this before pushing a
+commit to origin/master.
+
+    ./gradlew build -Pq
+
+Running integration tests
+-------------------------
+To run the same tests that are run in the Apache Aurora continuous integration
+environment:
+
+    ./build-support/jenkins/build.sh
+
+In addition, there is an end-to-end test that runs a suite of aurora commands
+using a virtual cluster:
+
+    ./src/test/sh/org/apache/aurora/e2e/test_end_to_end.sh
+
+Creating a bundle for deployment
+--------------------------------
+Gradle can create a zip file containing Aurora, all of its dependencies, and a launch script with
+
+    ./gradlew distZip
+
+or a tar file containing the same files with
+
+    ./gradlew distTar
+
+The output file will be written to `dist/distributions/aurora-scheduler.zip` or
+`dist/distributions/aurora-scheduler.tar`.
+
+
+
+Developing Aurora Java code
+===========================
+
+Setting up an IDE
+-----------------
+Gradle can generate project files for your IDE. To generate an IntelliJ IDEA project run
+
+    ./gradlew idea
+
+and import the generated `aurora.ipr` file.
+
+Adding or Upgrading a Dependency
+--------------------------------
+New dependencies can be added from Maven central by adding a `compile` dependency to `build.gradle`.
+For example, to add a dependency on `com.example`'s `example-lib` 1.0 add this block:
+
+    compile 'com.example:example-lib:1.0'
+
+NOTE: Anyone thinking about adding a new dependency should first familiarize themselves with the
+Apache Foundation's third-party licensing
+[policy](http://www.apache.org/legal/resolved.html#category-x).
+
+
+
+Developing the Aurora Build System
+==================================
+
+Bootstrapping Gradle
+--------------------
+The following files were autogenerated by `gradle wrapper` using gradle's
+[Wrapper](http://www.gradle.org/docs/current/dsl/org.gradle.api.tasks.wrapper.Wrapper.html) plugin and
+should not be modified directly:
+
+    ./gradlew
+    ./gradlew.bat
+    ./gradle/wrapper/gradle-wrapper.jar
+    ./gradle/wrapper/gradle-wrapper.properties
+
+To upgrade Gradle unpack the new version somewhere, run `/path/to/new/gradle wrapper` in the
+repository root and commit the changed files.
+
diff --git a/source/documentation/0.14.0/development/thermos.md b/source/documentation/0.14.0/development/thermos.md
new file mode 100644
index 0000000..0efe130
--- /dev/null
+++ b/source/documentation/0.14.0/development/thermos.md
@@ -0,0 +1,126 @@
+The Python components of Aurora are built using [Pants](https://pantsbuild.github.io).
+
+
+Python Build Conventions
+========================
+The Python code is laid out according to the following conventions:
+
+1. 1 `BUILD` per 3rd level directory. For a list of current top-level packages run:
+
+        % find src/main/python -maxdepth 3 -mindepth 3 -type d |\
+        while read dname; do echo $dname |\
+            sed 's@src/main/python/\(.*\)/\(.*\)/\(.*\).*@\1.\2.\3@'; done
+
+2.  Each `BUILD` file exports 1
+    [`python_library`](https://pantsbuild.github.io/build_dictionary.html#bdict_python_library)
+    that provides a
+    [`setup_py`](https://pantsbuild.github.io/build_dictionary.html#setup_py)
+    containing each
+    [`python_binary`](https://pantsbuild.github.io/build_dictionary.html#python_binary)
+    in the `BUILD` file, named the same as the directory it's in so that it can be referenced
+    without a ':' character. The `sources` field in the `python_library` will almost always be
+    `rglobs('*.py')`.
+
+3.  Other BUILD files may only depend on this single public `python_library`
+    target. Any other target is considered a private implementation detail and
+    should be prefixed with an `_`.
+
+4.  `python_binary` targets are always named the same as the exported console script.
+
+5.  `python_binary` targets must have identical `dependencies` to the `python_library` exported
+    by the package and must use `entry_point`.
+
+    This means a PEX file generated by pants will contain exactly the same files that will be
+    available on the `PYTHONPATH` in the case of `pip install` of the corresponding library
+    target. This will help our migration off of Pants in the future.
+
+Annotated example - apache.thermos.runner
+-----------------------------------------
+
+    % find src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner/__init__.py
+    src/main/python/apache/thermos/runner/thermos_runner.py
+    src/main/python/apache/thermos/runner/BUILD
+    % cat src/main/python/apache/thermos/runner/BUILD
+    # License boilerplate omitted
+    import os
+
+
+    # Private target so that a setup_py can exist without a circular dependency. Only targets within
+    # this file should depend on this.
+    python_library(
+      name = '_runner',
+      # The target covers every python file under this directory and subdirectories.
+      sources = rglobs('*.py'),
+      dependencies = [
+        '3rdparty/python:twitter.common.app',
+        '3rdparty/python:twitter.common.log',
+        # Source dependencies are always referenced without a ':'.
+        'src/main/python/apache/thermos/common',
+        'src/main/python/apache/thermos/config',
+        'src/main/python/apache/thermos/core',
+      ],
+    )
+
+    # Binary target for thermos_runner.pex. Nothing should depend on this - it's only used as an
+    # argument to ./pants binary.
+    python_binary(
+      name = 'thermos_runner',
+      # Use entry_point, not source so the files used here are the same ones tests see.
+      entry_point = 'apache.thermos.bin.thermos_runner',
+      dependencies = [
+        # Notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+    )
+
+    # The public library that everyone importing the runner symbols uses.
+    # The test targets and any other dependent source code should depend on this.
+    python_library(
+      name = 'runner',
+      dependencies = [
+        # Again, notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+      # We always provide a setup_py. This will cause any dependee libraries to automatically
+      # reference this library in their requirements.txt rather than copy the source files into their
+      # sdist.
+      provides = setup_py(
+        # Conventionally named and versioned.
+        name = 'apache.thermos.runner',
+        version = open(os.path.join(get_buildroot(), '.auroraversion')).read().strip().upper(),
+      ).with_binaries({
+        # Every binary in this file should also be repeated here.
+        # Always use the dict-form of .with_binaries so that commands with dashes in their names are
+        # supported.
+        # The console script name is always the same as the PEX with .pex stripped.
+        'thermos_runner': ':thermos_runner',
+      }),
+    )
+
+
+
+Thermos Test resources
+======================
+
+The Aurora source repository and distributions contain several
+[binary files](../../src/test/resources/org/apache/thermos/root/checkpoints) to
+qualify the backwards-compatibility of thermos with checkpoint data. Since
+thermos persists state to disk (to be read by the thermos observer), it is important that we have
+tests that prevent regressions affecting the ability to parse previously-written data.
+
+The files included represent persisted checkpoints that exercise different
+features of thermos. The existing files should not be modified unless
+we are accepting backwards incompatibility, such as with a major release.
+
+It is not practical to write source code to generate these files on the fly,
+as source would be vulnerable to drift (e.g. due to refactoring) in ways
+that would undermine the goal of ensuring backwards compatibility.
+
+The most common reason to add a new checkpoint file would be to provide
+coverage for new thermos features that alter the data format. This is
+accomplished by writing and running a
+[job configuration](../../reference/configuration/) that exercises the feature, and
+copying the checkpoint file from the sandbox directory; by default this is
+`/var/run/thermos/checkpoints/<aurora task id>`.
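+
+A minimal sketch of that last step, using the paths described above:
+
+    cp /var/run/thermos/checkpoints/<aurora task id> \
+       src/test/resources/org/apache/thermos/root/checkpoints/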
diff --git a/source/documentation/0.14.0/development/thrift.md b/source/documentation/0.14.0/development/thrift.md
new file mode 100644
index 0000000..ec3cd3c
--- /dev/null
+++ b/source/documentation/0.14.0/development/thrift.md
@@ -0,0 +1,54 @@
+Thrift
+======
+
+Aurora uses [Apache Thrift](https://thrift.apache.org/) for representing structured data in
+client/server RPC protocol as well as for internal data storage. While Thrift is capable of
+correctly handling additions and renames of the existing members, field removals must be done
+carefully to ensure backwards compatibility and provide a predictable deprecation cycle. This
+document describes general guidelines for making Thrift schema changes to the existing fields in
+[api.thrift](https://github.com/apache/aurora/blob/rel/0.14.0/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+It is highly recommended to go through the
+[Thrift: The Missing Guide](http://diwakergupta.github.io/thrift-missing-guide/) first to refresh on
+basic Thrift schema concepts.
+
+Checklist
+---------
+Every existing Thrift schema modification is unique in its requirements and must be analyzed
+carefully to identify its scope and expected consequences. The following checklist may help in that
+analysis:
+* Is this a new field/struct? If yes, go ahead
+* Is this a pure field/struct rename without any type/structure change? If yes, go ahead and rename
+* Anything else, read further to make sure your change is properly planned
+
+Deprecation cycle
+-----------------
+Any time a breaking change (e.g.: field replacement or removal) is required, the following cycle
+must be followed:
+
+### vCurrent
+The change is applied in a way that does not prevent a scheduler/client at this version from
+communicating with a scheduler/client at vCurrent-1.
+* Do not remove or rename the old field
+* Add a new field as an eventual replacement of the old one and implement a dual read/write
+anywhere the old field is used. If a thrift struct is mapped in the DB store, make sure both columns
+are marked as `NOT NULL`
+* Check [storage.thrift](https://github.com/apache/aurora/blob/rel/0.14.0/api/src/main/thrift/org/apache/aurora/gen/storage.thrift) to see if
+the affected struct is stored in Aurora scheduler storage. If so, it's almost certainly also
+necessary to perform a [DB migration](../db-migration/).
+* Add a deprecation jira ticket into the vCurrent+1 release candidate
+* Add a TODO for the deprecated field mentioning the jira ticket
+
+### vCurrent+1
+Finalize the change by removing the deprecated fields from the Thrift schema.
+* Drop any dual read/write routines added in the previous version
+* Remove thrift backfilling in scheduler
+* Remove the deprecated Thrift field
+
+Testing
+-------
+It's always advisable to test your changes in the local vagrant environment to build more
+confidence that your change is backwards compatible. It's easy to simulate different
+client/scheduler versions by playing with `aurorabuild` command. See [this document](../../getting-started/vagrant/)
+for more.
+
diff --git a/source/documentation/0.14.0/development/ui.md b/source/documentation/0.14.0/development/ui.md
new file mode 100644
index 0000000..f41614e
--- /dev/null
+++ b/source/documentation/0.14.0/development/ui.md
@@ -0,0 +1,46 @@
+Developing the Aurora Scheduler UI
+==================================
+
+Installing bower (optional)
+----------------------------
+Third party JS libraries used in Aurora (located at 3rdparty/javascript/bower_components) are
+managed by bower, a JS dependency manager. Bower is only required if you plan to add, remove or
+update JS libraries. Bower can be installed using the following command:
+
+    npm install -g bower
+
+Bower depends on node.js and npm. The easiest way to install node on a mac is via brew:
+
+    brew install node
+
+For more node.js installation options refer to https://github.com/joyent/node/wiki/Installation.
+
+More info on installing and using bower can be found at: http://bower.io/. Once installed, you can
+use the following commands to view and modify the bower repo at
+3rdparty/javascript/bower_components:
+
+    bower list
+    bower install <library name>
+    bower remove <library name>
+    bower update <library name>
+    bower help
+
+
+Faster Iteration in Vagrant
+---------------------------
+The scheduler serves UI assets from the classpath. For production deployments this means the assets
+are served from within a jar. However, for faster development iteration, the vagrant image is
+configured to add the `scheduler` subtree of `/vagrant/dist/resources/main` to the head of
+`CLASSPATH`. This path is configured as a shared filesystem to the path on the host system where
+your Aurora repository lives. This means that any updates under `dist/resources/main/scheduler` in
+your checkout will be reflected immediately in the UI served from within the vagrant image.
+
+The one caveat to this is that this path is under `dist` not `src`. This is because the assets must
+be processed by gradle before they can be served. So, unfortunately, you cannot just save your local
+changes and see them reflected in the UI; you must first run `./gradlew processResources`. This is
+less than ideal, but better than having to restart the scheduler after every change. Additionally,
+gradle makes this process somewhat easier with the use of the `--continuous` flag. If you run:
+`./gradlew processResources --continuous` gradle will monitor the filesystem for changes and run the
+task automatically as necessary. This doesn't quite provide hot-reload capabilities, but it does
+allow for <5s from save to changes being visibile in the UI with no further action required on the
+part of the developer.
diff --git a/source/documentation/0.14.0/features/constraints.md b/source/documentation/0.14.0/features/constraints.md
new file mode 100644
index 0000000..4ade848
--- /dev/null
+++ b/source/documentation/0.14.0/features/constraints.md
@@ -0,0 +1,126 @@
+Scheduling Constraints
+======================
+
+By default, Aurora will pick any random agent with sufficient resources
+in order to schedule a task. This scheduling choice can be further
+restricted with the help of constraints.
+
+
+Mesos Attributes
+----------------
+
+Data centers are often organized with hierarchical failure domains.  Common failure domains
+include hosts, racks, rows, and PDUs.  If you have this information available, it is wise to tag
+the Mesos agent with them as
+[attributes](https://mesos.apache.org/documentation/attributes-resources/).
+
+The Mesos agent `--attributes` command line argument can be used to mark agents with
+static key/value pairs, so-called attributes (not to be confused with `--resources`, which are
+dynamic and accounted).
+
+For example, consider the host `cluster1-aaa-03-sr2` and its following attributes (given in
+key:value format): `host:cluster1-aaa-03-sr2` and `rack:aaa`.
+
+Aurora makes these attributes available for matching with scheduling constraints.
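+
+Such an agent could be started along these lines (a sketch; Mesos expects a semicolon-separated
+list of key:value pairs):
+
+    mesos-slave --attributes="host:cluster1-aaa-03-sr2;rack:aaa" ...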
+
+
+Limit Constraints
+-----------------
+
+Limit constraints allow you to control machine diversity. The below
+constraint ensures that no more than two instances of your job may run on a single host.
+Think of this as a "group by" limit.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'host': 'limit:2',
+      }
+      ...
+    )
+
+
+Likewise, you can use constraints to control rack diversity, e.g. at
+most one task per rack:
+
+    constraints = {
+      'rack': 'limit:1',
+    }
+
+Use these constraints sparingly as they can dramatically reduce Tasks' schedulability.
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+
+Value Constraints
+-----------------
+
+Value constraints can be used to express that a certain attribute with a certain value
+should be present on a Mesos agent. For example, the following job would only be
+scheduled on nodes that claim to have an `SSD` as their disk.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'disk': 'SSD',
+      }
+      ...
+    )
+
+
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+Running stateful services
+-------------------------
+
+Aurora is best suited to run stateless applications, but it also accommodates stateful services
+like databases, or services that otherwise need to always run on the same machines.
+
+### Dedicated attribute
+
+Most Mesos attributes are arbitrary and available for custom use.  There is one exception,
+though: the `dedicated` attribute.  Aurora treats this attribute specially: only jobs with a matching
+constraint may run on these machines, and jobs with that constraint will only be scheduled on these machines.
+
+
+#### Syntax
+The dedicated attribute has semantic meaning. The format is `$role(/.*)?`. When a job is created,
+the scheduler requires that the `$role` component matches the `role` field in the job
+configuration, and will reject the job creation otherwise.  The remainder of the attribute is
+free-form. We've developed the idiom of formatting this attribute as `$role/$job`, but do not
+enforce this. For example: a job `devcluster/www-data/prod/hello` with a dedicated constraint set as
+`www-data/web.multi` will have its tasks scheduled only on Mesos agents configured with:
+`--attributes=dedicated:www-data/web.multi`.
+
+A wildcard (`*`) may be used for the role portion of the dedicated attribute, which will allow any
+owner to elect for a job to run on the host(s). For example: tasks from both
+`devcluster/www-data/prod/hello` and `devcluster/vagrant/test/hello` with a dedicated constraint
+formatted as `*/web.multi` will be scheduled only on Mesos agents configured with
+`--attributes=dedicated:*/web.multi`. This may be useful when assembling a virtual cluster of
+machines sharing the same set of traits or requirements.
+
+##### Example
+Consider the following agent command line:
+
+    mesos-slave --attributes="dedicated:db_team/redis" ...
+
+And this job configuration:
+
+    Service(
+      name = 'redis',
+      role = 'db_team',
+      constraints = {
+        'dedicated': 'db_team/redis'
+      }
+      ...
+    )
+
+The job configuration is indicating that it should only be scheduled on agents with the attribute
+`dedicated:db_team/redis`.  Additionally, Aurora will prevent any tasks that do _not_ have that
+constraint from running on those agents.
+
diff --git a/source/documentation/0.14.0/features/containers.md b/source/documentation/0.14.0/features/containers.md
new file mode 100644
index 0000000..15c8a59
--- /dev/null
+++ b/source/documentation/0.14.0/features/containers.md
@@ -0,0 +1,60 @@
+Containers
+==========
+
+Docker
+------
+
+Aurora has optional support for launching Docker containers, if correctly [configured by an Operator](../../operations/configuration/#docker-containers).
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+
+    $ cat /vagrant/examples/jobs/docker/hello_docker.aurora
+    hello_world_proc = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    hello_world_docker = Task(
+      name = 'hello docker',
+      processes = [hello_world_proc],
+      resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB)
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'docker-test',
+        name = 'hello_docker',
+        task = hello_world_docker,
+        container = Container(docker = Docker(image = 'python:2.7'))
+      )
+    ]
+
+
+In order to correctly execute processes inside a job, the docker container must have Python 2.7
+installed. Further details of how to use Docker can be found in the
+[Reference Documentation](../../reference/configuration/#docker-object).
+
+Mesos
+-----
+
+*Note: In order to use filesystem images with Aurora, you must be running at least Mesos 0.28.x*
+
+Aurora supports specifying a task filesystem image to use with the [Mesos containerizer](http://mesos.apache.org/documentation/latest/container-image/).
+This is done by setting the ```container``` property of the Job to a ```Mesos``` container object
+that includes the image to use. Both [AppC](https://github.com/appc/spec/blob/master/SPEC.md) and 
+[Docker](https://github.com/docker/docker/blob/master/image/spec/v1.md) images are supported.
+
+```
+job = Job(
+   ...
+   container = Mesos(image=DockerImage(name='my-image', tag='my-tag'))
+   ...
+)
+```
diff --git a/source/documentation/0.14.0/features/cron-jobs.md b/source/documentation/0.14.0/features/cron-jobs.md
new file mode 100644
index 0000000..bcc94dc
--- /dev/null
+++ b/source/documentation/0.14.0/features/cron-jobs.md
@@ -0,0 +1,124 @@
+# Cron Jobs
+
+Aurora supports execution of scheduled jobs on a Mesos cluster using cron-style syntax.
+
+- [Overview](#overview)
+- [Collision Policies](#collision-policies)
+- [Failure recovery](#failure-recovery)
+- [Interacting with cron jobs via the Aurora CLI](#interacting-with-cron-jobs-via-the-aurora-cli)
+	- [cron schedule](#cron-schedule)
+	- [cron deschedule](#cron-deschedule)
+	- [cron start](#cron-start)
+	- [job killall, job restart, job kill](#job-killall-job-restart-job-kill)
+- [Technical Note About Syntax](#technical-note-about-syntax)
+- [Caveats](#caveats)
+	- [Failovers](#failovers)
+	- [Collision policy is best-effort](#collision-policy-is-best-effort)
+	- [Timezone Configuration](#timezone-configuration)
+
+## Overview
+
+A job is identified as a cron job by the presence of a
+`cron_schedule` attribute containing a cron-style schedule in the
+[`Job`](../../reference/configuration/#job-objects) object. Examples of cron schedules
+include "every 5 minutes" (`*/5 * * * *`), "Fridays at 17:00" (`* 17 * * FRI`), and
+"the 1st and 15th day of the month at 03:00" (`0 3 1,15 *`).
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/cron_hello_world.aurora
+    # A cron job that runs every 5 minutes.
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]
+
+## Collision Policies
+
+The `cron_collision_policy` field specifies the scheduler's behavior when a new cron job is
+triggered while an older run hasn't finished. The scheduler has two policies available:
+
+* `KILL_EXISTING`: The default policy - on a collision the old instances are killed and instances with the current
+configuration are started.
+* `CANCEL_NEW`: On a collision the new run is cancelled.
+
+Note that the use of `CANCEL_NEW` is likely a code smell - interrupted cron jobs should be able
+to recover their progress on a subsequent invocation, otherwise they risk having their work queue
+grow faster than they can process it.
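+
+The policy is set directly on the `Job` object; a minimal sketch:
+
+    Job(
+      cluster = 'devcluster',
+      role = 'www-data',
+      environment = 'test',
+      name = 'cron_hello_world',
+      cron_schedule = '*/5 * * * *',
+      cron_collision_policy = 'CANCEL_NEW',  # cancel a newly triggered run if the previous one is still active
+      task = ...,
+    )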
+
+## Failure recovery
+
+Unlike with services, which Aurora will always re-execute regardless of exit status, instances of
+cron jobs retry according to the `max_task_failures` attribute of the
+[Task](../../reference/configuration/#task-object) object. To get "run-until-success" semantics,
+set `max_task_failures` to `-1`.
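+
+For example, a cron task that retries until it succeeds (the process and resource values are
+illustrative):
+
+    task = Task(
+      name = 'cron_hello_world',
+      processes = [hello_world_proc],
+      resources = Resources(cpu = 0.1, ram = 16*MB, disk = 16*MB),
+      max_task_failures = -1,  # keep retrying the task until it succeeds
+    )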
+
+## Interacting with cron jobs via the Aurora CLI
+
+Most interaction with cron jobs takes place using the `cron` subcommand. See `aurora cron -h`
+for up-to-date usage instructions.
+
+### cron schedule
+Schedules a new cron job on the Aurora cluster for later runs or replaces the existing cron template
+with a new one. Only future runs will be affected; any existing active tasks are left intact.
+
+    $ aurora cron schedule devcluster/www-data/test/cron_hello_world /vagrant/examples/jobs/cron_hello_world.aurora
+
+### cron deschedule
+Deschedules a cron job, preventing future runs but allowing current runs to complete.
+
+    $ aurora cron deschedule devcluster/www-data/test/cron_hello_world
+
+### cron start
+Start a cron job immediately, outside of its normal cron schedule.
+
+    $ aurora cron start devcluster/www-data/test/cron_hello_world
+
+### job killall, job restart, job kill
+Cron jobs create instances running on the cluster that you can interact with like normal Aurora
+tasks with `job kill` and `job restart`.
+
+
+## Technical Note About Syntax
+
+`cron_schedule` uses a restricted subset of BSD crontab syntax. While the
+execution engine currently uses Quartz, the schedule parsing is custom, a subset of FreeBSD
+[crontab(5)](http://www.freebsd.org/cgi/man.cgi?crontab(5)) syntax. See
+[the source](https://github.com/apache/aurora/blob/master/src/main/java/org/apache/aurora/scheduler/cron/CrontabEntry.java#L106-L124)
+for details.
+
+
+## Caveats
+
+### Failovers
+No failover recovery. Aurora does not record the latest minute it fired
+triggers for across failovers. Therefore it's possible to miss triggers
+on failover. Note that this behavior may change in the future.
+
+It's necessary to sync time between schedulers with something like `ntpd`.
+Clock skew could cause double or missed triggers in the case of a failover.
+
+### Collision policy is best-effort
+Aurora aims to always have *at least one copy* of a given instance running at a time - it's
+an AP system, meaning it chooses Availability and Partition Tolerance at the expense of
+Consistency.
+
+If your collision policy was `CANCEL_NEW` and a task has terminated but
+Aurora has not noticed this, Aurora will go ahead and create your new
+task.
+
+If your collision policy was `KILL_EXISTING` and a task was marked `LOST`
+but not yet GCed, Aurora will go ahead and create your new task without
+attempting to kill the old one (outside the GC interval).
+
+### Timezone Configuration
+Cron timezone is configured independently of JVM timezone with the `-cron_timezone` flag and
+defaults to UTC.
diff --git a/source/documentation/0.14.0/features/job-updates.md b/source/documentation/0.14.0/features/job-updates.md
new file mode 100644
index 0000000..de60e1f
--- /dev/null
+++ b/source/documentation/0.14.0/features/job-updates.md
@@ -0,0 +1,111 @@
+Aurora Job Updates
+==================
+
+`Job` configurations can be updated at any point in their lifecycle.
+Usually updates are done incrementally using a process called a *rolling
+upgrade*, in which Tasks are upgraded in small groups, one group at a
+time.  Updates are done using various Aurora Client commands.
+
+
+Rolling Job Updates
+-------------------
+
+There are several sub-commands to manage job updates:
+
+    aurora update start <job key> <configuration file>
+    aurora update info <job key>
+    aurora update pause <job key>
+    aurora update resume <job key>
+    aurora update abort <job key>
+    aurora update list <cluster>
+
+When you `start` a job update, the command will return once it has sent the
+instructions to the scheduler.  At that point, you may view detailed
+progress for the update with the `info` subcommand, in addition to viewing
+graphical progress in the web browser.  You may also get a full listing of
+in-progress updates in a cluster with `list`.
+
+Once an update has been started, you can `pause` to keep the update but halt
+progress.  This can be useful for doing things like debugging a partially-updated
+job to determine whether you would like to proceed.  You can `resume` to
+proceed.
+
+You may `abort` a job update regardless of the state it is in. This will
+instruct the scheduler to completely abandon the job update and leave the job
+in the current (possibly partially-updated) state.
+
+For a configuration update, the Aurora Client calculates required changes
+by examining the current job config state and the new desired job config.
+It then starts a *rolling batched update process* by going through every batch
+and performing these operations:
+
+- If an instance is present in the scheduler but isn't in the new config,
+  then that instance is killed.
+- If an instance is not present in the scheduler but is present in
+  the new config, then the instance is created.
+- If an instance is present in both the scheduler and the new config, then
+  the client diffs both task configs. If it detects any changes, it
+  performs an instance update by killing the old config instance and adding
+  the new config instance.
+
+The Aurora client continues through the instance list until all tasks are
+updated, in `RUNNING`, and healthy for a configurable amount of time.
+If the client determines the update is not going well (a percentage of health
+checks have failed), it cancels the update.
+
+Update cancellation runs a procedure similar to the described above
+update sequence, but in reverse order. New instance configs are swapped
+with old instance configs and batch updates proceed backwards
+from the point where the update failed. E.g., (0,1,2) (3,4,5) (6,7,8-FAIL)
+results in a rollback in order (8,7,6) (5,4,3) (2,1,0).
+
+For details on how to control a job update, please see the
+[UpdateConfig](../../reference/configuration/#updateconfig-objects) configuration object.
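+
+As a rough sketch, a job can tune its rolling update like this (values are illustrative; see the
+UpdateConfig reference above for the authoritative list of fields):
+
+    update_config = UpdateConfig(
+      batch_size = 2,   # update two instances at a time
+      watch_secs = 60,  # how long an updated instance must stay RUNNING and healthy
+    )
+
+    jobs = [
+      Service(
+        update_config = update_config,
+        ...
+      )
+    ]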
+
+
+Coordinated Job Updates
+------------------------
+
+Some Aurora services may benefit from having more control over updates by explicitly
+acknowledging ("heartbeating") job update progress. This may be helpful for mission-critical
+service updates where explicit job health monitoring is vital during the entire job update
+lifecycle. Such job updates would rely on an external service (or a custom client) periodically
+pulsing an active coordinated job update via a
+[pulseJobUpdate RPC](https://github.com/apache/aurora/blob/rel/0.14.0/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+A coordinated update is defined by setting a positive
+[pulse_interval_secs](../../reference/configuration/#updateconfig-objects) value in the job configuration
+file. If no pulses are received within the specified interval, the update will be blocked. A blocked
+update is unable to continue rolling forward (or rolling back) but retains its active status.
+It may only be unblocked by a fresh `pulseJobUpdate` call.
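+
+For example (value illustrative):
+
+    update_config = UpdateConfig(
+      batch_size = 2,
+      watch_secs = 60,
+      pulse_interval_secs = 120,  # block the update if no pulse arrives within two minutes
+    )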
+
+NOTE: A coordinated update starts in `ROLL_FORWARD_AWAITING_PULSE` state and will not make any
+progress until the first pulse arrives. However, a paused update (`ROLL_FORWARD_PAUSED` or
+`ROLL_BACK_PAUSED`) is still considered active and upon resuming will immediately make progress
+provided the pulse interval has not expired.
+
+
+Canary Deployments
+------------------
+
+Canary deployments are a pattern for rolling out updates to a subset of job instances,
+in order to test different code versions alongside the actual production job.
+It is a risk-mitigation strategy for job owners and commonly used in a form where
+job instance 0 runs with a different configuration than the instances 1-N.
+
+For example, consider a job with 4 instances that each
+request 1 core of cpu, 1 GB of RAM, and 1 GB of disk space as specified
+in the configuration file `hello_world.aurora`. If you want to
+update it so it requests 2 GB of RAM instead of 1, you can create a new
+configuration file called `new_hello_world.aurora` and
+issue
+
+    aurora update start <job_key_value>/0-1 new_hello_world.aurora
+
+This results in instances 0 and 1 having 1 cpu, 2 GB of RAM, and 1 GB of disk space,
+while instances 2 and 3 have 1 cpu, 1 GB of RAM, and 1 GB of disk space. If instance 3
+dies and restarts, it restarts with 1 cpu, 1 GB RAM, and 1 GB disk space.
+
+So that means there are two simultaneous task configurations for the same job
+at the same time, just valid for different ranges of instances. While this isn't a recommended
+pattern, it is valid and supported by the Aurora scheduler.
diff --git a/source/documentation/0.14.0/features/multitenancy.md b/source/documentation/0.14.0/features/multitenancy.md
new file mode 100644
index 0000000..924e928
--- /dev/null
+++ b/source/documentation/0.14.0/features/multitenancy.md
@@ -0,0 +1,62 @@
+Multitenancy
+============
+
+Aurora is a multi-tenant system that can run jobs of multiple clients/tenants.
+Going beyond the [resource isolation on an individual host](../resource-isolation/), it is
+crucial to prevent those jobs from stepping on each other's toes.
+
+
+Job Namespaces
+--------------
+
+The namespace for jobs in Aurora follows a hierarchical structure. This is meant to make it easier
+to differentiate between different jobs. A job key consists of four parts. The four parts are
+`<cluster>/<role>/<environment>/<jobname>` in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts.
+* Environment names are namespaces.
+* Jobname is the custom name of your job.
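+
+For example, the (hypothetical) job key `devcluster/www-data/test/hello_world` identifies the job
+`hello_world`, owned by the role `www-data`, running in the `test` environment of the `devcluster`
+installation.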
+
+Role names correspond to user accounts. They are used for
+[authentication](../../operations/security/), as the linux user used to run jobs, and for the
+assignment of [quota](#preemption). If you don't know what accounts are available, contact your
+sysadmin.
+
+The environment component in the job key serves as a namespace. The values for
+environment are validated in the client and the scheduler so as to allow any of `devel`, `test`,
+`production`, and any value matching the regular expression `staging[0-9]*`.
+
+None of the values imply any difference in the scheduling behavior. Conventionally, the
+"environment" is set so as to indicate a certain level of stability in the behavior of the job
+by ensuring that an appropriate level of testing has been performed on the application code. e.g.
+in the case of a typical Job, releases may progress through the following phases in order of
+increasing level of stability: `devel`, `test`, `staging`, `production`.
+
+
+Preemption
+----------
+
+In order to guarantee that important production jobs are always running, Aurora supports
+preemption.
+
+Let's consider a pending job that is a candidate for scheduling but cannot be scheduled due to a
+resource shortage. Active tasks can become the victim of preemption if:
+
+ - both candidate and victim are owned by the same role and the
+   [priority](../../reference/configuration/#job-objects) of a victim is lower than the
+   [priority](../../reference/configuration/#job-objects) of the candidate.
+ - OR a victim is non-[production](../../reference/configuration/#job-objects) and the candidate is
+   [production](../../reference/configuration/#job-objects).
+
+In other words, tasks from [production](../../reference/configuration/#job-objects) jobs may preempt
+tasks from any non-production job. However, a production task may only be preempted by tasks from
+production jobs in the same role with higher [priority](../../reference/configuration/#job-objects).
+
+Aurora requires resource quotas for [production non-dedicated jobs](../../reference/configuration/#job-objects).
+Quota is enforced at the job role level and, when set, defines a non-preemptible pool of compute resources within
+that role. All job types (service, adhoc or cron) require role resource quota unless a job has
+[dedicated constraint set](../constraints/#dedicated-attribute).
+
+To grant quota to a particular role in production, an operator can use the command
+`aurora_admin set_quota`.
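+
+For example (a sketch only; the argument order and unit syntax may differ between versions, so
+consult the command's built-in help):
+
+    aurora_admin set_quota devcluster www-data 100.0 100GB 400GB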
diff --git a/source/documentation/0.14.0/features/resource-isolation.md b/source/documentation/0.14.0/features/resource-isolation.md
new file mode 100644
index 0000000..a1c3b98
--- /dev/null
+++ b/source/documentation/0.14.0/features/resource-isolation.md
@@ -0,0 +1,179 @@
+Resource Isolation and Sizing
+==============================
+
+- [Isolation](#isolation)
+- [Sizing](#sizing)
+- [Oversubscription](#oversubscription)
+
+
+Isolation
+---------
+
+Aurora is a multi-tenant system; a single software instance runs on a
+server, serving multiple clients/tenants. To share resources among
+tenants, it implements isolation of:
+
+* CPU
+* memory
+* disk space
+
+CPU is a soft limit, and handled differently from memory and disk space.
+Too low a CPU value results in throttling your application and
+slowing it down. Memory and disk space are both hard limits; when your
+application goes over these values, it's killed.
+
+### CPU Isolation
+
+Mesos uses a quota-based CPU scheduler (the *Completely Fair Scheduler*)
+to provide consistent and predictable performance.  This is effectively
+a guarantee of resources -- you receive at least what you requested, but
+also no more than you've requested.
+
+The scheduler gives applications a CPU quota for every 100 ms interval.
+When an application uses its quota for an interval, it is throttled for
+the rest of the 100 ms. Usage resets for each interval and unused
+quota does not carry over.
+
+For example, an application specifying 4.0 CPU has access to 400 ms of
+CPU time every 100 ms. This CPU quota can be used in different ways,
+depending on the application and available resources. Consider the
+scenarios shown in this diagram.
+
+![CPU Availability](../images/CPUavailability.png)
+
+* *Scenario A*: the application can use up to 4 cores continuously for
+every 100 ms interval. It is never throttled and starts processing
+new requests immediately.
+
+* *Scenario B*: the application uses up to 8 cores (depending on
+availability) but is throttled after 50 ms. The CPU quota resets at the
+start of each new 100 ms interval.
+
+* *Scenario C*: like Scenario A, but there is a garbage collection
+event in the second interval that consumes all CPU quota. The
+application throttles for the remaining 75 ms of that interval and
+cannot service requests until the next interval. In this example, the
+garbage collection finished in one interval but, depending on how much
+garbage needs collecting, it may take more than one interval and further
+delay service of requests.
+
+*Technical Note*: Mesos considers logical cores, also known as
+hyperthreading or SMT cores, as the unit of CPU.
+
+### Memory Isolation
+
+Mesos uses dedicated memory allocation. Your application always has
+access to the amount of memory specified in your configuration. The
+application's memory use is defined as the sum of the resident set size
+(RSS) of all processes in a shard. Each shard is considered
+independently.
+
+In other words, say you specified a memory size of 10GB. Each shard
+would receive 10GB of memory. If an individual shard's memory demands
+exceed 10GB, that shard is killed, but the other shards continue
+working.
+
+*Technical note*: Total memory size is not enforced at allocation time,
+so your application can request more than its allocation without getting
+an ENOMEM. However, it will be killed shortly after.
+
+### Disk Space
+
+Disk space used by your application is defined as the sum of the files'
+disk space in your application's directory, including the `stdout` and
+`stderr` logged from your application. Each shard is considered
+independently. You should use off-node storage for your application's
+data whenever possible.
+
+In other words, say you specified disk space size of 100MB. Each shard
+would receive 100MB of disk space. If an individual shard's disk space
+demands exceed 100MB, that shard is killed, but the other shards
+continue working.
+
+After your application finishes running, its allocated disk space is
+reclaimed. Thus, your job's final action should move any disk content
+that you want to keep, such as logs, to your home file system or other
+less transitory storage. Disk reclamation takes place an unspecified
+amount of time after the application finishes; until then, the disk contents
+are still available, but you shouldn't count on them being so.
+
+*Technical note*: Disk space is not enforced at write time, so your
+application can write above its quota without getting an ENOSPC, but it
+will be killed shortly after. This is subject to change.
+
+### GPU Isolation
+
+GPU isolation will be supported for Nvidia devices starting from Mesos 0.29.0.
+Access to the allocated units will be exclusive with no sharing between tasks
+allowed (e.g. no fractional GPU allocation). Until official documentation is released,
+see [Mesos design document](https://docs.google.com/document/d/10GJ1A80x4nIEo8kfdeo9B11PIbS1xJrrB4Z373Ifkpo/edit#heading=h.w84lz7p4eexl)
+for more details.
+
+### Other Resources
+
+Other resources, such as network bandwidth, do not have any performance
+guarantees. For some resources, such as memory bandwidth, there are no
+practical sharing methods, so some application combinations colocated on
+the same host may cause contention.
+
+
+Sizing
+-------
+
+### CPU Sizing
+
+To correctly size Aurora-run Mesos tasks, specify a per-shard CPU value
+that lets the task run at its desired performance when at peak load
+distributed across all shards. Include reserve capacity of at least 50%,
+possibly more, depending on how critical your service is (or how
+confident you are about your original estimate :-)), ideally by
+increasing the number of shards to also improve resiliency. When running
+your application, observe its CPU stats over time. If consistently at or
+near your quota during peak load, you should consider increasing either
+per-shard CPU or the number of shards.
+
+### Memory Sizing
+
+Size for your application's peak requirement. Observe the per-instance
+memory statistics over time, as memory requirements can vary over
+different periods. Remember that if your application exceeds its memory
+value, it will be killed, so you should also add a safety margin of
+around 10-20%. If you have the ability to do so, you may also want to
+put alerts on the per-instance memory.
+
+### Disk Space Sizing
+
+Size for your application's peak requirement. Rotate and discard log
+files as needed to stay within your quota. When running a Java process,
+add the maximum size of the Java heap to your disk space requirement, in
+order to account for an out of memory error dumping the heap
+into the application's sandbox space.
+
+### GPU Sizing
+
+GPU is highly dependent on your application requirements and is only limited
+by the number of physical GPU units available on a target box.
+
+Oversubscription
+----------------
+
+**WARNING**: This feature is currently in alpha status. Do not use it in production clusters!
+
+Mesos [supports a concept of revocable tasks](http://mesos.apache.org/documentation/latest/oversubscription/)
+by oversubscribing machine resources by the amount deemed safe to not affect the existing
+non-revocable tasks. Aurora now supports revocable jobs via a `tier` setting with the value
+`revocable`.
+
+The Aurora scheduler must be configured to receive revocable offers from Mesos and accept revocable
+jobs. If not configured properly, revocable tasks will never get assigned to hosts and will stay in
+`PENDING`. Set this scheduler flag to allow receiving revocable Mesos offers:
+
+    -receive_revocable_resources=true
+
+Specify a tier configuration file path (unless you want to use the [default](https://github.com/apache/aurora/blob/rel/0.14.0/src/main/resources/org/apache/aurora/scheduler/tiers.json)):
+
+    -tier_config=path/to/tiers/config.json
+
+
+See the [Configuration Reference](../../reference/configuration/) for details on how to mark a job
+as being revocable.
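+
+A minimal sketch of a revocable job, assuming the Job-level `tier` field described in the
+Configuration Reference (the task definition is omitted; all names are illustrative):
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'prod',
+        name = 'batch_helper',
+        tier = 'revocable',   # run on oversubscribed (revocable) resources
+        task = batch_task     # assumed to be defined elsewhere
+      )
+    ]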
diff --git a/source/documentation/0.14.0/features/service-discovery.md b/source/documentation/0.14.0/features/service-discovery.md
new file mode 100644
index 0000000..5a97eaf
--- /dev/null
+++ b/source/documentation/0.14.0/features/service-discovery.md
@@ -0,0 +1,42 @@
+Service Discovery
+=================
+
+It is possible for the Aurora executor to announce tasks into ServerSets for
+the purpose of service discovery.  ServerSets use the Zookeeper [group membership pattern](http://zookeeper.apache.org/doc/trunk/recipes.html#sc_outOfTheBox)
+of which there are several reference implementations:
+
+  - [C++](https://github.com/apache/mesos/blob/master/src/zookeeper/group.cpp)
+  - [Java](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/zookeeper/ServerSetImpl.java#L221)
+  - [Python](https://github.com/twitter/commons/blob/master/src/python/twitter/common/zookeeper/serverset/serverset.py#L51)
+
+These can also be used natively in Finagle using the [ZookeeperServerSetCluster](https://github.com/twitter/finagle/blob/master/finagle-serversets/src/main/scala/com/twitter/finagle/zookeeper/ZookeeperServerSetCluster.scala).
+
+For more information about how to configure announcing, see the [Configuration Reference](../../reference/configuration/).
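+
+For example, a job can opt into announcing by setting the `announce` field to an `Announcer` object
+(a sketch only; the field names are described in the Configuration Reference, and the port name and
+task are illustrative):
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'prod',
+        name = 'hello',
+        task = hello_task,    # assumed to be defined elsewhere
+        announce = Announcer(primary_port = 'http')
+      )
+    ]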
+
+Using Mesos DiscoveryInfo
+-------------------------
+Experimental support for populating DiscoveryInfo in Mesos has been introduced in Aurora. This can be used to build
+a custom service discovery system that does not rely on ZooKeeper. Please see the `Service Discovery` section in the
+[Mesos Framework Development guide](http://mesos.apache.org/documentation/latest/app-framework-development-guide/) for
+an explanation of the protobuf message in Mesos.
+
+To use this feature, enable the `--populate_discovery_info` flag on the scheduler. All jobs started by the scheduler
+afterwards will have their portmap populated in Mesos and discoverable via the `/state` endpoint on the Mesos master and agent.
+
+### Using Mesos DNS
+An example is [Mesos-DNS](https://github.com/mesosphere/mesos-dns), which is able to generate multiple DNS
+records. With the current implementation, the example job with key `devcluster/vagrant/test/http-example` generates at
+least the following:
+
+1. An A record for `http_example.test.vagrant.aurora.mesos` (which only includes the IP address);
+2. A [SRV record](https://en.wikipedia.org/wiki/SRV_record) for
+   `_http_example.test.vagrant._tcp.aurora.mesos`, which includes the IP address and every port. This should only
+   be used if the service has one port.
+3. A SRV record `_{port-name}._http_example.test.vagrant._tcp.aurora.mesos` for each port name
+   defined. This should be used when the service has multiple ports.
+
+Things to note:
+
+1. The domain part (`.mesos` in the above example) can be configured in [Mesos DNS](http://mesosphere.github.io/mesos-dns/docs/configuration-parameters.html);
+2. Right now, the portmap and port aliases in the announcer object are not reflected in DiscoveryInfo, and therefore not visible in
+   Mesos DNS records either. This is because they are only resolved in the Thermos executor.
diff --git a/source/documentation/0.14.0/features/services.md b/source/documentation/0.14.0/features/services.md
new file mode 100644
index 0000000..7d7d5e1
--- /dev/null
+++ b/source/documentation/0.14.0/features/services.md
@@ -0,0 +1,99 @@
+Long-running Services
+=====================
+
+Jobs that are always restarted on completion, whether successful or unsuccessful,
+are called services. This is useful for long-running processes
+such as web services that should always be running, unless stopped explicitly.
+
+
+Service Specification
+---------------------
+
+A job is identified as a service by the presence of the flag
+`service=True` in the [`Job`](../../reference/configuration/#job-objects) object.
+The `Service` alias can be used as shorthand for `Job` with `service=True`.
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/hello_world.aurora
+    hello = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = SequentialTask(
+      processes = [hello],
+      resources = Resources(cpu = 1.0, ram = 128*MB, disk = 128*MB)
+    )
+
+    jobs = [
+      Service(
+        task = task,
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'prod',
+        name = 'hello'
+      )
+    ]
+
+
+Jobs without the service bit set only restart up to `max_task_failures` times and only if they
+terminated unsuccessfully either due to human error or machine failure (see the
+[`Job`](../../reference/configuration/#job-objects) object for details).
+
+
+Ports
+-----
+
+In order to be useful, most services have to bind to one or more ports. Aurora enables this
+use case via the [`thermos.ports` namespace](../../reference/configuration/#thermos-namespace), which
+allows processes to request arbitrarily named ports:
+
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[http]}}'
+    )
+
+
+When this process is included in a job, the job will be allocated a port, and the command line
+will be replaced with something like:
+
+    ./run_nginx.sh -port 42816
+
+Where 42816 happens to be the allocated port.
+
+For details on how to enable clients to discover this dynamically assigned port, see our
+[Service Discovery](../service-discovery/) documentation.
+
+
+Health Checking
+---------------
+
+Typically, the Thermos executor monitors processes within a task only by liveness of the forked
+process. In addition to that, Aurora has support for rudimentary health checking: either via HTTP
+or via custom shell scripts.
+
+For example, simply by requesting a `health` port, a process can request to be health checked
+via repeated calls to the `/health` endpoint:
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[health]}}'
+    )
+
+Please see the
+[configuration reference](../../reference/configuration/#healthcheckconfig-objects)
+for configuration options for this feature.
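+
+For illustration, a sketch of tuning these options via the Job-level `health_check_config` field
+(field names as shown in the configuration reference; the values and surrounding job are
+illustrative only):
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'prod',
+        name = 'nginx',
+        task = nginx_task,              # assumed to be defined elsewhere
+        health_check_config = HealthCheckConfig(
+          initial_interval_secs = 15,   # wait before the first health check
+          interval_secs = 10,           # seconds between checks
+          timeout_secs = 1,             # per-check timeout
+          max_consecutive_failures = 3  # failures tolerated before the task is killed
+        )
+      )
+    ]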
+
+You can pause health checking by touching a file inside of your sandbox, named `.healthchecksnooze`.
+As long as that file is present, health checks will be disabled, enabling users to gather core
+dumps or other performance measurements without worrying about Aurora's health check killing
+their process.
+
+WARNING: Remember to remove this when you are done, otherwise your instance will have permanently
+disabled health checks.
diff --git a/source/documentation/0.14.0/features/sla-metrics.md b/source/documentation/0.14.0/features/sla-metrics.md
new file mode 100644
index 0000000..c178c50
--- /dev/null
+++ b/source/documentation/0.14.0/features/sla-metrics.md
@@ -0,0 +1,178 @@
+Aurora SLA Measurement
+======================
+
+- [Overview](#overview)
+- [Metric Details](#metric-details)
+  - [Platform Uptime](#platform-uptime)
+  - [Job Uptime](#job-uptime)
+  - [Median Time To Assigned (MTTA)](#median-time-to-assigned-\(mtta\))
+  - [Median Time To Running (MTTR)](#median-time-to-running-\(mttr\))
+- [Limitations](#limitations)
+
+## Overview
+
+The primary goal of the feature is collection and monitoring of Aurora job SLA (Service Level
+Agreements) metrics that define a contractual relationship between the Aurora/Mesos platform
+and hosted services.
+
+The Aurora SLA feature is by default only enabled for service (non-cron)
+production jobs (`"production=True"` in your `.aurora` config). It can be enabled for
+non-production services by an operator via the scheduler command line flag `-sla_non_prod_metrics`.
+
+Counters that track SLA measurements are computed periodically within the scheduler.
+The individual instance metrics are refreshed every minute (configurable via
+`sla_stat_refresh_interval`). The instance counters are subsequently aggregated by
+relevant grouping types before being exported to the scheduler's `/vars` endpoint (when using `vagrant`,
+that would be `http://192.168.33.7:8081/vars`).
+
+
+## Metric Details
+
+### Platform Uptime
+
+*Aggregate amount of time a job spends in a non-runnable state due to platform unavailability
+or scheduling delays. This metric tracks Aurora/Mesos uptime performance and reflects on any
+system-caused downtime events (tasks LOST or DRAINED). Any user-initiated task kills/restarts
+will not degrade this metric.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_platform_uptime_percent`
+* Per cluster - `sla_cluster_platform_uptime_percent`
+
+**Units:** percent
+
+A fault in the task environment may cause Aurora and Mesos to have different views of the task state
+or lose track of the task's existence. In such cases, the service task is marked as LOST and
+rescheduled by Aurora. For example, this may happen when the task stays in ASSIGNED or STARTING
+for too long or the Mesos agent becomes unhealthy (or disappears completely). The time between
+task entering LOST and its replacement reaching RUNNING state is counted towards platform downtime.
+
+Another example of a platform downtime event is the administrator-requested task rescheduling. This
+happens during planned Mesos agent maintenance when all agent tasks are marked as DRAINED and
+rescheduled elsewhere.
+
+To accurately calculate Platform Uptime, we must separate platform incurred downtime from user
+actions that put a service instance in a non-operational state. It is simpler to isolate
+user-incurred downtime and treat all other downtime as platform incurred.
+
+Currently, a user can cause a healthy service (task) downtime in only two ways: via `killTasks`
+or `restartShards` RPCs. For both, their affected tasks leave an audit state transition trail
+relevant to uptime calculations. By applying a special "SLA meaning" to exposed task state
+transition records, we can build a deterministic downtime trace for every given service instance.
+
+A task going through a state transition carries one of three possible SLA meanings
+(see [SlaAlgorithm.java](https://github.com/apache/aurora/blob/rel/0.14.0/src/main/java/org/apache/aurora/scheduler/sla/SlaAlgorithm.java) for
+sla-to-task-state mapping):
+
+* Task is UP: starts a period where the task is considered to be up and running from the Aurora
+  platform standpoint.
+
+* Task is DOWN: starts a period where the task cannot reach the UP state for some
+  non-user-related reason. Counts towards instance downtime.
+
+* Task is REMOVED from SLA: starts a period where the task is not expected to be UP due to
+  user initiated action or failure. We ignore this period for the uptime calculation purposes.
+
+This metric is recalculated over the last sampling period (last minute) to account for
+any UP/DOWN/REMOVED events. It ignores any UP/DOWN events not immediately adjacent to the
+sampling interval as well as adjacent REMOVED events.
+
+### Job Uptime
+
+*Percentage of the job instances considered to be in RUNNING state for the specified duration
+relative to request time. This is a purely application-side metric that considers the aggregate
+uptime of all RUNNING instances. Any user- or platform-initiated restarts directly affect
+this metric.*
+
+**Collection scope:** We currently expose job uptime values at 5 pre-defined
+percentiles (50th, 75th, 90th, 95th, and 99th):
+
+* `sla_<job_key>_job_uptime_50_00_sec`
+* `sla_<job_key>_job_uptime_75_00_sec`
+* `sla_<job_key>_job_uptime_90_00_sec`
+* `sla_<job_key>_job_uptime_95_00_sec`
+* `sla_<job_key>_job_uptime_99_00_sec`
+
+**Units:** seconds
+
+You can also get customized real-time stats from the Aurora client. See `aurora sla -h` for
+more details.
+
+### Median Time To Assigned (MTTA)
+
+*Median time a job spends waiting for its tasks to be assigned to a host. This is a combined
+metric that helps track the dependency of scheduling performance on the requested resources
+(user scope) as well as the internal scheduler bin-packing algorithm efficiency (platform scope).*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtta_ms`
+* Per cluster - `sla_cluster_mtta_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in
+[ResourceAggregates.java](https://github.com/apache/aurora/blob/rel/0.14.0/src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java)
+  * By CPU:
+    * `sla_cpu_small_mtta_ms`
+    * `sla_cpu_medium_mtta_ms`
+    * `sla_cpu_large_mtta_ms`
+    * `sla_cpu_xlarge_mtta_ms`
+    * `sla_cpu_xxlarge_mtta_ms`
+  * By RAM:
+    * `sla_ram_small_mtta_ms`
+    * `sla_ram_medium_mtta_ms`
+    * `sla_ram_large_mtta_ms`
+    * `sla_ram_xlarge_mtta_ms`
+    * `sla_ram_xxlarge_mtta_ms`
+  * By DISK:
+    * `sla_disk_small_mtta_ms`
+    * `sla_disk_medium_mtta_ms`
+    * `sla_disk_large_mtta_ms`
+    * `sla_disk_xlarge_mtta_ms`
+    * `sla_disk_xxlarge_mtta_ms`
+
+**Units:** milliseconds
+
+MTTA only considers instances that have already reached ASSIGNED state and ignores those
+that are still PENDING. This ensures straggler instances (e.g. with unreasonable resource
+constraints) do not affect metric curves.
+
+### Median Time To Running (MTTR)
+
+*Median time a job waits for its tasks to reach RUNNING state. This is a comprehensive metric
+reflecting the overall time it takes for Aurora/Mesos to start executing user content.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mttr_ms`
+* Per cluster - `sla_cluster_mttr_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in
+[ResourceAggregates.java](https://github.com/apache/aurora/blob/rel/0.14.0/src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java)
+  * By CPU:
+    * `sla_cpu_small_mttr_ms`
+    * `sla_cpu_medium_mttr_ms`
+    * `sla_cpu_large_mttr_ms`
+    * `sla_cpu_xlarge_mttr_ms`
+    * `sla_cpu_xxlarge_mttr_ms`
+  * By RAM:
+    * `sla_ram_small_mttr_ms`
+    * `sla_ram_medium_mttr_ms`
+    * `sla_ram_large_mttr_ms`
+    * `sla_ram_xlarge_mttr_ms`
+    * `sla_ram_xxlarge_mttr_ms`
+  * By DISK:
+    * `sla_disk_small_mttr_ms`
+    * `sla_disk_medium_mttr_ms`
+    * `sla_disk_large_mttr_ms`
+    * `sla_disk_xlarge_mttr_ms`
+    * `sla_disk_xxlarge_mttr_ms`
+
+**Units:** milliseconds
+
+MTTR only considers instances in RUNNING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+## Limitations
+
+* The availability of Aurora SLA metrics is bound by the scheduler availability.
+
+* All metrics are calculated at a pre-defined interval (currently set at 1 minute).
+  Scheduler restarts may result in missed collections.
diff --git a/source/documentation/0.14.0/features/webhooks.md b/source/documentation/0.14.0/features/webhooks.md
new file mode 100644
index 0000000..075aeec
--- /dev/null
+++ b/source/documentation/0.14.0/features/webhooks.md
@@ -0,0 +1,80 @@
+Webhooks
+========
+
+Aurora has an optional feature which allows an operator to specify a file to configure an HTTP webhook
+to receive task state change events. It can be enabled with a scheduler flag, e.g.
+`-webhook_config=/path/to/webhook.json`. At this point, webhooks are still considered *experimental*.
+
+Below is a sample configuration:
+
+```json
+{
+  "headers": {
+    "Content-Type": "application/vnd.kafka.json.v1+json",
+    "Producer-Type": "reliable"
+  },
+  "targetURL": "http://localhost:5000/",
+  "timeoutMsec": 5
+}
+```
+
+And an example of a task state change event that your webhook target will receive:
+```json
+{
+    "task":
+    {
+        "cachedHashCode":0,
+        "assignedTask": {
+            "cachedHashCode":0,
+            "taskId":"vagrant-test-http_example-8-a6cf7ec5-d793-49c7-b10f-0e14ab80bfff",
+            "task": {
+                "cachedHashCode":-1819348376,
+                "job": {
+                    "cachedHashCode":803049425,
+                    "role":"vagrant",
+                    "environment":"test",
+                    "name":"http_example"
+                    },
+                "owner": {
+                    "cachedHashCode":226895216,
+                    "user":"vagrant"
+                    },
+                "isService":true,
+                "numCpus":0.1,
+                "ramMb":16,
+                "diskMb":8,
+                "priority":0,
+                "maxTaskFailures":1,
+                "production":false,
+                "resources":[
+                    {"cachedHashCode":729800451,"setField":"NUM_CPUS","value":0.1},
+                    {"cachedHashCode":552899914,"setField":"RAM_MB","value":16},
+                    {"cachedHashCode":-1547868317,"setField":"DISK_MB","value":8},
+                    {"cachedHashCode":1957328227,"setField":"NAMED_PORT","value":"http"},
+                    {"cachedHashCode":1954229436,"setField":"NAMED_PORT","value":"tcp"}
+                    ],
+                "constraints":[],
+                "requestedPorts":["http","tcp"],
+                "taskLinks":{"http":"http://%host%:%port:http%"},
+                "contactEmail":"vagrant@localhost",
+                "executorConfig": {
+                    "cachedHashCode":-1194797325,
+                    "name":"AuroraExecutor",
+                    "data": "{\"environment\": \"test\", \"health_check_config\": {\"initial_interval_secs\": 5.0, \"health_checker\": { \"http\": {\"expected_response_code\": 0, \"endpoint\": \"/health\", \"expected_response\": \"ok\"}}, \"max_consecutive_failures\": 0, \"timeout_secs\": 1.0, \"interval_secs\": 1.0}, \"name\": \"http_example\", \"service\": true, \"max_task_failures\": 1, \"cron_collision_policy\": \"KILL_EXISTING\", \"enable_hooks\": false, \"cluster\": \"devcluster\", \"task\": {\"processes\": [{\"daemon\": false, \"name\": \"echo_ports\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"echo \\\"tcp port: {{thermos.ports[tcp]}}; http port: {{thermos.ports[http]}}; alias: {{thermos.ports[alias]}}\\\"\", \"final\": false}, {\"daemon\": false, \"name\": \"stage_server\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"cp /vagrant/src/test/sh/org/apache/aurora/e2e/http_example.py .\", \"final\": false}, {\"daemon\": false, \"name\": \"run_server\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"python http_example.py {{thermos.ports[http]}}\", \"final\": false}], \"name\": \"http_example\", \"finalization_wait\": 30, \"max_failures\": 1, \"max_concurrency\": 0, \"resources\": {\"disk\": 8388608, \"ram\": 16777216, \"cpu\": 0.1}, \"constraints\": [{\"order\": [\"echo_ports\", \"stage_server\", \"run_server\"]}]}, \"production\": false, \"role\": \"vagrant\", \"contact\": \"vagrant@localhost\", \"announce\": {\"primary_port\": \"http\", \"portmap\": {\"alias\": \"http\"}}, \"lifecycle\": {\"http\": {\"graceful_shutdown_endpoint\": \"/quitquitquit\", \"port\": \"health\", \"shutdown_endpoint\": \"/abortabortabort\"}}, \"priority\": 0}"},
+                    "metadata":[],
+                    "container":{
+                        "cachedHashCode":-1955376216,
+                        "setField":"MESOS",
+                        "value":{"cachedHashCode":31}}
+                    },
+                    "assignedPorts":{},
+                    "instanceId":8
+        },
+        "status":"PENDING",
+        "failureCount":0,
+        "taskEvents":[
+            {"cachedHashCode":0,"timestamp":1464992060258,"status":"PENDING","scheduler":"aurora"}]
+        },
+        "oldState":{}}
+```
+
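+Task state change events are delivered to the configured `targetURL`. As a quick way to observe them
+locally, the sketch below implements a minimal receiver for the sample `targetURL` above, assuming
+events arrive as HTTP POST requests with a JSON body. It is not part of Aurora; it simply logs the
+task id and status from each event:
+
+```python
+import json
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+class WebhookHandler(BaseHTTPRequestHandler):
+    def do_POST(self):
+        # Read and parse the JSON event body sent by the scheduler.
+        length = int(self.headers.get('Content-Length', 0))
+        event = json.loads(self.rfile.read(length))
+        task = event.get('task', {})
+        print(task.get('assignedTask', {}).get('taskId'), task.get('status'))
+        self.send_response(200)
+        self.end_headers()
+
+if __name__ == '__main__':
+    HTTPServer(('localhost', 5000), WebhookHandler).serve_forever()
+```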
diff --git a/source/documentation/0.14.0/getting-started/overview.md b/source/documentation/0.14.0/getting-started/overview.md
new file mode 100644
index 0000000..84c5916
--- /dev/null
+++ b/source/documentation/0.14.0/getting-started/overview.md
@@ -0,0 +1,112 @@
+Aurora System Overview
+======================
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+
+Components
+----------
+
+It is important to have an understanding of the components that make up
+a functioning Aurora cluster.
+
+![Aurora Components](../images/components.png)
+
+* **Aurora scheduler**
+  The scheduler is your primary interface to the work you run in your cluster.  You will
+  instruct it to run jobs, and it will manage them in Mesos for you.  You will also frequently use
+  the scheduler's read-only web interface as a heads-up display for what's running in your cluster.
+
+* **Aurora client**
+  The client (`aurora` command) is a command line tool that exposes primitives that you can use to
+  interact with the scheduler.
+
+  Aurora also provides an admin client (`aurora_admin` command) that contains commands built for
+  cluster administrators.  You can use this tool to do things like manage user quotas and perform
+  graceful maintenance on machines in the cluster.
+
+* **Aurora executor**
+  The executor (a.k.a. Thermos executor) is responsible for carrying out the workloads described in
+  the Aurora DSL (`.aurora` files).  The executor is what actually executes user processes.  It will
+  also perform health checking of tasks and register tasks in ZooKeeper for the purposes of dynamic
+  service discovery.
+
+* **Aurora observer**
+  The observer provides browser-based access to the status of individual tasks executing on worker
+  machines.  It gives insight into the processes executing, and facilitates browsing of task sandbox
+  directories.
+
+* **ZooKeeper**
+  [ZooKeeper](http://zookeeper.apache.org) is a distributed consensus system.  In an Aurora cluster
+  it is used for reliable election of the leading Aurora scheduler and Mesos master.  It is also
+  used as a vehicle for service discovery; see [Service Discovery](../../features/service-discovery/).
+
+* **Mesos master**
+  The master is responsible for tracking worker machines and performing accounting of their
+  resources.  The scheduler interfaces with the master to control the cluster.
+
+* **Mesos agent**
+  The agent receives work assigned by the scheduler and executes it.  It interfaces with Linux
+  isolation systems like cgroups, namespaces and Docker to manage the resource consumption of tasks.
+  When a user task is launched, the agent will launch the executor (in the context of a Linux cgroup
+  or Docker container depending upon the environment), which will in turn fork user processes.
+
+  In earlier versions of Mesos and Aurora, the Mesos agent was known as the Mesos slave.
+
+
+Jobs, Tasks and Processes
+--------------------------
+
+Aurora is a Mesos framework used to schedule *jobs* onto Mesos. Mesos
+cares about individual *tasks*, but typical jobs consist of dozens or
+hundreds of task replicas. Aurora provides a layer on top of Mesos with
+its `Job` abstraction. An Aurora `Job` consists of a task template and
+instructions for creating near-identical replicas of that task (modulo
+things like "instance id" or specific port numbers which may differ from
+machine to machine).
+
+How many tasks make up a Job varies. At a basic level, a Job consists of
+one task template and instructions for creating near-identical replicas of that task
+(otherwise referred to as "instances" or "shards").
+
+A task can merely be a single *process* corresponding to a single
+command line, such as `python2.7 my_script.py`. However, a task can also
+consist of many separate processes, which all run within a single
+sandbox. For example, running multiple cooperating agents together,
+such as `logrotate`, `installer`, master, or agent processes. This is
+where Thermos comes in. While Aurora provides a `Job` abstraction on
+top of Mesos `Tasks`, Thermos provides a `Process` abstraction
+underneath Mesos `Task`s and serves as part of the Aurora framework's
+executor.
+
+You define `Job`s, `Task`s, and `Process`es in a configuration file.
+Configuration files are written in Python, and make use of the
+[Pystachio](https://github.com/wickman/pystachio) templating language,
+along with specific Aurora, Mesos, and Thermos commands and methods.
+The configuration files typically end with a `.aurora` extension.
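+
+As a purely illustrative sketch of that hierarchy (the names are made up; see the
+[tutorial](../tutorial/) for a complete, runnable example):
+
+    date_process = Process(name = 'date', cmdline = 'date')     # a Process: one command line
+
+    date_task = Task(                                            # a Task: one or more Processes
+      name = 'date',
+      processes = [date_process],
+      resources = Resources(cpu = 0.1, ram = 16*MB, disk = 16*MB))
+
+    jobs = [Job(                                                 # a Job: replicas of the Task
+      cluster = 'devcluster',
+      role = 'www-data',
+      environment = 'devel',
+      name = 'date',
+      task = date_task)]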
+
+Summary:
+
+* Aurora manages jobs made of tasks.
+* Mesos manages tasks made of processes.
+* Thermos manages processes.
+* All of the above is defined in `.aurora` configuration files.
+
+![Aurora hierarchy](../images/aurora_hierarchy.png)
+
+Each `Task` has a *sandbox* created when the `Task` starts and garbage
+collected when it finishes. All of a `Task`'s processes run in its
+sandbox, so processes can share state by using a shared current working
+directory.
+
+The sandbox garbage collection policy considers many factors, most
+importantly age and size. It makes a best-effort attempt to keep
+sandboxes around as long as possible post-task in order for service
+owners to inspect data and logs, should the `Task` have completed
+abnormally. But you can't design your applications assuming sandboxes
+will be around forever; if you need to keep data, build log saving or other
+checkpointing mechanisms directly into your application or into your
+`Job` description.
+
diff --git a/source/documentation/0.14.0/getting-started/tutorial.md b/source/documentation/0.14.0/getting-started/tutorial.md
new file mode 100644
index 0000000..1f037fc
--- /dev/null
+++ b/source/documentation/0.14.0/getting-started/tutorial.md
@@ -0,0 +1,258 @@
+# Aurora Tutorial
+
+This tutorial shows how to use the Aurora scheduler to run (and "`printf-debug`")
+a hello world program on Mesos. This is the recommended document for new Aurora users
+to start getting up to speed on the system.
+
+- [Prerequisite](#prerequisite)
+- [The Script](#the-script)
+- [Aurora Configuration](#aurora-configuration)
+- [Creating the Job](#creating-the-job)
+- [Watching the Job Run](#watching-the-job-run)
+- [Cleanup](#cleanup)
+- [Next Steps](#next-steps)
+
+
+## Prerequisite
+
+This tutorial assumes you are running [Aurora locally using Vagrant](../vagrant/).
+However, in general the instructions are also applicable to any other
+[Aurora installation](../../operations/installation/).
+
+Unless otherwise stated, all commands are to be run from the root of the aurora
+repository clone.
+
+
+## The Script
+
+Our "hello world" application is a simple Python script that loops
+forever, displaying the time every few seconds. Copy the code below and
+put it in a file named `hello_world.py` in the root of your Aurora repository clone
+(Note: this directory is the same as `/vagrant` inside the Vagrant VMs).
+
+The script has an intentional bug, which we will explain later on.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+import time
+
+def main():
+  SLEEP_DELAY = 10
+  # Python experts - ignore this blatant bug.
+  for i in xrang(100):
+    print("Hello world! The time is now: %s. Sleeping for %d secs" % (
+      time.asctime(), SLEEP_DELAY))
+    time.sleep(SLEEP_DELAY)
+
+if __name__ == "__main__":
+  main()
+```
+
+## Aurora Configuration
+
+Once we have our script/program, we need to create a *configuration
+file* that tells Aurora how to manage and launch our Job. Save the below
+code in the file `hello_world.aurora`.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+pkg_path = '/vagrant/hello_world.py'
+
+# we use a trick here to make the configuration change with
+# the contents of the file, for simplicity.  in a normal setting, packages would be
+# versioned, and the version number would be changed in the configuration.
+import hashlib
+with open(pkg_path, 'rb') as f:
+  pkg_checksum = hashlib.md5(f.read()).hexdigest()
+
+# copy hello_world.py into the local sandbox
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s . && echo %s && chmod +x hello_world.py' % (pkg_path, pkg_checksum))
+
+# run the script
+hello_world = Process(
+  name = 'hello_world',
+  cmdline = 'python -u hello_world.py')
+
+# describe the task
+hello_world_task = SequentialTask(
+  processes = [install, hello_world],
+  resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB))
+
+jobs = [
+  Service(cluster = 'devcluster',
+          environment = 'devel',
+          role = 'www-data',
+          name = 'hello_world',
+          task = hello_world_task)
+]
+```
+
+There is a lot going on in that configuration file:
+
+1. From a "big picture" viewpoint, it first defines two
+Processes. Then it defines a Task that runs the two Processes in the
+order specified in the Task definition, as well as specifying what
+computational and memory resources are available for them.  Finally,
+it defines a Job that will schedule the Task on available and suitable
+machines. This Job is the sole member of a list of Jobs; you can
+specify more than one Job in a config file.
+
+2. At the Process level, it specifies how to get your code into the
+local sandbox in which it will run. It then specifies how the code is
+actually run once the second Process starts.
+
+For more about Aurora configuration files, see the [Configuration
+Tutorial](../../reference/configuration-tutorial/) and the [Configuration
+Reference](../../reference/configuration/) (preferably after finishing this
+tutorial).
+
+
+## Creating the Job
+
+We're ready to launch our job! To do so, we use the Aurora Client to
+issue a Job creation request to the Aurora scheduler.
+
+Many Aurora Client commands take a *job key* argument, which uniquely
+identifies a Job. A job key consists of four parts, each separated by a
+"/". The four parts are  `<cluster>/<role>/<environment>/<jobname>`
+in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts existing on the agent machines. If you
+don't know what accounts are available, contact your sysadmin.
+* Environment names are namespaces; you can count on `test`, `devel`,
+`staging` and `prod` existing.
+* Jobname is the custom name of your job.
+
+When comparing two job keys, if any of the four parts is different from
+its counterpart in the other key, then the two job keys identify two separate
+jobs. If all four values are identical, the job keys identify the same job.
+
+The `clusters.json` [client configuration](../../reference/client-cluster-configuration/)
+for the Aurora scheduler defines the available cluster names.
+For Vagrant, from the top-level of your Aurora repository clone, do:
+
+    $ vagrant ssh
+
+Followed by:
+
+    vagrant@aurora:~$ cat /etc/aurora/clusters.json
+
+You'll see something like the following. The `name` value shown here corresponds to a job key's cluster value.
+
+```javascript
+[{
+  "name": "devcluster",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler",
+  "auth_mechanism": "UNAUTHENTICATED",
+  "slave_run_directory": "latest",
+  "slave_root": "/var/lib/mesos"
+}]
+```
+
+The Aurora Client command that actually runs our Job is `aurora job create`. It creates a Job as
+specified by its job key and configuration file arguments and runs it.
+
+    aurora job create <cluster>/<role>/<environment>/<jobname> <config_file>
+
+Or for our example:
+
+    aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+After entering our virtual machine using `vagrant ssh`, this returns:
+
+    vagrant@aurora:~$ aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Creating job hello_world
+     INFO] Checking status of devcluster/www-data/devel/hello_world
+    Job create succeeded: job url=http://aurora.local:8081/scheduler/www-data/devel/hello_world
+
+
+## Watching the Job Run
+
+Now that our job is running, let's see what it's doing. Access the
+scheduler web interface at `http://$scheduler_hostname:$scheduler_port/scheduler`,
+or, when using `vagrant`, at `http://192.168.33.7:8081/scheduler`.
+First we see what Jobs are scheduled:
+
+![Scheduled Jobs](../images/ScheduledJobs.png)
+
+Click on your role, which in this case was `www-data`, and we see the Jobs associated
+with that role:
+
+![Role Jobs](../images/RoleJobs.png)
+
+If you click on your `hello_world` Job, you'll see:
+
+![hello_world Job](../images/HelloWorldJob.png)
+
+Oops, looks like our first job didn't quite work! The task is temporarily throttled for
+having failed on every attempt of the Aurora scheduler to run it. We have to figure out
+what is going wrong.
+
+On the Completed tasks tab, we see all past attempts of the Aurora scheduler to run our job.
+
+![Completed tasks tab](../images/CompletedTasks.png)
+
+We can navigate to the Task page of a failed run by clicking on the host link.
+
+![Task page](../images/TaskBreakdown.png)
+
+Once there, we see that the `hello_world` process failed. The Task page
+captures the standard error and standard output streams and makes them available.
+Clicking through to `stderr` on the failed `hello_world` process, we see what happened.
+
+![stderr page](../images/stderr.png)
+
+It looks like we made a typo in our Python script. We wanted `xrange`,
+not `xrang`. Edit the `hello_world.py` script to use the correct function
+and save it as `hello_world_v2.py`. Then update the `hello_world.aurora`
+configuration to the newest version.
+
+In order to try again, we can now instruct the scheduler to update our job:
+
+    vagrant@aurora:~$ aurora update start devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Starting update for: hello_world
+    Job update has started. View your update progress at http://aurora.local:8081/scheduler/www-data/devel/hello_world/update/8ef38017-e60f-400d-a2f2-b5a8b724e95b
+
+This time, the task comes up.
+
+![Running Job](../images/RunningJob.png)
+
+By again clicking on the host, we inspect the Task page, and see that the
+`hello_world` process is running.
+
+![Running Task page](../images/runningtask.png)
+
+We then inspect the output by clicking on `stdout` and see our process'
+output:
+
+![stdout page](../images/stdout.png)
+
+## Cleanup
+
+Now that we're done, we kill the job using the Aurora client:
+
+    vagrant@aurora:~$ aurora job killall devcluster/www-data/devel/hello_world
+     INFO] Killing tasks for job: devcluster/www-data/devel/hello_world
+     INFO] Instances to be killed: [0]
+    Successfully killed instances [0]
+    Job killall succeeded
+
+The job page now shows the `hello_world` tasks as completed.
+
+![Killed Task page](../images/killedtask.png)
+
+## Next Steps
+
+Now that you've finished this Tutorial, you should read or do the following:
+
+- [The Aurora Configuration Tutorial](../../reference/configuration-tutorial/), which provides more examples
+  and best practices for writing Aurora configurations. You should also look at
+  the [Aurora Configuration Reference](../../reference/configuration/).
+- Explore the Aurora Client - use `aurora -h`, and read the
+  [Aurora Client Commands](../../reference/client-commands/) document.
diff --git a/source/documentation/0.14.0/getting-started/vagrant.md b/source/documentation/0.14.0/getting-started/vagrant.md
new file mode 100644
index 0000000..26b9a80
--- /dev/null
+++ b/source/documentation/0.14.0/getting-started/vagrant.md
@@ -0,0 +1,154 @@
+A local Cluster with Vagrant
+============================
+
+This document shows you how to configure a complete cluster using a virtual machine. This setup
+replicates a real cluster in your development machine as closely as possible. After you complete
+the steps outlined here, you will be ready to create and run your first Aurora job.
+
+The following sections describe these steps in detail:
+
+1. [Overview](#overview)
+1. [Install VirtualBox and Vagrant](#install-virtualbox-and-vagrant)
+1. [Clone the Aurora repository](#clone-the-aurora-repository)
+1. [Start the local cluster](#start-the-local-cluster)
+1. [Log onto the VM](#log-onto-the-vm)
+1. [Run your first job](#run-your-first-job)
+1. [Rebuild components](#rebuild-components)
+1. [Shut down or delete your local cluster](#shut-down-or-delete-your-local-cluster)
+1. [Troubleshooting](#troubleshooting)
+
+
+Overview
+--------
+
+The Aurora distribution includes a set of scripts that enable you to create a local cluster in
+your development machine. These scripts use [Vagrant](https://www.vagrantup.com/) and
+[VirtualBox](https://www.virtualbox.org/) to run and configure a virtual machine. Once the
+virtual machine is running, the scripts install and initialize Aurora and any required components
+to create the local cluster.
+
+
+Install VirtualBox and Vagrant
+------------------------------
+
+First, download and install [VirtualBox](https://www.virtualbox.org/) on your development machine.
+
+Then download and install [Vagrant](https://www.vagrantup.com/). To verify that the installation
+was successful, open a terminal window and type the `vagrant` command. You should see a list of
+common commands for this tool.
+
+
+Clone the Aurora repository
+---------------------------
+
+To obtain the Aurora source distribution, clone its Git repository using the following command:
+
+     git clone git://git.apache.org/aurora.git
+
+
+Start the local cluster
+-----------------------
+
+Now change into the `aurora/` directory, which contains the Aurora source code and
+other scripts and tools:
+
+     cd aurora/
+
+To start the local cluster, type the following command:
+
+     vagrant up
+
+This command uses the configuration scripts in the Aurora distribution to:
+
+* Download a Linux system image.
+* Start a virtual machine (VM) and configure it.
+* Install the required build tools on the VM.
+* Install Aurora's requirements (like [Mesos](http://mesos.apache.org/) and
+[Zookeeper](http://zookeeper.apache.org/)) on the VM.
+* Build and install Aurora from source on the VM.
+* Start Aurora's services on the VM.
+
+This process takes several minutes to complete.
+
+You may notice a warning that guest additions in the VM don't match your version of VirtualBox.
+This should generally be harmless, but you may wish to install a vagrant plugin to take care of
+mismatches like this for you:
+
+     vagrant plugin install vagrant-vbguest
+
+With this plugin installed, whenever you `vagrant up` the plugin will upgrade the guest additions
+for you when a version mismatch is detected. You can read more about the plugin
+[here](https://github.com/dotless-de/vagrant-vbguest).
+
+To verify that Aurora is running on the cluster, visit the following URLs:
+
+* Scheduler - http://192.168.33.7:8081
+* Observer - http://192.168.33.7:1338
+* Mesos Master - http://192.168.33.7:5050
+* Mesos Agent - http://192.168.33.7:5051
+
+
+Log onto the VM
+---------------
+
+To SSH into the VM, run the following command in your development machine:
+
+     vagrant ssh
+
+To verify that Aurora is installed in the VM, type the `aurora` command. You should see a list
+of arguments and possible commands.
+
+The `/vagrant` directory on the VM is mapped to the `aurora/` local directory
+from which you started the cluster. You can edit files inside this directory in your development
+machine and access them from the VM under `/vagrant`.
+
+A pre-installed `clusters.json` file refers to your local cluster as `devcluster`, which you
+will use in client commands.
+
+
+Run your first job
+------------------
+
+Now that your cluster is up and running, you are ready to define and run your first job in Aurora.
+For more information, see the [Aurora Tutorial](../tutorial/).
+
+
+Rebuild components
+------------------
+
+If you are changing Aurora code and would like to rebuild a component, you can use the `aurorabuild`
+command on the VM to build and restart a component.  This is considerably faster than destroying
+and rebuilding your VM.
+
+`aurorabuild` accepts a list of components to build and update. Invoke the `aurorabuild` command
+with no arguments to get a list of supported components. For example, to rebuild and restart the client:
+
+     vagrant ssh -c 'aurorabuild client'
+
+
+Shut down or delete your local cluster
+--------------------------------------
+
+To shut down your local cluster, run the `vagrant halt` command in your development machine. To
+start it again, run the `vagrant up` command.
+
+Once you are finished with your local cluster, or if you would otherwise like to start from scratch,
+you can use the command `vagrant destroy` to turn off and delete the virtual file system.
+
+
+Troubleshooting
+---------------
+
+Most Vagrant-related problems can be fixed by the following steps:
+
+* Destroying the vagrant environment with `vagrant destroy`
+* Killing any orphaned VMs (see AURORA-499) with `virtualbox` UI or `VBoxManage` command line tool
+* Cleaning the repository of build artifacts and other intermediate output with `git clean -fdx`
+* Bringing up the vagrant environment with `vagrant up`
+
+If that still doesn't solve your problem, make sure to inspect the log files:
+
+* Scheduler: `/var/log/upstart/aurora-scheduler.log`
+* Observer: `/var/log/upstart/aurora-thermos-observer.log`
+* Mesos Master: `/var/log/mesos/mesos-master.INFO` (also see `.WARNING` and `.ERROR`)
+* Mesos Agent: `/var/log/mesos/mesos-slave.INFO` (also see `.WARNING` and `.ERROR`)
diff --git a/publish/documentation/latest/images/CPUavailability.png b/source/documentation/0.14.0/images/CPUavailability.png
similarity index 100%
copy from publish/documentation/latest/images/CPUavailability.png
copy to source/documentation/0.14.0/images/CPUavailability.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/CompletedTasks.png b/source/documentation/0.14.0/images/CompletedTasks.png
new file mode 100644
index 0000000..343e111
--- /dev/null
+++ b/source/documentation/0.14.0/images/CompletedTasks.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/HelloWorldJob.png b/source/documentation/0.14.0/images/HelloWorldJob.png
new file mode 100644
index 0000000..4977227
--- /dev/null
+++ b/source/documentation/0.14.0/images/HelloWorldJob.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/RoleJobs.png b/source/documentation/0.14.0/images/RoleJobs.png
new file mode 100644
index 0000000..95ffa49
--- /dev/null
+++ b/source/documentation/0.14.0/images/RoleJobs.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/RunningJob.png b/source/documentation/0.14.0/images/RunningJob.png
new file mode 100644
index 0000000..4e7a045
--- /dev/null
+++ b/source/documentation/0.14.0/images/RunningJob.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/ScheduledJobs.png b/source/documentation/0.14.0/images/ScheduledJobs.png
new file mode 100644
index 0000000..0be4731
--- /dev/null
+++ b/source/documentation/0.14.0/images/ScheduledJobs.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/TaskBreakdown.png b/source/documentation/0.14.0/images/TaskBreakdown.png
new file mode 100644
index 0000000..fab702c
--- /dev/null
+++ b/source/documentation/0.14.0/images/TaskBreakdown.png
Binary files differ
diff --git a/publish/documentation/latest/images/aurora_hierarchy.png b/source/documentation/0.14.0/images/aurora_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/aurora_hierarchy.png
copy to source/documentation/0.14.0/images/aurora_hierarchy.png
Binary files differ
diff --git a/publish/assets/img/aurora_logo.png b/source/documentation/0.14.0/images/aurora_logo.png
similarity index 100%
copy from publish/assets/img/aurora_logo.png
copy to source/documentation/0.14.0/images/aurora_logo.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/components.odg b/source/documentation/0.14.0/images/components.odg
new file mode 100644
index 0000000..a0f3b28
--- /dev/null
+++ b/source/documentation/0.14.0/images/components.odg
Binary files differ
diff --git a/source/documentation/0.14.0/images/components.png b/source/documentation/0.14.0/images/components.png
new file mode 100644
index 0000000..e8228c0
--- /dev/null
+++ b/source/documentation/0.14.0/images/components.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/debug-client-test.png b/source/documentation/0.14.0/images/debug-client-test.png
new file mode 100644
index 0000000..8b67a10
--- /dev/null
+++ b/source/documentation/0.14.0/images/debug-client-test.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/debugging-client-test.png b/source/documentation/0.14.0/images/debugging-client-test.png
new file mode 100644
index 0000000..df7b7e3
--- /dev/null
+++ b/source/documentation/0.14.0/images/debugging-client-test.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/killedtask.png b/source/documentation/0.14.0/images/killedtask.png
new file mode 100644
index 0000000..bb1bc92
--- /dev/null
+++ b/source/documentation/0.14.0/images/killedtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/lifeofatask.png b/source/documentation/0.14.0/images/lifeofatask.png
similarity index 100%
copy from publish/documentation/latest/images/lifeofatask.png
copy to source/documentation/0.14.0/images/lifeofatask.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png b/source/documentation/0.14.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
new file mode 100644
index 0000000..ea9866e
--- /dev/null
+++ b/source/documentation/0.14.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png b/source/documentation/0.14.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
new file mode 100644
index 0000000..c9539df
--- /dev/null
+++ b/source/documentation/0.14.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png b/source/documentation/0.14.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
new file mode 100644
index 0000000..99674c7
--- /dev/null
+++ b/source/documentation/0.14.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/presentations/02_28_2015_apache_aurora_thumb.png b/source/documentation/0.14.0/images/presentations/02_28_2015_apache_aurora_thumb.png
new file mode 100644
index 0000000..6e0e1fb
--- /dev/null
+++ b/source/documentation/0.14.0/images/presentations/02_28_2015_apache_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png b/source/documentation/0.14.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
new file mode 100644
index 0000000..f81a128
--- /dev/null
+++ b/source/documentation/0.14.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png b/source/documentation/0.14.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
new file mode 100644
index 0000000..6ebce80
--- /dev/null
+++ b/source/documentation/0.14.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png b/source/documentation/0.14.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
new file mode 100644
index 0000000..f8a3db1
--- /dev/null
+++ b/source/documentation/0.14.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/presentations/08_21_2014_past_present_future_thumb.png b/source/documentation/0.14.0/images/presentations/08_21_2014_past_present_future_thumb.png
new file mode 100644
index 0000000..deaebe7
--- /dev/null
+++ b/source/documentation/0.14.0/images/presentations/08_21_2014_past_present_future_thumb.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png b/source/documentation/0.14.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
new file mode 100644
index 0000000..61ea6d4
--- /dev/null
+++ b/source/documentation/0.14.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png b/source/documentation/0.14.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png
new file mode 100644
index 0000000..9f1f4af
--- /dev/null
+++ b/source/documentation/0.14.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png b/source/documentation/0.14.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
new file mode 100644
index 0000000..a8bb199
--- /dev/null
+++ b/source/documentation/0.14.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png b/source/documentation/0.14.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
new file mode 100644
index 0000000..ac5c6dc
--- /dev/null
+++ b/source/documentation/0.14.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/runningtask.png b/source/documentation/0.14.0/images/runningtask.png
new file mode 100644
index 0000000..9e7e31a
--- /dev/null
+++ b/source/documentation/0.14.0/images/runningtask.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/stderr.png b/source/documentation/0.14.0/images/stderr.png
new file mode 100644
index 0000000..6076a84
--- /dev/null
+++ b/source/documentation/0.14.0/images/stderr.png
Binary files differ
diff --git a/source/documentation/0.14.0/images/stdout.png b/source/documentation/0.14.0/images/stdout.png
new file mode 100644
index 0000000..d614097
--- /dev/null
+++ b/source/documentation/0.14.0/images/stdout.png
Binary files differ
diff --git a/publish/documentation/latest/images/storage_hierarchy.png b/source/documentation/0.14.0/images/storage_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/storage_hierarchy.png
copy to source/documentation/0.14.0/images/storage_hierarchy.png
Binary files differ
diff --git a/source/documentation/0.14.0/index.html.md b/source/documentation/0.14.0/index.html.md
new file mode 100644
index 0000000..b770654
--- /dev/null
+++ b/source/documentation/0.14.0/index.html.md
@@ -0,0 +1,74 @@
+## Introduction
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+We encourage you to ask questions on the [Aurora user list](http://aurora.apache.org/community/) or
+the `#aurora` IRC channel on `irc.freenode.net`.
+
+
+## Getting Started
+Information for everyone new to Apache Aurora.
+
+ * [Aurora System Overview](getting-started/overview/)
+ * [Hello World Tutorial](getting-started/tutorial/)
+ * [Local cluster with Vagrant](getting-started/vagrant/)
+
+## Features
+Description of important Aurora features.
+
+ * [Containers](features/containers/)
+ * [Cron Jobs](features/cron-jobs/)
+ * [Job Updates](features/job-updates/)
+ * [Multitenancy](features/multitenancy/)
+ * [Resource Isolation](features/resource-isolation/)
+ * [Scheduling Constraints](features/constraints/)
+ * [Services](features/services/)
+ * [Service Discovery](features/service-discovery/)
+ * [SLA Metrics](features/sla-metrics/)
+ * [Webhooks](features/webhooks/)
+
+## Operators
+For those who wish to manage and fine-tune an Aurora cluster.
+
+ * [Installation](operations/installation/)
+ * [Configuration](operations/configuration/)
+ * [Monitoring](operations/monitoring/)
+ * [Security](operations/security/)
+ * [Storage](operations/storage/)
+ * [Backup](operations/backup-restore/)
+
+## Reference
+The complete reference of commands, configuration options, and scheduler internals.
+
+ * [Task lifecycle](reference/task-lifecycle/)
+ * Configuration (`.aurora` files)
+    - [Configuration Reference](reference/configuration/)
+    - [Configuration Tutorial](reference/configuration-tutorial/)
+    - [Configuration Best Practices](reference/configuration-best-practices/)
+    - [Configuration Templating](reference/configuration-templating/)
+ * Aurora Client
+    - [Client Commands](reference/client-commands/)
+    - [Client Hooks](reference/client-hooks/)
+    - [Client Cluster Configuration](reference/client-cluster-configuration/)
+ * [Scheduler Configuration](reference/scheduler-configuration/)
+
+## Additional Resources
+ * [Tools integrating with Aurora](additional-resources/tools/)
+ * [Presentation videos and slides](additional-resources/presentations/)
+
+## Developers
+All the information you need to start modifying Aurora and contributing back to the project.
+
+ * [Contributing to the project](contributing/)
+ * [Committer's Guide](development/committers-guide/)
+ * [Design Documents](development/design-documents/)
+ * Developing the Aurora components:
+     - [Client](development/client/)
+     - [Scheduler](development/scheduler/)
+     - [Scheduler UI](development/ui/)
+     - [Thermos](development/thermos/)
+     - [Thrift structures](development/thrift/)
+
+
diff --git a/source/documentation/0.14.0/operations/backup-restore.md b/source/documentation/0.14.0/operations/backup-restore.md
new file mode 100644
index 0000000..8dac40a
--- /dev/null
+++ b/source/documentation/0.14.0/operations/backup-restore.md
@@ -0,0 +1,91 @@
+# Recovering from a Scheduler Backup
+
+**Be sure to read the entire page before attempting to restore from a backup, as it may have
+unintended consequences.**
+
+# Summary
+
+The restoration procedure replaces the existing (possibly corrupted) Mesos replicated log with an
+earlier, backed up, version and requires all schedulers to be taken down temporarily while
+restoring. Once completed, the scheduler state resets to what it was when the backup was created.
+This means any jobs/tasks created or updated after the backup are unknown to the scheduler and will
+be killed shortly after the cluster restarts. All other tasks continue operating as normal.
+
+Usually, it is a bad idea to restore a backup that is not extremely recent (i.e. older than a few
+hours). This is because the scheduler will expect the cluster to look exactly as the backup does,
+so any tasks that have been rescheduled since the backup was taken will be killed.
+
+Instructions below have been verified in [Vagrant environment](../../getting-started/vagrant/) and with minor
+syntax/path changes should be applicable to any Aurora cluster.
+
+# Preparation
+
+Follow these steps to prepare the cluster for restoring from a backup:
+
+* Stop all scheduler instances
+
+* Consider blocking external traffic on the port defined in `-http_port` for all schedulers to
+prevent users from interacting with the scheduler during the restoration process. This will help
+troubleshooting by reducing the scheduler log noise and prevent users from making changes that will
+be erased after the backup snapshot is restored.
+
+* Configure `aurora_admin` access to run all commands listed in
+  [Restore from backup](#restore-from-backup) section locally on the leading scheduler:
+  * Make sure the [clusters.json](../../reference/client-cluster-configuration/) file is configured to
+    access the scheduler directly. Set the `scheduler_uri` setting and remove `zk`. Since the leader can
+    get re-elected during the restore steps, consider doing this on all scheduler replicas.
+  * Depending on your particular security approach, you will need to either turn off scheduler
+    authorization by removing the scheduler's `-http_authentication_mechanism` flag or make sure that
+    direct scheduler access is properly authorized. E.g., in the case of Kerberos you will need to make
+    a `/etc/hosts` change to map your local IP to the scheduler URL configured in the keytabs:
+
+        <local_ip> <scheduler_domain_in_keytabs>
+
+* The next steps are required to put the scheduler into a partially disabled state where it will still
+be able to accept storage recovery requests but will be unable to schedule or change task states. This
+may be accomplished by updating the following scheduler configuration options:
+  * Set `-mesos_master_address` to a non-existent zk address. This will prevent the scheduler from
+    registering with Mesos. E.g.: `-mesos_master_address=zk://localhost:1111/mesos/master`
+  * Set `-max_registration_delay` to a sufficiently long interval to prevent a registration timeout
+    and, as a result, scheduler suicide. E.g.: `-max_registration_delay=360mins`
+  * Make sure the `-reconciliation_initial_delay` option is set high enough (e.g. `365days`) to
+    prevent accidental task GC. This is important as the scheduler will attempt to reconcile the cluster
+    state and will kill all tasks when restarted with an empty Mesos replicated log.
+
+* Restart all schedulers
+
+# Cleanup and re-initialize Mesos replicated log
+
+Get rid of the corrupted files and re-initialize Mesos replicated log:
+
+* Stop schedulers
+* Delete all files under `-native_log_file_path` on all schedulers
+* Initialize Mesos replica's log file: `sudo mesos-log initialize --path=<-native_log_file_path>`
+* Start schedulers
+
+# Restore from backup
+
+At this point the scheduler is ready to rehydrate from the backup:
+
+* Identify the leading scheduler by:
+  * examining the `scheduler_lifecycle_LEADER_AWAITING_REGISTRATION` metric at the scheduler
+    `/vars` endpoint. The leader will report `1`; all other replicas will report `0`.
+  * examining scheduler logs
+  * or examining Zookeeper registration under the path defined by `-zk_endpoints`
+    and `-serverset_path`
+
+* Locate the desired backup file, copy it to the leading scheduler's `-backup_dir` folder and stage
+recovery by running the following command on the leader:
+`aurora_admin scheduler_stage_recovery --bypass-leader-redirect <cluster> scheduler-backup-<yyyy-MM-dd-HH-mm>`
+
+* At this point, the recovery snapshot is staged and available for manual verification/modification
+via `aurora_admin scheduler_print_recovery_tasks --bypass-leader-redirect` and
+`scheduler_delete_recovery_tasks --bypass-leader-redirect` commands.
+See `aurora_admin help <command>` for usage details.
+
+* Commit recovery. This instructs the scheduler to overwrite the existing Mesos replicated log with
+the provided backup snapshot and initiate a mandatory failover:
+`aurora_admin scheduler_commit_recovery --bypass-leader-redirect <cluster>` (a consolidated sketch of the full sequence follows this list)
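+
+For reference, a consolidated sketch of the restore sequence as it might be run on the leading scheduler.
+The cluster name (`devcluster`) and backup file name below are placeholders; see `aurora_admin help <command>`
+for the exact arguments of each command:
+
+    # Stage the backup (file name is illustrative):
+    aurora_admin scheduler_stage_recovery --bypass-leader-redirect devcluster scheduler-backup-2016-06-01-12-00
+
+    # Optionally inspect (and, if needed, prune) the staged snapshot:
+    aurora_admin scheduler_print_recovery_tasks --bypass-leader-redirect devcluster
+
+    # Commit the recovery, triggering a mandatory failover:
+    aurora_admin scheduler_commit_recovery --bypass-leader-redirect devcluster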
+
+# Cleanup
+Undo any modifications done during the [Preparation](#preparation) sequence.
diff --git a/source/documentation/0.14.0/operations/configuration.md b/source/documentation/0.14.0/operations/configuration.md
new file mode 100644
index 0000000..95ea5ef
--- /dev/null
+++ b/source/documentation/0.14.0/operations/configuration.md
@@ -0,0 +1,302 @@
+# Scheduler Configuration
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+Examples are available under `examples/scheduler/`. For a list of available Aurora flags and their
+documentation, see [Scheduler Configuration Reference](../../reference/scheduler-configuration/).
+
+
+## A Note on Configuration
+Like Mesos, Aurora uses command-line flags for runtime configuration. As such, the Aurora
+"configuration file" is typically a `scheduler.sh` shell script of the following form:
+
+    #!/bin/bash
+    AURORA_HOME=/usr/local/aurora-scheduler
+
+    # Flags controlling the JVM.
+    JAVA_OPTS=(
+      -Xmx2g
+      -Xms2g
+      # GC tuning, etc.
+    )
+
+    # Flags controlling the scheduler.
+    AURORA_FLAGS=(
+      # Port for client RPCs and the web UI
+      -http_port=8081
+      # Log configuration, etc.
+    )
+
+    # Environment variables controlling libmesos
+    export JAVA_HOME=...
+    export GLOG_v=1
+    # Port used to communicate with the Mesos master and for the replicated log
+    export LIBPROCESS_PORT=8083
+
+    JAVA_OPTS="${JAVA_OPTS[*]}" exec "$AURORA_HOME/bin/aurora-scheduler" "${AURORA_FLAGS[@]}"
+
+That way Aurora's current flags are visible in `ps` and in the `/vars` admin endpoint.
+
+
+## Replicated Log Configuration
+
+Aurora schedulers use ZooKeeper to discover log replicas and elect a leader. Only one scheduler is
+leader at a given time - the other schedulers follow log writes and prepare to take over as leader
+but do not communicate with the Mesos master. Either 3 or 5 schedulers are recommended in a
+production deployment depending on failure tolerance and they must have persistent storage.
+
+Below is a summary of scheduler storage configuration flags that either don't have default values
+or require attention before deploying in a production environment.
+
+### `-native_log_quorum_size`
+Defines the Mesos replicated log quorum size. In a cluster with `N` schedulers, the flag
+`-native_log_quorum_size` should be set to `floor(N/2) + 1`. So in a cluster with 1 scheduler
+it should be set to `1`, in a cluster with 3 it should be set to `2`, and in a cluster of 5 it
+should be set to `3`.
+
+  Number of schedulers (N) | ```-native_log_quorum_size``` setting (```floor(N/2) + 1```)
+  ------------------------ | -------------------------------------------------------------
+  1                        | 1
+  3                        | 2
+  5                        | 3
+  7                        | 4
+
+*Incorrectly setting this flag will cause data corruption to occur!*
+
+### `-native_log_file_path`
+Location of the Mesos replicated log files. Consider allocating a dedicated disk (preferably SSD)
+for Mesos replicated log files to ensure optimal storage performance.
+
+### `-native_log_zk_group_path`
+ZooKeeper path used for Mesos replicated log quorum discovery.
+
+See [code](https://github.com/apache/aurora/blob/rel/0.14.0/src/main/java/org/apache/aurora/scheduler/log/mesos/MesosLogStreamModule.java) for
+other available Mesos replicated log configuration options and default values.
+
+### Changing the Quorum Size
+Special care needs to be taken when changing the size of the Aurora scheduler quorum.
+Since Aurora uses a Mesos replicated log, similar steps need to be followed as when
+[changing the Mesos quorum size](http://mesos.apache.org/documentation/latest/operational-guide).
+
+In preparation, increase `-native_log_quorum_size` on each existing scheduler and restart them.
+When updating from 3 to 5 schedulers, the quorum size would grow from 2 to 3.
+
+When starting the new schedulers, set `-native_log_quorum_size` to the new value. Failing to
+first increase the quorum size on running schedulers can in some cases result in corruption
+or truncation of the replicated log used by Aurora. In that case, see the documentation on
+[recovering from backup](../backup-restore/).
+
+
+## Backup Configuration
+
+Configuration options for the Aurora scheduler backup manager.
+
+### `-backup_interval`
+The interval on which the scheduler writes local storage backups.  The default is every hour.
+
+### `-backup_dir`
+Directory to write backups to.
+
+### `-max_saved_backups`
+Maximum number of backups to retain before deleting the oldest backup(s).
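+
+For example, a hedged snippet of the corresponding scheduler flags (the directory and retention values
+below are purely illustrative):
+
+    -backup_interval=1hrs
+    -backup_dir=/var/lib/aurora/backups
+    -max_saved_backups=48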
+
+
+## Process Logs
+
+### Log destination
+By default, Thermos will write process stdout/stderr to log files in the sandbox. Process object configuration
+allows specifying alternate log file destinations like streamed stdout/stderr or suppression of all log output.
+Default behavior can be configured for the entire cluster with the following flag (through the `-thermos_executor_flags`
+argument to the Aurora scheduler):
+
+    --runner-logger-destination=both
+
+The `both` configuration will write logs to files while also streaming them to the parent stdout/stderr.
+
+See [Configuration Reference](../../reference/configuration/#logger) for all destination options.
+
+### Log rotation
+By default, Thermos will not rotate the stdout/stderr logs from child processes and they will grow
+without bound. An individual user may change this behavior via configuration on the Process object,
+but it may also be desirable to change the default configuration for the entire cluster.
+In order to enable rotation by default, the following flags can be applied to Thermos (through the
+`-thermos_executor_flags` argument to the Aurora scheduler):
+
+    --runner-logger-mode=rotate
+    --runner-rotate-log-size-mb=100
+    --runner-rotate-log-backups=10
+
+In the above example, each instance of the Thermos runner will rotate stderr/stdout logs once they
+reach 100 MiB in size and keep a maximum of 10 backups. If a user has provided a custom setting for
+their process, it will override these default settings.
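+
+As an illustration, the corresponding scheduler argument might look like the sketch below (the exact
+quoting depends on how your scheduler startup script passes flags):
+
+    -thermos_executor_flags="--runner-logger-mode=rotate --runner-rotate-log-size-mb=100 --runner-rotate-log-backups=10"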
+
+
+
+## Thermos Executor Wrapper
+
+If you need to do computation before starting the Thermos executor (for example, setting a different
+`--announcer-hostname` parameter for every executor), then the Thermos executor should be invoked
+inside a wrapper script. In such a case, the Aurora scheduler should be started with
+`-thermos_executor_path` pointing to the wrapper script and `-thermos_executor_resources`
+set to a comma-separated string of all the resources that should be copied into
+the sandbox (including the original Thermos executor).
+
+For example, to wrap the executor inside a simple wrapper, the scheduler would be started like this:
+`-thermos_executor_path=/path/to/wrapper.sh -thermos_executor_resources=/usr/share/aurora/bin/thermos_executor.pex`
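+
+A minimal `wrapper.sh` might look like the sketch below. The hostname logic is purely illustrative, and
+it assumes the executor pex listed in `-thermos_executor_resources` has been copied into the sandbox
+working directory:
+
+    #!/bin/bash
+    # Hypothetical wrapper: derive a per-host announcer hostname, then hand off
+    # to the real Thermos executor that was copied into the sandbox.
+    exec ./thermos_executor.pex --announcer-hostname="$(hostname -f)" "$@"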
+
+## Custom Executor
+
+If the need arises to use a Mesos executor other than the Thermos executor, the scheduler can be
+configured to utilize a custom executor by specifying the `-custom_executor_config` flag.
+The flag must be set to the path of a valid executor configuration file.
+
+The configuration file must be valid JSON and contain, at minimum, the `name`, `command` and `resources` fields.
+
+### executor
+
+Property                 | Description
+-----------------------  | ---------------------------------
+name (required)          | Name of the executor.
+command (required)       | How to run the executor.
+resources (required)     | Overhead to use for each executor instance.
+
+#### command
+
+Property                 | Description
+-----------------------  | ---------------------------------
+value (required)         | The command to execute.
+arguments (optional)     | A list of arguments to pass to the command.
+uris (optional)          | List of resources to download into the task sandbox.
+
+##### uris (list)
+* Follows the [Mesos Fetcher schema](http://mesos.apache.org/documentation/latest/fetcher/)
+
+Property                 | Description
+-----------------------  | ---------------------------------
+value (required)         | Path to the resource needed in the sandbox.
+executable (optional)    | Change resource to be executable via chmod.
+extract (optional)       | Extract files from packed or compressed archives into the sandbox.
+cache (optional)         | Use caching mechanism provided by Mesos for resources.
+
+#### resources (list)
+
+Property             | Description
+-------------------  | ---------------------------------
+name (required)      | Name of the resource: cpus or mem.
+type (required)      | Type of resource. Should always be SCALAR.
+scalar (required)    | Value as a float for cpus or an int for mem (in MB).
+
+### volume_mounts (list)
+
+Property                  | Description
+------------------------  | ---------------------------------
+host_path (required)      | Host path to mount inside the container.
+container_path (required) | Path inside the container where `host_path` will be mounted.
+mode (required)           | Mode in which to mount the volume, Read-Write (RW) or Read-Only (RO).
+
+
+A sample configuration is as follows:
+
+```
+     {
+       "executor": {
+         "name": "myExecutor",
+         "command": {
+           "value": "myExecutor.sh",
+           "arguments": [
+             "localhost:2181",
+             "-verbose",
+             "-config myConfiguration.config"
+           ],
+           "uris": [
+             {
+               "value": "/dist/myExecutor.sh",
+               "executable": true,
+               "extract": false,
+               "cache": true
+             },
+             {
+               "value": "/home/user/myConfiguration.config",
+               "executable": false,
+               "extract": false,
+               "cache": false
+             }
+           ]
+         },
+         "resources": [
+           {
+             "name": "cpus",
+             "type": "SCALAR",
+             "scalar": {
+               "value": 1.00
+             }
+           },
+           {
+             "name": "mem",
+             "type": "SCALAR",
+             "scalar": {
+               "value": 512
+             }
+           }
+         ]
+       },
+       "volume_mounts": [
+         {
+           "mode": "RO",
+           "container_path": "/path/on/container",
+           "host_path": "/path/to/host/directory"
+         },
+         {
+           "mode": "RW",
+           "container_path": "/container",
+           "host_path": "/host"
+         }
+       ]
+     }
+```
+
+It should be noted that if you do not use Thermos or a Thermos-based executor, links in the scheduler's
+Web UI for tasks will not work (at least for the time being).
+Some information about launched tasks can still be accessed via the Mesos Web UI or via the Aurora Client.
+Furthermore, this configuration replaces the default Thermos executor.
+Work is in progress to allow multiple executors to co-exist within a single scheduler.
+
+### Docker containers
+In order for Aurora to launch jobs using docker containers, a few extra configuration options
+must be set.  The [docker containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/)
+must be enabled on the Mesos agents by launching them with the `--containerizers=docker,mesos` option.
+
+By default, Aurora will configure Mesos to copy the file specified in `-thermos_executor_path`
+into the container's sandbox.  If using a wrapper script to launch the thermos executor,
+specify the path to the wrapper in that argument. In addition, the path to the executor pex itself
+must be included in the `-thermos_executor_resources` option. Doing so will ensure that both the
+wrapper script and executor are correctly copied into the sandbox. Finally, ensure the wrapper
+script does not access resources outside of the sandbox, as when the script is run from within a
+docker container those resources will not exist.
+
+A scheduler flag, `-global_container_mounts`, allows mounting paths from the host (i.e. the agent machine)
+into all containers on that host. The format is a comma-separated list of `host_path:container_path[:mode]`
+tuples. For example, `-global_container_mounts=/opt/secret_keys_dir:/mnt/secret_keys_dir:ro` mounts
+`/opt/secret_keys_dir` from the agents into all launched containers. Valid modes are `ro` and `rw`.
+
+If you would like to run a container with a read-only filesystem, it may also be necessary to
+use the scheduler flag `-thermos_home_in_sandbox` in order to set `HOME` to the sandbox
+before the executor runs. This ensures that the executor/runner PEX extraction happens
+inside the sandbox instead of the container filesystem root.
+
+If you would like to supply your own parameters to `docker run` when launching jobs in docker
+containers, you may use the following flags:
+
+    -allow_docker_parameters
+    -default_docker_parameters
+
+`-allow_docker_parameters` controls whether or not users may pass their own configuration parameters
+through the job configuration files. If set to `false` (the default), the scheduler will reject
+jobs with custom parameters. *NOTE*: this setting should be used with caution as it allows any job
+owner to specify any parameters they wish, including those that may introduce security concerns
+(`privileged=true`, for example).
+
+`-default_docker_parameters` allows a cluster operator to specify a universal set of parameters that
+should be used for every container that does not have parameters explicitly configured at the job
+level. The argument accepts a multimap format:
+
+    -default_docker_parameters="read-only=true,tmpfs=/tmp,tmpfs=/run"
diff --git a/source/documentation/0.14.0/operations/installation.md b/source/documentation/0.14.0/operations/installation.md
new file mode 100644
index 0000000..4361379
--- /dev/null
+++ b/source/documentation/0.14.0/operations/installation.md
@@ -0,0 +1,324 @@
+# Installing Aurora
+
+Source and binary distributions can be found on our
+[downloads](https://aurora.apache.org/downloads/) page.  Installing from binary packages is
+recommended for most users.
+
+- [Installing the scheduler](#installing-the-scheduler)
+- [Installing worker components](#installing-worker-components)
+- [Installing the client](#installing-the-client)
+- [Installing Mesos](#installing-mesos)
+- [Troubleshooting](#troubleshooting)
+
+If our binary packages don't suit you, our package build toolchain makes it easy to build your
+own packages. See the [instructions](https://github.com/apache/aurora-packaging) to learn how.
+
+
+## Machine profiles
+
+Given that many of these components communicate over the network, there are numerous ways you could
+assemble them to create an Aurora cluster.  The simplest way is to think in terms of three machine
+profiles:
+
+### Coordinator
+**Components**: ZooKeeper, Aurora scheduler, Mesos master
+
+A small number of machines (typically 3 or 5) responsible for cluster orchestration.  In most cases
+it is fine to co-locate these components in anything but very large clusters (> 1000 machines).
+Beyond that point, operators will likely want to manage these services on separate machines.
+
+In practice, 5 coordinators have been shown to reliably manage clusters with tens of thousands of
+machines.
+
+### Worker
+**Components**: Aurora executor, Aurora observer, Mesos agent
+
+The bulk of the cluster, where services will actually run.
+
+### Client
+**Components**: Aurora client, Aurora admin client
+
+Any machines that users submit jobs from.
+
+
+## Installing the scheduler
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        sudo start mesos-master
+
+2. Install ZooKeeper
+
+        sudo apt-get install -y zookeeperd
+
+3. Install the Aurora scheduler
+
+        sudo add-apt-repository -y ppa:openjdk-r/ppa
+        sudo apt-get update
+        sudo apt-get install -y openjdk-8-jre-headless wget
+
+        sudo update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-scheduler_0.12.0_amd64.deb
+        sudo dpkg -i aurora-scheduler_0.12.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-master
+
+2. Install ZooKeeper
+
+        sudo rpm -Uvh https://archive.cloudera.com/cdh4/one-click-install/redhat/6/x86_64/cloudera-cdh-4-0.x86_64.rpm
+        sudo yum install -y java-1.8.0-openjdk-headless zookeeper-server
+
+        sudo service zookeeper-server init
+        sudo systemctl start zookeeper-server
+
+3. Install the Aurora scheduler
+
+        sudo yum install -y wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-scheduler-0.12.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-scheduler-0.12.0-1.el7.centos.aurora.x86_64.rpm
+
+### Finalizing
+By default, the scheduler will start in an uninitialized mode.  This is because external
+coordination is necessary to be certain operator error does not result in a quorum of schedulers
+starting up and believing their databases are empty when in fact they should be re-joining a
+cluster.
+
+Because of this, a fresh install of the scheduler will need intervention to start up.  First,
+stop the scheduler service.
+Ubuntu: `sudo stop aurora-scheduler`
+CentOS: `sudo systemctl stop aurora`
+
+Now initialize the database:
+
+    sudo -u aurora mkdir -p /var/lib/aurora/scheduler/db
+    sudo -u aurora mesos-log initialize --path=/var/lib/aurora/scheduler/db
+
+Now you can start the scheduler back up.
+Ubuntu: `sudo start aurora-scheduler`
+CentOS: `sudo systemctl start aurora`
+
+
+## Installing worker components
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo apt-get install -y python2.7 wget
+
+        # NOTE: This appears to be a missing dependency of the mesos deb package and is needed
+        # for the python mesos native bindings.
+        sudo apt-get -y install libcurl4-nss-dev
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-executor_0.12.0_amd64.deb
+        sudo dpkg -i aurora-executor_0.12.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo yum install -y python2 wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-executor-0.12.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-executor-0.12.0-1.el7.centos.aurora.x86_64.rpm
+
+### Configuration
+The executor typically does not require configuration.  Command line arguments can
+be passed to the executor via the `-thermos_executor_flags` argument on the scheduler.
+
+The observer needs to be configured to look at the correct Mesos directory in order to find task
+sandboxes. You should first find the Mesos working directory by looking for the Mesos agent
+`--work_dir` flag. You should see something like:
+
+        ps -eocmd | grep "mesos-slave" | grep -v grep | tr ' ' '\n' | grep "\--work_dir"
+        --work_dir=/var/lib/mesos
+
+If the flag is not set, you can view the default value like so:
+
+        mesos-slave --help
+        Usage: mesos-slave [options]
+
+          ...
+          --work_dir=VALUE      Directory path to place framework work directories
+                                (default: /tmp/mesos)
+          ...
+
+The value you find for `--work_dir`, `/var/lib/mesos` in this example, should match the Aurora
+observer value for `--mesos-root`.  You can look for that setting in a similar way on a worker
+node by grepping for `thermos_observer` and `--mesos-root`.  If the flag is not set, you can view
+the default value like so:
+
+        thermos_observer -h
+        Options:
+          ...
+          --mesos-root=MESOS_ROOT
+                                The mesos root directory to search for Thermos
+                                executor sandboxes [default: /var/lib/mesos]
+          ...
+
+In this case the default is `/var/lib/mesos` and we have a match. If there is no match, you can
+either adjust the Mesos agent start script(s) and restart the agent(s), or else adjust the
+Aurora observer start scripts and restart the observers.  To adjust the Aurora observer:
+
+#### Ubuntu Trusty
+
+    sudo sh -c 'echo "MESOS_ROOT=/tmp/mesos" >> /etc/default/thermos'
+
+NB: In Aurora releases up through 0.12.0, you'll also need to edit /etc/init/thermos.conf like so:
+
+    diff -C 1 /etc/init/thermos.conf.orig /etc/init/thermos.conf
+    *** /etc/init/thermos.conf.orig 2016-03-22 22:34:46.286199718 +0000
+    --- /etc/init/thermos.conf  2016-03-22 17:09:49.357689038 +0000
+    ***************
+    *** 24,25 ****
+    --- 24,26 ----
+          --port=${OBSERVER_PORT:-1338} \
+    +     --mesos-root=${MESOS_ROOT:-/var/lib/mesos} \
+          --log_to_disk=NONE \
+
+#### CentOS 7
+
+Make an edit to add the `--mesos-root` flag resulting in something like:
+
+    grep -A5 OBSERVER_ARGS /etc/sysconfig/thermos-observer
+    OBSERVER_ARGS=(
+      --port=1338
+      --mesos-root=/tmp/mesos
+      --log_to_disk=NONE
+      --log_to_stderr=google:INFO
+    )
+
+## Installing the client
+### Ubuntu Trusty
+
+    sudo apt-get install -y python2.7 wget
+
+    wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-tools_0.12.0_amd64.deb
+    sudo dpkg -i aurora-tools_0.12.0_amd64.deb
+
+### CentOS 7
+
+    sudo yum install -y python2 wget
+
+    wget -c https://apache.bintray.com/aurora/centos-7/aurora-tools-0.12.0-1.el7.centos.aurora.x86_64.rpm
+    sudo yum install -y aurora-tools-0.12.0-1.el7.centos.aurora.x86_64.rpm
+
+### Mac OS X
+
+    brew upgrade
+    brew install aurora-cli
+
+### Configuration
+Client configuration lives in a JSON file that describes the clusters available and how to reach
+them.  By default this file is at `/etc/aurora/clusters.json`.
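+
+A minimal sketch of such a file, assuming a single cluster named `devcluster` reachable via ZooKeeper
+(all values below are illustrative; see the
+[client cluster configuration](../../reference/client-cluster-configuration/) reference for the full
+set of fields):
+
+    cat /etc/aurora/clusters.json
+    [{
+      "name": "devcluster",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler",
+      "auth_mechanism": "UNAUTHENTICATED"
+    }]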
+
+Jobs may be submitted to the scheduler using the client, and are described with
+[job configurations](../../reference/configuration/) expressed in `.aurora` files.  Typically you will
+maintain a single job configuration file to describe one or more deployment environments (e.g.
+dev, test, prod) for a production job.
+
+
+## Installing Mesos
+Mesos uses a single package for the Mesos master and agent.  As a result, the package dependencies
+are identical for both.
+
+### Mesos on Ubuntu Trusty
+
+    sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF
+    DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]')
+    CODENAME=$(lsb_release -cs)
+
+    echo "deb http://repos.mesosphere.io/${DISTRO} ${CODENAME} main" | \
+      sudo tee /etc/apt/sources.list.d/mesosphere.list
+    sudo apt-get -y update
+
+    # Use `apt-cache showpkg mesos | grep [version]` to find the exact version.
+    sudo apt-get -y install mesos=0.25.0-0.2.70.ubuntu1404
+
+### Mesos on CentOS 7
+
+    sudo rpm -Uvh https://repos.mesosphere.io/el/7/noarch/RPMS/mesosphere-el-repo-7-1.noarch.rpm
+    sudo yum -y install mesos-0.25.0
+
+
+
+## Troubleshooting
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions here to help get you moving.
+
+### Replicated log not initialized
+
+#### Symptoms
+- Scheduler RPCs and web interface claim `Storage is not READY`
+- Scheduler log repeatedly prints messages like
+
+  ```
+  I1016 16:12:27.234133 26081 replica.cpp:638] Replica in EMPTY status
+  received a broadcasted recover request
+  I1016 16:12:27.234256 26084 recover.cpp:188] Received a recover response
+  from a replica in EMPTY status
+  ```
+
+#### Solution
+When you create a new cluster, you need to inform a quorum of schedulers that they are safe to
+consider their database to be empty by [initializing](#finalizing) the
+replicated log. This is done to prevent the scheduler from modifying the cluster state in the event
+of multiple simultaneous disk failures or, more likely, misconfiguration of the replicated log path.
+
+
+### Scheduler not registered
+
+#### Symptoms
+Scheduler log contains
+
+    Framework has not been registered within the tolerated delay.
+
+#### Solution
+Double-check that the scheduler is configured correctly to reach the Mesos master. If you are registering
+the master in ZooKeeper, make sure the command line argument to the master:
+
+    --zk=zk://$ZK_HOST:2181/mesos/master
+
+is the same as the one on the scheduler:
+
+    -mesos_master_address=zk://$ZK_HOST:2181/mesos/master
+
+
+### Scheduler not running
+
+#### Symptoms
+The scheduler process commits suicide regularly. This happens under error conditions, but
+also on purpose at regular intervals.
+
+#### Solution
+Aurora is meant to be run under supervision. You have to configure a supervisor like
+[Monit](http://mmonit.com/monit/) or [supervisord](http://supervisord.org/) to run the scheduler
+and restart it whenever it fails or exits on purpose.
+
+Aurora supports an active health checking protocol on its admin HTTP interface - if a `GET /health`
+times out or returns anything other than `200 OK` the scheduler process is unhealthy and should be
+restarted.
+
+For example, monit can be configured with
+
+    if failed port 8081 send "GET /health HTTP/1.0\r\n" expect "OK\n" with timeout 2 seconds for 10 cycles then restart
+
+assuming you set `-http_port=8081`.
diff --git a/source/documentation/0.14.0/operations/monitoring.md b/source/documentation/0.14.0/operations/monitoring.md
new file mode 100644
index 0000000..2ceda62
--- /dev/null
+++ b/source/documentation/0.14.0/operations/monitoring.md
@@ -0,0 +1,181 @@
+# Monitoring your Aurora cluster
+
+Before you start running important services in your Aurora cluster, it's important to set up
+monitoring and alerting of Aurora itself.  Most of your monitoring can be against the scheduler,
+since it will give you a global view of what's going on.
+
+## Reading stats
+The scheduler exposes a *lot* of instrumentation data via its HTTP interface. You can get a quick
+peek at the first few of these in our vagrant image:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars | head'
+    async_tasks_completed 1004
+    attribute_store_fetch_all_events 15
+    attribute_store_fetch_all_events_per_sec 0.0
+    attribute_store_fetch_all_nanos_per_event 0.0
+    attribute_store_fetch_all_nanos_total 3048285
+    attribute_store_fetch_all_nanos_total_per_sec 0.0
+    attribute_store_fetch_one_events 3391
+    attribute_store_fetch_one_events_per_sec 0.0
+    attribute_store_fetch_one_nanos_per_event 0.0
+    attribute_store_fetch_one_nanos_total 454690753
+
+These values are served as `Content-Type: text/plain`, with each line containing a space-separated metric
+name and value. Values may be integers, doubles, or strings (note: strings are static, others
+may be dynamic).
+
+If your monitoring infrastructure prefers JSON, the scheduler exports that as well:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars.json | python -mjson.tool | head'
+    {
+        "async_tasks_completed": 1009,
+        "attribute_store_fetch_all_events": 15,
+        "attribute_store_fetch_all_events_per_sec": 0.0,
+        "attribute_store_fetch_all_nanos_per_event": 0.0,
+        "attribute_store_fetch_all_nanos_total": 3048285,
+        "attribute_store_fetch_all_nanos_total_per_sec": 0.0,
+        "attribute_store_fetch_one_events": 3409,
+        "attribute_store_fetch_one_events_per_sec": 0.0,
+        "attribute_store_fetch_one_nanos_per_event": 0.0,
+
+This will be the same data as above, served with `Content-Type: application/json`.
+
+## Viewing live stat samples on the scheduler
+The scheduler uses the Twitter commons stats library, which keeps an internal time-series database
+of exported variables - nearly everything in `/vars` is available for instant graphing.  This is
+useful for debugging, but is not a replacement for an external monitoring system.
+
+You can view these graphs on a scheduler at `/graphview`.  It supports some composition and
+aggregation of values, which can be invaluable when triaging a problem.  For example, if you have
+the scheduler running in vagrant, check out these links:
+[simple graph](http://192.168.33.7:8081/graphview?query=jvm_uptime_secs)
+[complex composition](http://192.168.33.7:8081/graphview?query=rate\(scheduler_log_native_append_nanos_total\)%2Frate\(scheduler_log_native_append_events\)%2F1e6)
+
+### Counters and gauges
+Among numeric stats, there are two fundamental types of stats exported: _counters_ and _gauges_.
+Counters are guaranteed to be monotonically-increasing for the lifetime of a process, while gauges
+may decrease in value.  Aurora uses counters to represent things like the number of times an event
+has occurred, and gauges to capture things like the current length of a queue.  Counters are a
+natural fit for accurate composition into [rate ratios](http://en.wikipedia.org/wiki/Rate_ratio)
+(useful for sample-resistant latency calculation), while gauges are not.
+
+# Alerting
+
+## Quickstart
+If you are looking for just bare-minimum alerting to get something in place quickly, set up alerting
+on `framework_registered` and `task_store_LOST`. These will give you a decent picture of overall
+health.
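+
+For example, a bare-bones sketch of such a check against the `/vars` endpoint (assuming `-http_port=8081`
+and that the check runs against the leading scheduler; a production setup would feed these values into a
+real monitoring system instead of a shell script):
+
+    # Parse the plain-text "name value" lines exposed at /vars.
+    registered=$(curl -s localhost:8081/vars | awk '$1 == "framework_registered" {print $2}')
+    lost=$(curl -s localhost:8081/vars | awk '$1 == "task_store_LOST" {print $2}')
+    [ "$registered" = "1" ] || echo "ALERT: scheduler is not registered with the Mesos master"
+    echo "task_store_LOST is currently ${lost}"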
+
+## A note on thresholds
+One of the most difficult things in monitoring is choosing alert thresholds. With many of these
+stats, there is no value we can offer as a threshold that will be guaranteed to work for you. It
+will depend on the size of your cluster, number of jobs, churn of tasks in the cluster, etc. We
+recommend you start with a strict value after viewing a small amount of collected data, and then
+adjust thresholds as you see fit. Feel free to ask us if you would like to validate that your alerts
+and thresholds make sense.
+
+## Important stats
+
+### `jvm_uptime_secs`
+Type: integer counter
+
+The number of seconds the JVM process has been running. Comes from
+[RuntimeMXBean#getUptime()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/RuntimeMXBean.html#getUptime\(\))
+
+Detecting resets (decreasing values) on this stat will tell you that the scheduler is failing to
+stay alive.
+
+Look at the scheduler logs to identify the reason the scheduler is exiting.
+
+### `system_load_avg`
+Type: double gauge
+
+The current load average of the system for the last minute. Comes from
+[OperatingSystemMXBean#getSystemLoadAverage()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/OperatingSystemMXBean.html?is-external=true#getSystemLoadAverage\(\)).
+
+A high sustained value suggests that the scheduler machine may be over-utilized.
+
+Use standard unix tools like `top` and `ps` to track down the offending process(es).
+
+### `process_cpu_cores_utilized`
+Type: double gauge
+
+The current number of CPU cores in use by the JVM process. This should not exceed the number of
+logical CPU cores on the machine. Derived from
+[OperatingSystemMXBean#getProcessCpuTime()](http://docs.oracle.com/javase/7/docs/jre/api/management/extension/com/sun/management/OperatingSystemMXBean.html)
+
+A high sustained value indicates that the scheduler is overworked. Due to current internal design
+limitations, if this value is sustained at `1`, there is a good chance the scheduler is under water.
+
+There are two main inputs that tend to drive this figure: task scheduling attempts and status
+updates from Mesos.  You may see activity in the scheduler logs to give an indication of where
+time is being spent.  Beyond that, it really takes good familiarity with the code to effectively
+triage this.  We suggest engaging with an Aurora developer.
+
+### `task_store_LOST`
+Type: integer gauge
+
+The number of tasks stored in the scheduler that are in the `LOST` state, and have been rescheduled.
+
+If this value is increasing at a high rate, it is a sign of trouble.
+
+There are many sources of `LOST` tasks in Mesos: the scheduler, master, agent, and executor can all
+trigger this.  The first step is to look in the scheduler logs for `LOST` to identify where the
+state changes are originating.
+
+### `scheduler_resource_offers`
+Type: integer counter
+
+The number of resource offers that the scheduler has received.
+
+For a healthy scheduler, this value must be increasing over time.
+
+Assuming the scheduler is up and otherwise healthy, you will want to check if the master thinks it
+is sending offers. You should also look at the master's web interface to see if it has a large
+number of outstanding offers that it is waiting to be returned.
+
+### `framework_registered`
+Type: binary integer counter
+
+Will be `1` for the leading scheduler that is registered with the Mesos master, and `0` for passive
+schedulers.
+
+A sustained period without a `1` (or where `sum() != 1`) warrants investigation.
+
+If there is no leading scheduler, look in the scheduler and master logs for why.  If there are
+multiple schedulers claiming leadership, this suggests a split brain and warrants filing a critical
+bug.
+
+### `rate(scheduler_log_native_append_nanos_total)/rate(scheduler_log_native_append_events)`
+Type: rate ratio of integer counters
+
+This composes two counters to compute a windowed figure for the latency of replicated log writes.
+
+A hike in this value suggests disk bandwidth contention.
+
+Look in scheduler logs for any reported oddness with saving to the replicated log. Also use
+standard tools like `vmstat` and `iotop` to identify whether the disk has become slow or
+over-utilized. We suggest using a dedicated disk for the replicated log to mitigate this.
+
+### `timed_out_tasks`
+Type: integer counter
+
+Tracks the number of times the scheduler has given up while waiting
+(for `-transient_task_state_timeout`) to hear back about a task that is in a transient state
+(e.g. `ASSIGNED`, `KILLING`), and has moved to `LOST` before rescheduling.
+
+This value is currently known to increase occasionally when the scheduler fails over
+([AURORA-740](https://issues.apache.org/jira/browse/AURORA-740)). However, any large spike in this
+value warrants investigation.
+
+The scheduler will log when it times out a task. You should trace the task ID of the timed out
+task into the master, agent, and/or executors to determine where the message was dropped.
+
+### `http_500_responses_events`
+Type: integer counter
+
+The total number of HTTP 500 status responses sent by the scheduler. Includes API and asset serving.
+
+An increase warrants investigation.
+
+Look in scheduler logs to identify why the scheduler returned a 500; there should be a stack trace.
diff --git a/source/documentation/0.14.0/operations/security.md b/source/documentation/0.14.0/operations/security.md
new file mode 100644
index 0000000..46e0b8a
--- /dev/null
+++ b/source/documentation/0.14.0/operations/security.md
@@ -0,0 +1,340 @@
+Securing your Aurora Cluster
+============================
+
+Aurora integrates with [Apache Shiro](http://shiro.apache.org/) to provide security
+controls for its API. In addition to providing some useful features out of the box, Shiro
+also allows Aurora cluster administrators to adapt the security system to their organization’s
+existing infrastructure. The announcer in the Aurora thermos executor also supports security
+controls for talking to ZooKeeper.
+
+
+- [Enabling Security](#enabling-security)
+- [Authentication](#authentication)
+	- [HTTP Basic Authentication](#http-basic-authentication)
+		- [Server Configuration](#server-configuration)
+		- [Client Configuration](#client-configuration)
+	- [HTTP SPNEGO Authentication (Kerberos)](#http-spnego-authentication-kerberos)
+		- [Server Configuration](#server-configuration-1)
+		- [Client Configuration](#client-configuration-1)
+- [Authorization](#authorization)
+	- [Using an INI file to define security controls](#using-an-ini-file-to-define-security-controls)
+		- [Caveats](#caveats)
+- [Implementing a Custom Realm](#implementing-a-custom-realm)
+	- [Packaging a realm module](#packaging-a-realm-module)
+- [Known Issues](#known-issues)
+- [Announcer Authentication](#announcer-authentication)
+    - [ZooKeeper authentication configuration](#zookeeper-authentication-configuration)
+    - [Executor settings](#executor-settings)
+
+# Enabling Security
+
+There are two major components of security:
+[authentication and authorization](http://en.wikipedia.org/wiki/Authentication#Authorization).  A
+cluster administrator may choose the approach used for each, and may also implement custom
+mechanisms for either.  Later sections describe the options available. To enable authentication
+ for the announcer, see [Announcer Authentication](#announcer-authentication)
+
+
+# Authentication
+
+The scheduler must be configured with instructions for how to process authentication
+credentials at a minimum.  There are currently two built-in authentication schemes -
+[HTTP Basic Authentication](http://en.wikipedia.org/wiki/Basic_access_authentication), and
+[SPNEGO](http://en.wikipedia.org/wiki/SPNEGO) (Kerberos).
+
+## HTTP Basic Authentication
+
+Basic Authentication is a very quick way to add *some* security.  It is supported
+by all major browsers and HTTP client libraries with minimal work.  However,
+before relying on Basic Authentication you should be aware of the [security
+considerations](http://tools.ietf.org/html/rfc2617#section-4).
+
+### Server Configuration
+
+At a minimum you need to set 3 command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_realm_modules=INI_AUTHNZ
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+[users]
+sally = apple, admin
+
+[roles]
+admin = *
+```
+
+The details of the security.ini file are explained below. Note that this file contains plaintext,
+unhashed passwords.
+
+### Client Configuration
+
+To configure the client for HTTP Basic authentication, add an entry to `~/.netrc` with your credentials:
+
+```
+% cat ~/.netrc
+# ...
+
+machine aurora.example.com
+login sally
+password apple
+
+# ...
+```
+
+No changes are required to `clusters.json`.
+
+## HTTP SPNEGO Authentication (Kerberos)
+
+### Server Configuration
+At a minimum you need to set 5 command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=NEGOTIATE
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+% cat path/to/security.ini
+[users]
+sally = _, admin
+
+[roles]
+admin = *
+```
+
+What's going on here? First, Aurora must be configured to request Kerberos credentials when presented with an
+unauthenticated request. This is achieved by setting
+
+```
+-http_authentication_mechanism=NEGOTIATE
+```
+
+Next, a Realm module must be configured to **authenticate** the current request using the Kerberos
+credentials that were requested. Aurora ships with a realm module that can do this
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN[,...]
+```
+
+The Kerberos5Realm requires a keytab file and a server principal name. The principal name will usually
+be in the form `HTTP/aurora.example.com@EXAMPLE.COM`.
+
+```
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+```
+
+The Kerberos5 realm module is authentication-only. For scheduler security to work you must also
+enable a realm module that provides an Authorizer implementation. For example, to do this using the
+IniShiroRealmModule:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+```
+
+You can then configure authorization using a security.ini file as described below
+(the password field is ignored). You must configure the realm module with the path to this file:
+
+```
+-shiro_ini_path=path/to/security.ini
+```
+
+### Client Configuration
+To use Kerberos on the client-side you must build Kerberos-enabled client binaries. Do this with
+
+```
+./pants binary src/main/python/apache/aurora/kerberos:kaurora
+./pants binary src/main/python/apache/aurora/kerberos:kaurora_admin
+```
+
+You must also configure each cluster where you've enabled Kerberos on the scheduler
+to use Kerberos authentication. Do this by setting `auth_mechanism` to `KERBEROS`
+in `clusters.json`.
+
+```
+% cat ~/.aurora/clusters.json
+{
+    "devcluser": {
+        "auth_mechanism": "KERBEROS",
+        ...
+    },
+    ...
+}
+```
+
+# Authorization
+Given a means to authenticate the entity a client claims to be, we need to define what privileges it has.
+
+## Using an INI file to define security controls
+
+The simplest security configuration for Aurora is an INI file on the scheduler.  For small
+clusters, or clusters where the users and access controls change relatively infrequently, this is
+likely the preferred approach.  However you may want to avoid this approach if access permissions
+are rapidly changing, or if your access control information already exists in another system.
+
+You can enable INI-based configuration with the following scheduler command line arguments:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_ini_path=path/to/security.ini
+```
+
+*note* As the argument name reveals, this is using Shiro’s
+[IniRealm](http://shiro.apache.org/configuration.html#Configuration-INIConfiguration) behind
+the scenes.
+
+The INI file will contain two sections - users and roles.  Here’s an example for what might
+be in security.ini:
+
+```
+[users]
+sally = apple, admin
+jim = 123456, accounting
+becky = letmein, webapp
+larry = 654321,accounting
+steve = password
+
+[roles]
+admin = *
+accounting = thrift.AuroraAdmin:setQuota
+webapp = thrift.AuroraSchedulerManager:*:webapp
+```
+
+The users section defines user credentials and the role(s) they are members of.  These lines
+are of the format `<user> = <password>[, <role>...]`.  As you probably noticed, the passwords are
+in plaintext and as a result read access to this file should be restricted.
+
+In this configuration, each user has different privileges for actions in the cluster because
+of the roles they are a part of:
+
+* admin is granted all privileges
+* accounting may adjust the amount of resource quota for any role
+* webapp represents a collection of jobs that make up a service, and its members may create and modify any jobs owned by it
+
+### Caveats
+You might find documentation on the Internet suggesting there are additional sections in `shiro.ini`,
+like `[main]` and `[urls]`. These are not supported by Aurora as it uses a different mechanism to configure
+those parts of Shiro. Think of Aurora's `security.ini` as a subset with only `[users]` and `[roles]` sections.
+
+## Implementing Delegated Authorization
+
+It is possible to leverage Shiro's `runAs` feature by implementing a custom Servlet Filter that provides
+the capability and passing its fully qualified class name to the command line argument
+`-shiro_after_auth_filter`. The filter is registered in the same filter chain as the Shiro auth filters
+and is placed after the Shiro auth filters in the filter chain. This ensures that the Filter is invoked
+after the Shiro filters have had a chance to authenticate the request.
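+
+For example (the filter class name below is hypothetical):
+
+```
+-shiro_after_auth_filter=com.example.DelegatingAuthFilter
+```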
+
+# Implementing a Custom Realm
+
+Since Aurora’s security is backed by [Apache Shiro](https://shiro.apache.org), you can implement a
+custom [Realm](http://shiro.apache.org/realm.html) to define organization-specific security behavior.
+
+In addition to using Shiro's standard APIs to implement a Realm you can link against Aurora to
+access the type-safe Permissions Aurora uses. See the Javadoc for `org.apache.aurora.scheduler.spi`
+for more information.
+
+## Packaging a realm module
+Package your custom Realm(s) with a Guice module that exposes a `Set<Realm>` multibinding.
+
+```java
+package com.example;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.multibindings.Multibinder;
+import org.apache.shiro.realm.Realm;
+
+public class MyRealmModule extends AbstractModule {
+  @Override
+  public void configure() {
+    Realm myRealm = new MyRealm();
+
+    Multibinder.newSetBinder(binder(), Realm.class).addBinding().toInstance(myRealm);
+  }
+
+  static class MyRealm implements Realm {
+    // Realm implementation.
+  }
+}
+```
+
+To use your module in the scheduler, include it as a realm module based on its fully-qualified
+class name:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ,com.example.MyRealmModule
+```
+
+# Known Issues
+
+While the APIs and SPIs we ship with are stable as of 0.8.0, we are aware of several incremental
+improvements. Please follow, vote, or send patches.
+
+Relevant tickets:
+* [AURORA-343](https://issues.apache.org/jira/browse/AURORA-343): HTTPS support
+* [AURORA-1248](https://issues.apache.org/jira/browse/AURORA-1248): Client retries 4xx errors
+* [AURORA-1279](https://issues.apache.org/jira/browse/AURORA-1279): Remove kerberos-specific build targets
+* [AURORA-1293](https://issues.apache.org/jira/browse/AURORA-1291): Consider defining a JSON format in place of INI
+* [AURORA-1179](https://issues.apache.org/jira/browse/AURORA-1179): Support hashed passwords in security.ini
+* [AURORA-1295](https://issues.apache.org/jira/browse/AURORA-1295): Support security for the ReadOnlyScheduler service
+
+# Announcer Authentication
+The Thermos executor can be configured to authenticate with ZooKeeper and include
+an [ACL](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl)
+on the nodes it creates, which will specify
+the privileges of clients to perform different actions on these nodes.  This
+feature is enabled by specifying an ACL configuration file to the executor with the
+`--announcer-zookeeper-auth-config` command line argument.
+
+When this feature is _not_ enabled, nodes created by the executor will have 'world/all' permission
+(`ZOO_OPEN_ACL_UNSAFE`).  In most production environments, operators should specify an ACL and
+limit access.
+
+## ZooKeeper Authentication Configuration
+The configuration file must be formatted as JSON with the following schema:
+
+```json
+{
+  "auth": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>"
+    }
+  ],
+  "acl": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>",
+      "permissions": {
+        "read": <bool>,
+        "write": <bool>,
+        "create": <bool>,
+        "delete": <bool>,
+        "admin": <bool>
+      }
+    }
+  ]
+}
+```
+
+The `scheme`
+defines the encoding of the credential field.  Note that these fields are passed directly to
+ZooKeeper (except in the case of _digest_ scheme, where the executor will hash and encode
+the credential appropriately before passing it to ZooKeeper). In addition to `acl`, a list of
+authentication credentials must be provided in `auth` to use for the connection.
+
+All properties of the `permissions` object will default to `false` if not provided.
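+
+As a concrete (illustrative) example using the _digest_ scheme, where the credential is a plaintext
+`user:password` pair that the executor will hash before handing it to ZooKeeper:
+
+```json
+{
+  "auth": [
+    {"scheme": "digest", "credential": "aurora:secret"}
+  ],
+  "acl": [
+    {
+      "scheme": "digest",
+      "credential": "aurora:secret",
+      "permissions": {"read": true, "write": true, "create": true, "delete": true, "admin": true}
+    }
+  ]
+}
+```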
+
+## Executor settings
+To enable the executor to authenticate against ZK, `--announcer-zookeeper-auth-config` should be
+set to the path of the configuration file.
\ No newline at end of file
diff --git a/source/documentation/0.14.0/operations/storage.md b/source/documentation/0.14.0/operations/storage.md
new file mode 100644
index 0000000..874dfe6
--- /dev/null
+++ b/source/documentation/0.14.0/operations/storage.md
@@ -0,0 +1,97 @@
+# Aurora Scheduler Storage
+
+- [Overview](#overview)
+- [Replicated Log Configuration](#replicated-log-configuration)
+- [Backup Configuration](#backup-configuration)
+- [Storage Semantics](#storage-semantics)
+  - [Reads, writes, modifications](#reads-writes-modifications)
+    - [Read lifecycle](#read-lifecycle)
+    - [Write lifecycle](#write-lifecycle)
+  - [Atomicity, consistency and isolation](#atomicity-consistency-and-isolation)
+  - [Population on restart](#population-on-restart)
+
+
+## Overview
+
+The Aurora scheduler maintains data that needs to be persisted to survive failovers and restarts.
+For example:
+
+* Task configurations and scheduled task instances
+* Job update configurations and update progress
+* Production resource quotas
+* Mesos resource offer host attributes
+
+Aurora solves its persistence needs by leveraging the Mesos implementation of a Paxos replicated
+log [[1]](https://ramcloud.stanford.edu/~ongaro/userstudy/paxos.pdf)
+[[2]](http://en.wikipedia.org/wiki/State_machine_replication) with a key-value
+[LevelDB](https://github.com/google/leveldb) storage as the persistence medium.
+
+Conceptually, it can be represented by the following major components:
+
+* Volatile storage: in-memory cache of all available data. Implemented via in-memory
+[H2 Database](http://www.h2database.com/html/main.html) and accessed via
+[MyBatis](http://mybatis.github.io/mybatis-3/).
+* Log manager: interface between Aurora storage and Mesos replicated log. The default schema format
+is [thrift](https://github.com/apache/thrift). Data is stored in serialized binary form.
+* Snapshot manager: all data is periodically persisted in Mesos replicated log in a single snapshot.
+This helps establish periodic recovery checkpoints and speeds up volatile storage recovery on
+restart.
+* Backup manager: as a precaution, snapshots are periodically written out into backup files.
+This solves a [disaster recovery problem](../backup-restore/)
+in case of a complete loss or corruption of Mesos log files.
+
+![Storage hierarchy](../images/storage_hierarchy.png)
+
+
+## Storage Semantics
+
+This section covers implementation details of the Aurora storage system. Understanding them can be
+useful when investigating performance issues.
+
+### Reads, writes, modifications
+
+All services in Aurora access data via a set of predefined store interfaces (aka stores) logically
+grouped by the type of data they serve. Every interface defines a specific set of operations allowed
+on the data, thus abstracting away storage access and the actual persistence implementation. The
+latter is especially important given the general immutability of persisted data. With the Mesos
+replicated log as the underlying persistence solution, data can be read and written easily but not
+modified. All modifications are simulated by saving new versions of modified objects. This, together
+with general performance considerations, justifies the existence of the volatile in-memory store.
+
+#### Read lifecycle
+
+There are two types of reads available in Aurora: consistent and weakly-consistent. The difference
+is explained [below](#atomicity-consistency-and-isolation).
+
+All reads are served from volatile storage, making them generally cheap operations from a
+performance standpoint. The majority of the volatile stores are represented by the
+in-memory H2 database. This allows for rich schema definitions, queries and relationships that
+key-value storage is unable to match.
+
+#### Write lifecycle
+
+Writes are more involved operations since, in addition to updating the volatile store, data has to be
+appended to the replicated log. Data is not available for reads until fully acknowledged by both the
+replicated log and volatile storage.
+
+### Atomicity, consistency and isolation
+
+Aurora uses [write-ahead logging](http://en.wikipedia.org/wiki/Write-ahead_logging) to ensure
+consistency between replicated and volatile storage. In Aurora, data is first written into the
+replicated log and only then updated in the volatile store.
+
+Aurora storage uses read-write locks to serialize data mutations and provide a consistent view of the
+available data. The `Storage` interface exposes three major types of operations:
+
+* `consistentRead` - access is locked using the reader's lock and provides a consistent view on read
+* `weaklyConsistentRead` - access is lock-less. Delivers the best contention performance but may result
+in stale reads
+* `write` - access is fully serialized using the writer's lock. Operation success requires both the
+volatile and replicated writes to succeed.
+
+The consistency of the volatile store is enforced via H2 transactional isolation.
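+
+The scheduler itself is implemented in Java; the following is only an illustrative Python sketch of
+this write-ahead ordering and locking pattern, not Aurora's actual implementation:
+
+    import threading
+
+    class WriteAheadStorage(object):
+      def __init__(self):
+        self._lock = threading.RLock()
+        self._log = []        # stands in for the Mesos replicated log
+        self._volatile = {}   # stands in for the in-memory volatile store
+
+      def write(self, key, value):
+        with self._lock:                    # writes are fully serialized
+          self._log.append((key, value))    # append to the durable log first...
+          self._volatile[key] = value       # ...and only then update the volatile store
+
+      def consistent_read(self, key):
+        with self._lock:                    # serialized with writes for a consistent view
+          return self._volatile.get(key)
+
+      def weakly_consistent_read(self, key):
+        return self._volatile.get(key)      # lock-less; may observe a slightly stale value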
+
+### Population on restart
+
+Any time a scheduler restarts, it restores its volatile state from the most recent position recorded
+in the replicated log by restoring the snapshot and replaying individual log entries on top to fully
+recover the state up to the last write.
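+
+As a rough, purely illustrative sketch (not Aurora code), the recovery step amounts to:
+
+    def recover(snapshot, log_entries_after_snapshot):
+      # Start from the most recent snapshot, then replay every log entry
+      # recorded after it to rebuild the volatile state up to the last write.
+      state = dict(snapshot)
+      for key, value in log_entries_after_snapshot:
+        state[key] = value
+      return state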
diff --git a/source/documentation/0.14.0/reference/client-cluster-configuration.md b/source/documentation/0.14.0/reference/client-cluster-configuration.md
new file mode 100644
index 0000000..5e85802
--- /dev/null
+++ b/source/documentation/0.14.0/reference/client-cluster-configuration.md
@@ -0,0 +1,93 @@
+# Client Cluster Configuration
+
+A cluster configuration file is used by the Aurora client to describe the Aurora clusters with
+which it can communicate. Ultimately this allows client users to reference clusters with short names
+like us-east and eu.
+
+A cluster configuration is formatted as JSON.  The simplest cluster configuration is one that
+communicates with a single (non-leader-elected) scheduler.  For example:
+
+    [{
+      "name": "example",
+      "scheduler_uri": "http://localhost:55555",
+    }]
+
+
+A configuration for a leader-elected scheduler would contain something like:
+
+    [{
+      "name": "example",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler"
+    }]
+
+
+The following properties may be set:
+
+  **Property**             | **Type** | **Description**
+  :------------------------| :------- | :--------------
+   **name**                | String   | Cluster name (Required)
+   **slave_root**          | String   | Path to Mesos agent work dir (Required)
+   **slave_run_directory** | String   | Name of Mesos agent run dir (Required)
+   **zk**                  | String   | Hostname of ZooKeeper instance used to resolve Aurora schedulers.
+   **zk_port**             | Integer  | Port of ZooKeeper instance used to locate Aurora schedulers (Default: 2181)
+   **scheduler_zk_path**   | String   | ZooKeeper path under which scheduler instances are registered.
+   **scheduler_uri**       | String   | URI of Aurora scheduler instance.
+   **proxy_url**           | String   | Used by the client to format URLs for display.
+   **auth_mechanism**      | String   | The authentication mechanism to use when communicating with the scheduler. (Default: UNAUTHENTICATED)
+
+
+## Details
+
+### `name`
+
+The name of the Aurora cluster represented by this entry. This name will be the `cluster` portion of
+any job keys identifying jobs running within the cluster.
+
+### `slave_root`
+
+The path on the Mesos agents where executing tasks can be found. It is used in combination with the
+`slave_run_directory` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This value should match the value passed to `mesos-slave`
+as `-work_dir`.
+
+### `slave_run_directory`
+
+The name of the directory where the task run can be found. This is used in combination with the
+`slave_root` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This should almost always be set to `latest`.
+
+### `zk`
+
+The hostname of the ZooKeeper instance used to resolve the Aurora scheduler. Aurora uses ZooKeeper
+to elect a leader. The client will connect to this ZooKeeper instance to determine the current
+leader. This host should match the host passed to the scheduler as `-zk_endpoints`.
+
+### `zk_port`
+
+The port on which the ZooKeeper instance is running. If not set this will default to the standard
+ZooKeeper port of 2181. This port should match the port in the host passed to the scheduler as
+`-zk_endpoints`.
+
+### `scheduler_zk_path`
+
+The path on the ZooKeeper instance under which the Aurora serverset is registered. This value should
+match the value passed to the scheduler as `-serverset_path`.
+
+### `scheduler_uri`
+
+The URI of the scheduler. This would be used in place of the ZooKeeper related configuration above
+in circumstances where direct communication with a single scheduler is needed (e.g. testing
+environments). It is strongly advised to **never** use this property for production deploys.
+
+### `proxy_url`
+
+If `proxy_url` is set, its value is used as the base URL instead of the hostname of the leading
+scheduler. In that scenario the value for `proxy_url` would be, for example, the URL of your VIP
+in a load balancer or a round-robin DNS name.
+
+### `auth_mechanism`
+
+The identifier of an authentication mechanism that the client should use when communicating with the
+scheduler. Support for values other than `UNAUTHENTICATED` requires a matching scheduler-side
+[security configuration](../../operations/security/).
diff --git a/source/documentation/0.14.0/reference/client-commands.md b/source/documentation/0.14.0/reference/client-commands.md
new file mode 100644
index 0000000..36f0ecb
--- /dev/null
+++ b/source/documentation/0.14.0/reference/client-commands.md
@@ -0,0 +1,326 @@
+Aurora Client Commands
+======================
+
+- [Introduction](#introduction)
+- [Cluster Configuration](#cluster-configuration)
+- [Job Keys](#job-keys)
+- [Modifying Aurora Client Commands](#modifying-aurora-client-commands)
+- [Regular Jobs](#regular-jobs)
+    - [Creating and Running a Job](#creating-and-running-a-job)
+    - [Running a Command On a Running Job](#running-a-command-on-a-running-job)
+    - [Killing a Job](#killing-a-job)
+    - [Adding Instances](#adding-instances)
+    - [Updating a Job](#updating-a-job)
+        - [Coordinated job updates](#coordinated-job-updates)
+    - [Renaming a Job](#renaming-a-job)
+    - [Restarting Jobs](#restarting-jobs)
+- [Cron Jobs](#cron-jobs)
+- [Comparing Jobs](#comparing-jobs)
+- [Viewing/Examining Jobs](#viewingexamining-jobs)
+    - [Listing Jobs](#listing-jobs)
+    - [Inspecting a Job](#inspecting-a-job)
+    - [Versions](#versions)
+    - [Checking Your Quota](#checking-your-quota)
+    - [Finding a Job on Web UI](#finding-a-job-on-web-ui)
+    - [Getting Job Status](#getting-job-status)
+    - [Opening the Web UI](#opening-the-web-ui)
+    - [SSHing to a Specific Task Machine](#sshing-to-a-specific-task-machine)
+    - [Templating Command Arguments](#templating-command-arguments)
+
+Introduction
+------------
+
+Once you have written an `.aurora` configuration file that describes
+your Job and its parameters and functionality, you interact with Aurora
+using Aurora Client commands. This document describes all of these commands
+and how and when to use them. All Aurora Client commands start with
+`aurora`, followed by the name of the specific command and its
+arguments.
+
+*Job keys* are a very common argument to Aurora commands, as well as the
+gateway to useful information about a Job. Before using Aurora, you
+should read the next section which describes them in detail. The section
+after that briefly describes how you can modify the behavior of certain
+Aurora Client commands, linking to a detailed document about how to do
+that.
+
+This is followed by the Regular Jobs section, which describes the basic
+Client commands for creating, running, and manipulating Aurora Jobs.
+After that are sections on Comparing Jobs and Viewing/Examining Jobs. In
+other words, various commands for getting information and metadata about
+Aurora Jobs.
+
+Cluster Configuration
+---------------------
+
+The client must be able to find a configuration file that specifies available clusters. This file
+declares shorthand names for clusters, which are in turn referenced by job configuration files
+and client commands.
+
+The client will load at most two configuration files, making both of their defined clusters
+available. The first is intended to be a system-installed cluster, using the path specified in
+the environment variable `AURORA_CONFIG_ROOT`, defaulting to `/etc/aurora/clusters.json` if the
+environment variable is not set. The second is a user-installed file, located at
+`~/.aurora/clusters.json`.
+
+For more details on cluster configuration see the
+[Client Cluster Configuration](../client-cluster-configuration/) documentation.
+
+Job Keys
+--------
+
+A job key is a unique system-wide identifier for an Aurora-managed
+Job, for example `cluster1/web-team/test/experiment204`. It is a 4-tuple
+consisting of, in order, *cluster*, *role*, *environment*, and
+*jobname*, separated by `/`s. Cluster is the name of an Aurora
+cluster. Role is the Unix service account under which the Job
+runs. Environment is a namespace component like `devel`, `test`,
+`prod`, or `stagingN`. Jobname is the Job's name.
+
+The combination of all four values uniquely specifies the Job. If any
+one value is different from that of another job key, the two job keys
+refer to different Jobs. For example, job key
+`cluster1/tyg/prod/workhorse` is different from
+`cluster1/tyg/prod/workcamel` is different from
+`cluster2/tyg/prod/workhorse` is different from
+`cluster2/foo/prod/workhorse` is different from
+`cluster1/tyg/test/workhorse`.
+
+Role names are user accounts existing on the agent machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
+Modifying Aurora Client Commands
+--------------------------------
+
+For certain Aurora Client commands, you can define hook methods that run
+either before or after an action that takes place during the command's
+execution, as well as based on whether the action finished successfully or failed
+during execution. Basically, a hook is code that lets you extend the
+command's actions. The hook executes on the client side, specifically on
+the machine executing Aurora commands.
+
+Hooks can be associated with these Aurora Client commands:
+
+  - `job create`
+  - `job kill`
+  - `job restart`
+
+The process for writing and activating them is complex enough
+that we explain it in a devoted document, [Hooks for Aurora Client API](../client-hooks/).
+
+Regular Jobs
+------------
+
+This section covers Aurora commands related to running, killing,
+renaming, updating, and restarting a basic Aurora Job.
+
+### Creating and Running a Job
+
+    aurora job create <job key> <configuration file>
+
+Creates and then runs a Job with the specified job key based on a `.aurora` configuration file.
+The configuration file may also contain and activate hook definitions.
+
+### Running a Command On a Running Job
+
+    aurora task run CLUSTER/ROLE/ENV/NAME[/INSTANCES] <cmd>
+
+Runs a shell command on all machines currently hosting shards of a
+single Job.
+
+`run` supports the same command line wildcards used to populate a Job's
+commands; i.e. anything in the `{{mesos.*}}` and `{{thermos.*}}`
+namespaces.
+
+### Killing a Job
+
+    aurora job killall CLUSTER/ROLE/ENV/NAME
+
+Kills all Tasks associated with the specified Job, blocking until all
+are terminated. Defaults to killing all instances in the Job.
+
+The `<configuration file>` argument for `kill` is optional. Use it only
+if it contains hook definitions and activations that affect the
+kill command.
+
+### Adding Instances
+
+    aurora job add CLUSTER/ROLE/ENV/NAME/INSTANCE <count>
+
+Adds `<count>` instances to the existing job. The configuration of the new instances is derived from
+an active job instance pointed to by the `/INSTANCE` part of the job specification. This command is
+a simpler way to scale out an existing job when an instance with desired task configuration
+already exists. Use `aurora update start` to add instances with a new (updated) configuration.
+
+### Updating a Job
+
+You can manage job updates using the `aurora update` command.  Please see
+[the Job Update documentation](../../features/job-updates/) for more details.
+
+
+### Renaming a Job
+
+Renaming is a tricky operation as downstream clients must be informed of
+the new name. A conservative approach
+to renaming suitable for production services is:
+
+1.  Modify the Aurora configuration file to change the role,
+    environment, and/or name as appropriate to the standardized naming
+    scheme.
+2.  Check that only these naming components have changed
+    with `aurora diff`.
+
+        aurora job diff CLUSTER/ROLE/ENV/NAME <job_configuration>
+
+3.  Create the (identical) job at the new key. You may need to request a
+    temporary quota increase.
+
+        aurora job create CLUSTER/ROLE/ENV/NEW_NAME <job_configuration>
+
+4.  Migrate all clients over to the new job key. Update all links and
+    dashboards. Ensure that both job keys run identical versions of the
+    code while in this state.
+5.  After verifying that all clients have successfully moved over, kill
+    the old job.
+
+        aurora job killall CLUSTER/ROLE/ENV/NAME
+
+6.  If you received a temporary quota increase, be sure to let the
+    powers that be know you no longer need the additional capacity.
+
+### Restarting Jobs
+
+`restart` restarts all shards of the Job identified by the given job key:
+
+    aurora job restart CLUSTER/ROLE/ENV/NAME[/INSTANCES]
+
+Restarts are controlled on the client side, so aborting
+the `job restart` command halts the restart operation.
+
+**Note**: `job restart` only applies its command line arguments; it does not
+use and is not affected by `update.config`. Restarting
+does ***not*** involve a configuration change. To update the
+configuration, use `update.config`.
+
+The `--config` argument for restart is optional. Use it only
+if it contains hook definitions and activations that affect the
+`job restart` command.
+
+Cron Jobs
+---------
+
+You can manage cron jobs using the `aurora cron` command.  Please see
+[the Cron Jobs Feature](../../features/cron-jobs/) for more details.
+
+Comparing Jobs
+--------------
+
+    aurora job diff CLUSTER/ROLE/ENV/NAME <job configuration>
+
+Compares a job configuration against a running job. By default the diff
+is determined using `diff`, though you may choose an alternate
+diff program by specifying the `DIFF_VIEWER` environment variable.
+
+Viewing/Examining Jobs
+----------------------
+
+Above we discussed creating, killing, and updating Jobs. Here we discuss
+how to view and examine Jobs.
+
+### Listing Jobs
+
+    aurora config list <job configuration>
+
+Lists all Jobs registered with the Aurora scheduler in the named cluster for the named role.
+
+### Inspecting a Job
+
+    aurora job inspect CLUSTER/ROLE/ENV/NAME <job configuration>
+
+`inspect` verifies that its specified job can be parsed from a
+configuration file, and displays the parsed configuration.
+
+### Checking Your Quota
+
+    aurora quota get CLUSTER/ROLE
+
+Prints the production quota allocated to the role in the given
+cluster. Only non-[dedicated](../../features/constraints/#dedicated-attribute)
+[production](../configuration/#job-objects) jobs consume quota.
+
+### Finding a Job on Web UI
+
+When you create a job, part of the output response contains a URL that goes
+to the job's scheduler UI page. For example:
+
+    vagrant@precise64:~$ aurora job create devcluster/www-data/prod/hello /vagrant/examples/jobs/hello_world.aurora
+    INFO] Creating job hello
+    INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+    INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+You can go to the scheduler UI page for this job via `http://precise64:8081/scheduler/www-data/prod/hello`.
+You can go to the overall scheduler UI page by truncating that URL after `scheduler`: `http://precise64:8081/scheduler`.
+
+Once you click through to a role page, you see Jobs arranged
+separately by pending jobs, active jobs and finished jobs.
+Jobs are arranged by role, typically a service account for
+production jobs and user accounts for test or development jobs.
+
+### Getting Job Status
+
+    aurora job status <job_key>
+
+Returns the status of recent tasks associated with the Job specified by
+`job_key` in its cluster. Typically this includes
+a mix of active tasks (running or assigned) and inactive tasks
+(successful, failed, and lost).
+
+### Opening the Web UI
+
+Use the Job's web UI scheduler URL or the `aurora status` command to find out on which
+machines individual tasks are scheduled. You can open the web UI via the
+`open` command line command if invoked from your machine:
+
+    aurora job open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+If only the cluster is specified, it goes directly to that cluster's
+scheduler main page. If the role is specified, it goes to the top-level
+role page. If the full job key is specified, it goes directly to the job
+page where you can inspect individual tasks.
+
+### SSHing to a Specific Task Machine
+
+    aurora task ssh <job_key> <shard number>
+
+You can have the Aurora client ssh directly to the machine that has been
+assigned a particular Job/shard number. This may be useful for quickly
+diagnosing issues such as performance issues or abnormal behavior on a
+particular machine.
+
+### Templating Command Arguments
+
+    aurora task run [-e] [-t THREADS] <job_key> -- <<command-line>>
+
+Given a job specification, run the supplied command on all hosts and
+return the output. You may use the standard Mustache templating rules:
+
+- `{{thermos.ports[name]}}` substitutes the specific named port of the
+  task assigned to this machine
+- `{{mesos.instance}}` substitutes the shard id of the job's task
+  assigned to this machine
+- `{{thermos.task_id}}` substitutes the task id of the job's task
+  assigned to this machine
+
+For example, the following type of pattern can be a powerful diagnostic
+tool:
+
+    aurora task run -t5 cluster1/tyg/devel/seizure -- \
+      'curl -s -m1 localhost:{{thermos.ports[http]}}/vars | grep uptime'
+
+By default, the command runs in the Task's sandbox. The `-e` option can
+run the command in the executor's sandbox. This is mostly useful for
+Aurora administrators.
+
+You can parallelize the runs by using the `-t` option.
diff --git a/source/documentation/0.14.0/reference/client-hooks.md b/source/documentation/0.14.0/reference/client-hooks.md
new file mode 100644
index 0000000..ee64193
--- /dev/null
+++ b/source/documentation/0.14.0/reference/client-hooks.md
@@ -0,0 +1,228 @@
+# Hooks for Aurora Client API
+
+You can execute hook methods around Aurora API Client methods when they are called by the Aurora Command Line commands.
+
+Explaining how hooks work is a bit tricky because of some indirection about what they apply to. Basically, a hook is code that executes when a particular Aurora Client API method runs, letting you extend the method's actions. The hook executes on the client side, specifically on the machine executing Aurora commands.
+
+The catch is that hooks are associated with Aurora Client API methods, which users don't directly call. Instead, users call Aurora Command Line commands, which call Client API methods during their execution. Since which hooks run depends on which Client API methods get called, you will need to know which Command Line commands call which API methods. Later on, there is a table showing the various associations.
+
+**Terminology Note**: From now on, "method(s)" refer to Client API methods, and "command(s)" refer to Command Line commands.
+
+- [Hook Types](#hook-types)
+- [Execution Order](#execution-order)
+- [Hookable Methods](#hookable-methods)
+- [Activating and Using Hooks](#activating-and-using-hooks)
+- [.aurora Config File Settings](#aurora-config-file-settings)
+- [Command Line](#command-line)
+- [Hooks Protocol](#hooks-protocol)
+  - [pre_ Methods](#pre_-methods)
+  - [err_ Methods](#err_-methods)
+  - [post_ Methods](#post_-methods)
+- [Generic Hooks](#generic-hooks)
+- [Hooks Process Checklist](#hooks-process-checklist)
+
+
+## Hook Types
+
+Hooks have three basic types, differing by when they run with respect to their associated method.
+
+`pre_<method_name>`: When its associated method is called, the `pre_` hook executes first, then the called method. If the `pre_` hook fails, the method never runs. Later code that expected the method to succeed may be affected by this, which can result in the Aurora client terminating.
+
+Note that a `pre_` hook can error-trap internally so it does not
+return `False`. Designers/contributors of new `pre_` hooks should
+consider whether or not to error-trap them. You can error trap at the
+highest level very generally and always pass the `pre_` hook by
+returning `True`. For example:
+
+    def pre_create(...):
+      do_something()  # if do_something fails with an exception, the create_job is not attempted!
+      return True
+
+    # However...
+    def pre_create(...):
+      try:
+        do_something()  # may cause exception
+      except Exception:  # generic error trap will catch it
+        pass  # and ignore the exception
+      return True  # create_job will run in any case!
+
+`post_<method_name>`: A `post_` hook executes after its associated method successfully finishes running. If it fails, the already executed method is unaffected. A `post_` hook's error is trapped, and any later operations are unaffected.
+
+`err_<method_name>`: Executes only when its associated method returns a status other than OK or throws an exception. If an `err_` hook fails, the already executed method is unaffected. An `err_` hook's error is trapped, and any later operations are unaffected.
+
+## Execution Order
+
+A command with `pre_`, `post_`, and `err_` hooks defined and activated for its called method executes in the following order when the method successfully executes:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and successfully finishes
+6. `post_` method hook runs
+7. Command code executes
+8. Command execution ends
+
+The following is what happens when, for the same command and hooks, the method associated with the command suffers an error and does not successfully finish executing:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and fails
+6. `err_` method hook runs
+7. Command Code executes (if `err_` method does not end the command execution)
+8. Command execution ends
+
+Note that the `post_` and `err_` hooks for the same method can never both run for a single execution of that method.
+
+## Hookable Methods
+
+You can associate `pre_`, `post_`, and `err_` hooks with the following methods. Since you do not directly interact with the methods, but rather the Aurora Command Line commands that call them, for each method we also list the command(s) that can call the method. Note that a different method or methods may be called by a command depending on how the command's other code executes. Similarly, multiple commands can call the same method. We also list the methods' argument signatures, which are used by their associated hooks. <a name="Chart"></a>
+
+  Aurora Client API Method | Client API Method Argument Signature | Aurora Command Line Command
+  -------------------------| ------------------------------------- | ---------------------------
+  ```create_job``` | ```self```, ```config``` | ```job create```, ```runtask```
+  ```restart``` | ```self```, ```job_key```, ```shards```, ```update_config```, ```health_check_interval_seconds``` | ```job restart```
+  ```kill_job``` | ```self```, ```job_key```, ```shards=None``` |  ```job kill```
+  ```start_cronjob``` | ```self```, ```job_key``` | ```cron start```
+  ```start_job_update``` | ```self```, ```config```, ```instances=None``` | ```update start```
+
+Some specific examples:
+
+* `pre_create_job` executes when a `create_job` method is called, and before the `create_job` method itself executes.
+
+* `post_cancel_update` executes after a `cancel_update` method has successfully finished running.
+
+* `err_kill_job` executes when the `kill_job` method is called, but doesn't successfully finish running.
+
+## Activating and Using Hooks
+
+By default, hooks are inactive. If you do not want to use hooks, you do not need to make any changes to your code. If you do want to use hooks, you will need to alter your `.aurora` config file to activate them both for the configuration as a whole and for individual `Job`s. And, of course, you will need to define in your config file what happens when a particular hook executes.
+
+## .aurora Config File Settings
+
+You can define a top-level `hooks` variable in any `.aurora` config file. `hooks` is a list of all objects that define hooks used by `Job`s defined in that config file. If you do not want to define any hooks for a configuration, `hooks` is optional.
+
+    hooks = [Object_with_defined_hooks1, Object_with_defined_hooks2]
+
+Be careful when assembling a config file using `include` on multiple smaller config files. If there are multiple files that assign a value to `hooks`, only the last assignment made will stick. For example, if `x.aurora` has `hooks = [a, b, c]` and `y.aurora` has `hooks = [d, e, f]` and `z.aurora` has, in this order, `include x.aurora` and `include y.aurora`, the `hooks` value will be `[d, e, f]`.
+
+Also, for any `Job` that you want to use hooks with, its `Job` definition in the `.aurora` config file must set an `enable_hooks` flag to `True` (it defaults to `False`). By default, hooks are disabled and you must enable them for `Job`s of your choice.
+
+To summarize, to use hooks for a particular job, you must both activate hooks for your config file as a whole, and for that job. Activating hooks only for individual jobs won't work, nor will only activating hooks for your config file as a whole. You must also specify the hooks' defining object in the `hooks` variable.
+
+Recall that `.aurora` config files are written in Pystachio. So the following turns on hooks for production jobs at cluster1 and cluster2, but leaves them off for similar jobs with a defined user role. Of course, you also need to list the objects that define the hooks in your config file's `hooks` variable.
+
+    jobs = [
+            Job(enable_hooks = True, cluster = c, env = 'prod') for c in ('cluster1', 'cluster2')
+           ]
+    jobs.extend(
+       Job(cluster = c, env = 'prod', role = getpass.getuser()) for c in ('cluster1', 'cluster2'))
+       # Hooks disabled for these jobs
+
+## Command Line
+
+All Aurora Command Line commands now accept an `.aurora` config file as an optional parameter (some, of course, accept it as a required parameter). Whenever a command has a `.aurora` file parameter, any hooks specified and activated in the `.aurora` file can be used. For example:
+
+    aurora job restart cluster1/role/env/app myapp.aurora
+
+The command activates any hooks specified and activated in `myapp.aurora`. For the `restart` command, that is the only thing the `myapp.aurora` parameter does. So, if the command was the following, since there is no `.aurora` config file to specify any hooks, no hooks on the `restart` command can run.
+
+    aurora job restart cluster1/role/env/app
+
+## Hooks Protocol
+
+Any object defined in the `.aurora` config file can define hook methods. You should define your hook methods within a class, and then use the class name as a value in the `hooks` list in your config file.
+
+Note that you can define other methods in the class that its hook methods can call; all the logic of a hook does not have to be in its definition.
+
+The following example defines a class containing a `pre_kill_job` hook definition that calls another method defined in the class.
+
+    # Defines a method pre_kill_job
+    class KillConfirmer(object):
+      def confirm(self, msg):
+        return raw_input(msg).lower() == 'yes'
+
+      def pre_kill_job(self, job_key, shards=None):
+        shards = ('shards %s' % shards) if shards is not None else 'all shards'
+        return self.confirm('Are you sure you want to kill %s (%s)? (yes/no): '
+                            % (job_key, shards))
+
+### pre_ Methods
+
+`pre_` methods have the signature:
+
+    pre_<API method name>(self, <associated method's signature>)
+
+`pre_` methods have the same signature as their associated method, with the addition of `self` as the first parameter. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `pre_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+If this method returns False, the API command call aborts.
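+
+For instance (a hypothetical example, not taken from the Aurora source), a `pre_` hook that uses the
+`*`/`**` form described above and never aborts the call might look like:
+
+    class RestartAnnouncer(object):
+      def pre_restart(self, *args, **kw):
+        # args/kw receive whatever arguments the restart API method was called with.
+        print('About to restart with args=%r kw=%r' % (args, kw))
+        return True  # returning False would abort the restart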
+
+### err_ Methods
+
+`err_` methods have the signature:
+
+    err_<API method name>(self, exc, <associated method's signature>)
+
+`err_` methods have the same signature as their associated method, with the addition of a first parameter `self` and a second parameter `exc`. `exc` is either a result with responseCode other than `ResponseCode.OK` or an `Exception`. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `err_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+`err_` method return codes are ignored.
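+
+For instance (again a hypothetical example), an `err_` hook that reports failures might look like:
+
+    class FailureReporter(object):
+      def err_kill_job(self, exc, job_key, shards=None):
+        # exc is either a non-OK response or the Exception raised by kill_job.
+        print('kill of %s (shards: %s) failed: %s' % (job_key, shards, exc))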
+
+### post_ Methods
+
+`post_` methods have the signature:
+
+    post_<API method name>(self, result, <associated method signature>)
+
+`post_` method parameters are `self`, then `result`, followed by the same parameter signature as their associated method. `result` is the result of the associated method call. See the [chart](#chart) above for the mapping of parameters to methods. When writing `post_` methods, you can use the `*` and `**` syntax to designate that all unspecified arguments are passed in a list to the `*`ed parameter and all unspecified named arguments with values are passed as name/value pairs to the `**`ed parameter.
+
+`post_` method return codes are ignored.
+
+## Generic Hooks
+
+There are seven Aurora API Methods which any of the three hook types can attach to. Thus, there are 21 possible hook/method combinations for a single `.aurora` config file. Say that you define `pre_` and `post_` hooks for the `restart` method. That leaves 19 undefined hook/method combinations; `err_restart` and the 3 `pre_`, `post_`, and `err_` hooks for each of the other 6 hookable methods. You can define what happens when any of these otherwise undefined 19 hooks execute via a generic hook, whose signature is:
+
+    generic_hook(self, hook_config, event, method_name, result_or_err, args*, kw**)
+
+where:
+
+* `hook_config` is a named tuple of `config` (the Pystachio `config` object) and `job_key`.
+
+* `event` is one of `pre`, `err`, or `post`, indicating which type of hook the generic hook is standing in for. For example, assume no specific hooks were defined for the `restart` API command. If `generic_hook` is defined and activated, and `restart` is called, `generic_hook` will effectively run as `pre_restart`, `post_restart`, and `err_restart`. You can use a selection statement on this value so that `generic_hook` will act differently based on whether it is standing in for a `pre_`, `post_`, or `err_` hook.
+
+* `method_name` is the Client API method name whose execution is causing this execution of the `generic_hook`.
+
+* `args*`, `kw**` are the API method arguments and keyword arguments respectively.
+* `result_or_err` is a tri-state parameter taking one of these three values:
+  1. `None` for `pre_` hooks
+  2. `result` for `post_` hooks
+  3. `exc` for `err_` hooks
+
+Example:
+
+    # Overrides the standard do-nothing generic_hook by adding a log writing operation.
+    from twitter.common import log
+
+    class Logger(object):
+      '''Adds to the log every time a hookable API method is called'''
+      def generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw):
+        log.info('%s: %s_%s of %s'
+                 % (self.__class__.__name__, event, method_name, hook_config.job_key))
+
+## Hooks Process Checklist
+
+1. In your `.aurora` config file, add a `hooks` variable. Note that you may want to define a `.aurora` file only for hook definitions and then include this file in multiple other config files that you want to use the same hooks.
+
+    hooks = []
+
+2. In the `hooks` variable, list all objects that define hooks used by `Job`s defined in this config:
+
+    hooks = [Object_hook_definer1, Object_hook_definer2]
+
+3. For each job that uses hooks in this config file, add `enable_hooks = True` to the `Job` definition. Note that this is necessary even if you only want to use the generic hook.
+
+4. Write your `pre_`, `post_`, and `err_` hook definitions as part of an object definition in your `.aurora` config file.
+
+5. If desired, write your `generic_hook` definition as part of an object definition in your `.aurora` config file. Remember, the object must be listed as a member of `hooks`.
+
+6. If your Aurora command line command does not otherwise take an `.aurora` config file argument, add the appropriate `.aurora` file as an argument in order to define and activate the configuration's hooks.
diff --git a/source/documentation/0.14.0/reference/configuration-best-practices.md b/source/documentation/0.14.0/reference/configuration-best-practices.md
new file mode 100644
index 0000000..9ce7fcb
--- /dev/null
+++ b/source/documentation/0.14.0/reference/configuration-best-practices.md
@@ -0,0 +1,187 @@
+Aurora Configuration Best Practices
+===================================
+
+Use As Few .aurora Files As Possible
+------------------------------------
+
+When creating your `.aurora` configuration, try to keep all versions of
+a particular job within the same `.aurora` file. For example, if you
+have separate jobs for `cluster1`, `cluster1` staging, `cluster1`
+testing, and `cluster2`, keep them as close together as possible.
+
+Constructs shared across multiple jobs owned by your team (e.g.
+team-level defaults or structural templates) can be split into separate
+`.aurora` files and included via the `include` directive.
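+
+For example (the file name here is purely illustrative), a job configuration could pull shared
+templates in with:
+
+    include('team_defaults.aurora')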
+
+
+Avoid Boilerplate
+------------------
+
+If you see repetition or find yourself copying and pasting any parts of
+your configuration, it's likely an opportunity for templating. Take the
+example below:
+
+`redundant.aurora` contains:
+
+    download = Process(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'rm -rf Python-2.7.3 && tar xjf Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    build = Process(
+      name = 'build',
+      cmdline = 'pushd Python-2.7.3 && ./configure && make && popd',
+      max_failures = 1)
+
+    email = Process(
+      name = 'email',
+      cmdline = 'echo Success | mail feynman@tmc.com',
+      max_failures = 5,
+      min_duration = 1)
+
+    build_python = Task(
+      name = 'build_python',
+      processes = [download, unpack, build, email],
+      constraints = [Constraint(order = ['download', 'unpack', 'build', 'email'])])
+
+As you'll notice, there's a lot of repetition in the `Process`
+definitions. For example, almost every process sets a `max_failures`
+limit to 5 and a `min_duration` to 1. This is an opportunity for factoring
+into a common process template.
+
+Furthermore, the Python version is repeated everywhere. This can be
+bound via structural templating as described in the [Advanced Binding](../configuration-templating/#AdvancedBinding)
+section.
+
+`less_redundant.aurora` contains:
+
+    class Python(Struct):
+      version = Required(String)
+      base = Default(String, 'Python-{{version}}')
+      package = Default(String, '{{base}}.tar.bz2')
+
+    ReliableProcess = Process(
+      max_failures = 5,
+      min_duration = 1)
+
+    download = ReliableProcess(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/{{python.version}}/{{python.package}}')
+
+    unpack = ReliableProcess(
+      name = 'unpack',
+      cmdline = 'rm -rf {{python.base}} && tar xjf {{python.package}}')
+
+    build = ReliableProcess(
+      name = 'build',
+      cmdline = 'pushd {{python.base}} && ./configure && make && popd',
+      max_failures = 1)
+
+    email = ReliableProcess(
+      name = 'email',
+      cmdline = 'echo Success | mail {{role}}@foocorp.com')
+
+    build_python = SequentialTask(
+      name = 'build_python',
+      processes = [download, unpack, build, email]).bind(python = Python(version = "2.7.3"))
+
+
+Thermos Uses bash, But Thermos Is Not bash
+-------------------------------------------
+
+#### Bad
+
+Many tiny Processes make for harder-to-manage configurations.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'rcp user@my_machine:my_application .'
+    )
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+#### Good
+
+Each `cmdline` runs in a bash subshell, so you have the full power of
+bash. Chaining commands with `&&` or `||` is almost always the right
+thing to do.
+
+Also for Tasks that are simply a list of processes that run one after
+another, consider using the `SequentialTask` helper which applies a
+linear ordering constraint for you.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'rcp user@my_machine:my_application . && unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+
+Rarely Use Functions In Your Configurations
+-------------------------------------------
+
+90% of the time you define a function in a `.aurora` file, you're
+probably Doing It Wrong(TM).
+
+#### Bad
+
+    def get_my_task(name, user, cpu, ram, disk):
+      return Task(
+        name = name,
+        user = user,
+        processes = [STAGE_PROCESS, RUN_PROCESS],
+        constraints = order(STAGE_PROCESS, RUN_PROCESS),
+        resources = Resources(cpu = cpu, ram = ram, disk = disk)
+      )
+
+    task_one = get_my_task('task_one', 'feynman', 1.0, 32*MB, 1*GB)
+    task_two = get_my_task('task_two', 'feynman', 2.0, 64*MB, 1*GB)
+
+#### Good
+
+This one is more idiomatic. Forced keyword arguments prevent accidents,
+e.g. constructing a task with "32*MB" when you mean 32MB of ram and not
+disk. Less proliferation of task-construction techniques means an
+easier-to-read, quicker-to-understand, and more composable
+configuration.
+
+    TASK_TEMPLATE = SequentialTask(
+      user = 'wickman',
+      processes = [STAGE_PROCESS, RUN_PROCESS],
+    )
+
+    task_one = TASK_TEMPLATE(
+      name = 'task_one',
+      resources = Resources(cpu = 1.0, ram = 32*MB, disk = 1*GB)
+    )
+
+    task_two = TASK_TEMPLATE(
+      name = 'task_two',
+      resources = Resources(cpu = 2.0, ram = 64*MB, disk = 1*GB)
+    )
diff --git a/source/documentation/0.14.0/reference/configuration-templating.md b/source/documentation/0.14.0/reference/configuration-templating.md
new file mode 100644
index 0000000..16d5c9d
--- /dev/null
+++ b/source/documentation/0.14.0/reference/configuration-templating.md
@@ -0,0 +1,306 @@
+Aurora Configuration Templating
+===============================
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a templating library called
+*Pystachio*, a powerful tool for configuration specification and reuse.
+
+[Aurora Configuration Reference](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the *Aurora+Thermos Configuration
+Reference* without `import` statements - the Aurora config loader
+injects them automatically. Other than that the `.aurora` format
+works like any other Python script.
+
+
+Templating 1: Binding in Pystachio
+----------------------------------
+
+Pystachio uses the visually distinctive {{}} to indicate template
+variables. These are often called "mustache variables" after the
+similarly appearing variables in the Mustache templating system and
+because the curly braces resemble mustaches.
+
+If you are familiar with the Mustache system, templates in Pystachio
+have significant differences. They have no nesting, joining, or
+inheritance semantics. On the other hand, templates are evaluated
+iteratively, so this affords some level of indirection.
+
+Let's start with the simplest template: text with one
+variable, in this case `name`:
+
+    Hello {{name}}
+
+If we evaluate this as is, we'd get back:
+
+    Hello
+
+If a template variable doesn't have a value, when evaluated it's
+replaced with nothing. If we add a binding to give it a value:
+
+    { "name" : "Tom" }
+
+We'd get back:
+
+    Hello Tom
+
+Every Pystachio object has an associated `.bind` method that can bind
+values to {{}} variables. Bindings are not immediately evaluated.
+Instead, they are evaluated only when the interpolated value of the
+object is necessary, e.g. for performing equality or serializing a
+message over the wire.
+
+Objects with and without mustache templated variables behave
+differently:
+
+    >>> Float(1.5)
+    Float(1.5)
+
+    >>> Float('{{x}}.5')
+    Float({{x}}.5)
+
+    >>> Float('{{x}}.5').bind(x = 1)
+    Float(1.5)
+
+    >>> Float('{{x}}.5').bind(x = 1) == Float(1.5)
+    True
+
+    >>> contextual_object = String('{{metavar{{number}}}}').bind(
+    ... metavar1 = "first", metavar2 = "second")
+
+    >>> contextual_object
+    String({{metavar{{number}}}})
+
+    >>> contextual_object.bind(number = 1)
+    String(first)
+
+    >>> contextual_object.bind(number = 2)
+    String(second)
+
+You usually bind simple key to value pairs, but you can also bind three
+other objects: lists, dictionaries, and structurals. These will be
+described in detail later.
+
+
+### Structurals in Pystachio / Aurora
+
+Most Aurora/Thermos users don't ever (knowingly) interact with `String`,
+`Float`, or `Integer` Pystachio objects directly. Instead they interact
+with derived structural (`Struct`) objects that are collections of
+fundamental and structural objects. The structural object components are
+called *attributes*. Aurora's most used structural objects are `Job`,
+`Task`, and `Process`:
+
+    class Process(Struct):
+      cmdline = Required(String)
+      name = Required(String)
+      max_failures = Default(Integer, 1)
+      daemon = Default(Boolean, False)
+      ephemeral = Default(Boolean, False)
+      min_duration = Default(Integer, 5)
+      final = Default(Boolean, False)
+
+Construct default objects by following the object's type with (). If you
+want an attribute to have a value different from its default, include
+the attribute name and value inside the parentheses.
+
+    >>> Process()
+    Process(daemon=False, max_failures=1, ephemeral=False,
+      min_duration=5, final=False)
+
+Attribute values can be template variables, which then receive specific
+values when creating the object.
+
+    >>> Process(cmdline = 'echo {{message}}')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo {{message}}, final=False)
+
+    >>> Process(cmdline = 'echo {{message}}').bind(message = 'hello world')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+A powerful binding property is that all of an object's children inherit its
+bindings:
+
+    >>> List(Process)([
+    ... Process(name = '{{prefix}}_one'),
+    ... Process(name = '{{prefix}}_two')
+    ... ]).bind(prefix = 'hello')
+    ProcessList(
+      Process(daemon=False, name=hello_one, max_failures=1, ephemeral=False, min_duration=5, final=False),
+      Process(daemon=False, name=hello_two, max_failures=1, ephemeral=False, min_duration=5, final=False)
+      )
+
+Remember that an Aurora Job contains Tasks which contain Processes. A
+Job level binding is inherited by its Tasks and all their Processes.
+Similarly a Task level binding is available to that Task and its
+Processes but is *not* visible at the Job level (inheritance is a
+one-way street.)
+
+#### Mustaches Within Structurals
+
+When you define a `Struct` schema, one powerful, but confusing, feature
+is that all of that structure's attributes are Mustache variables within
+the enclosing scope *once they have been populated*.
+
+For example, when `Process` is defined above, all its attributes such as
+{{`name`}}, {{`cmdline`}}, {{`max_failures`}} etc., are immediately
+defined as Mustache variables, implicitly bound into the `Process`, and
+inherited by all child objects once they are defined.
+
+Thus, you can do the following:
+
+    >>> Process(name = "installer", cmdline = "echo {{name}} is running")
+    Process(daemon=False, name=installer, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo installer is running, final=False)
+
+WARNING: This binding only takes place in one direction. For example,
+the following does NOT work and does not set the `Process` `name`
+attribute's value.
+
+    >>> Process().bind(name = "installer")
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5, final=False)
+
+The following is also not possible and results in an infinite loop that
+attempts to resolve `Process.name`.
+
+    >>> Process(name = '{{name}}').bind(name = 'installer')
+
+Do not confuse Structural attributes with bound Mustache variables.
+Attributes are implicitly converted to Mustache variables but not vice
+versa.
+
+### Templating 2: Structurals Are Factories
+
+#### A Second Way of Templating
+
+A second templating method is as powerful as the one described above and
+often confused with it. This method is due to the automatic conversion of
+Struct attributes to Mustache variables as described above.
+
+Suppose you create a Process object:
+
+    >>> p = Process(name = "process_one", cmdline = "echo hello world")
+
+    >>> p
+    Process(daemon=False, name=process_one, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+This `Process` object, "`p`", can be used wherever a `Process` object is
+needed. It can also be reused by changing the value(s) of its
+attribute(s). Here we change its `name` attribute from `process_one` to
+`process_two`.
+
+    >>> p(name = "process_two")
+    Process(daemon=False, name=process_two, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+Template creation is a common use for this technique:
+
+    >>> Daemon = Process(daemon = True)
+    >>> logrotate = Daemon(name = 'logrotate', cmdline = './logrotate conf/logrotate.conf')
+    >>> mysql = Daemon(name = 'mysql', cmdline = 'bin/mysqld --safe-mode')
+
+### Advanced Binding
+
+As described above, `.bind()` binds simple strings or numbers to
+Mustache variables. In addition to Structural types formed by combining
+atomic types, Pystachio has two container types; `List` and `Map` which
+can also be bound via `.bind()`.
+
+#### Bind Syntax
+
+The `bind()` function can take Python dictionaries or `kwargs`
+interchangeably (when "`kwargs`" is in a function definition, `kwargs`
+receives a Python dictionary containing all keyword arguments after the
+formal parameter list).
+
+    >>> String('{{foo}}').bind(foo = 'bar') == String('{{foo}}').bind({'foo': 'bar'})
+    True
+
+Bindings done "closer" to the object in question take precedence:
+
+    >>> p = Process(name = '{{context}}_process')
+    >>> t = Task().bind(context = 'global')
+    >>> t(processes = [p, p.bind(context = 'local')])
+    Task(processes=ProcessList(
+      Process(daemon=False, name=global_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5),
+      Process(daemon=False, name=local_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5)
+    ))
+
+#### Binding Complex Objects
+
+##### Lists
+
+    >>> fibonacci = List(Integer)([1, 1, 2, 3, 5, 8, 13])
+    >>> String('{{fib[4]}}').bind(fib = fibonacci)
+    String(5)
+
+##### Maps
+
+    >>> first_names = Map(String, String)({'Kent': 'Clark', 'Wayne': 'Bruce', 'Prince': 'Diana'})
+    >>> String('{{first[Kent]}}').bind(first = first_names)
+    String(Clark)
+
+##### Structurals
+
+    >>> String('{{p.cmdline}}').bind(p = Process(cmdline = "echo hello world"))
+    String(echo hello world)
+
+### Structural Binding
+
+Use structural templates when binding more than two or three individual
+values at the Job or Task level. For fewer values, standard
+key-to-string binding is sufficient.
+
+Structural binding is a very powerful pattern and is most useful in
+Aurora/Thermos for doing Structural configuration. For example, you can
+define a job profile. The following profile uses `HDFS`, the Hadoop
+Distributed File System, to designate a file's location. `HDFS` does
+not come with Aurora, so you'll need to either install it separately
+or change the way the dataset is designated.
+
+    class Profile(Struct):
+      version = Required(String)
+      environment = Required(String)
+      dataset = Default(String, 'hdfs://home/aurora/data/{{environment}}')
+
+    PRODUCTION = Profile(version = 'live', environment = 'prod')
+    DEVEL = Profile(version = 'latest',
+                    environment = 'devel',
+                    dataset = 'hdfs://home/aurora/data/test')
+    TEST = Profile(version = 'latest', environment = 'test')
+
+    JOB_TEMPLATE = Job(
+      name = 'application',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = '{{profile.environment}}',
+      task = SequentialTask(
+        name = 'task',
+        resources = Resources(cpu = 2, ram = 4*GB, disk = 8*GB),
+        processes = [
+          Process(name = 'main',
+                  cmdline = 'java -jar application.jar -hdfsPath {{profile.dataset}}')
+        ]
+      )
+    )
+
+    jobs = [
+      JOB_TEMPLATE(instances = 100).bind(profile = PRODUCTION),
+      JOB_TEMPLATE.bind(profile = DEVEL),
+      JOB_TEMPLATE.bind(profile = TEST),
+     ]
+
+In this case, a custom structural "Profile" is created to self-document
+the configuration to some degree. This also allows some schema
+"type-checking", and for default self-substitution, e.g. in
+`Profile.dataset` above.
+
+So rather than a `.bind()` with a half-dozen substituted variables, you
+can bind a single object that has sensible defaults stored in a single
+place.
diff --git a/source/documentation/0.14.0/reference/configuration-tutorial.md b/source/documentation/0.14.0/reference/configuration-tutorial.md
new file mode 100644
index 0000000..dcfe505
--- /dev/null
+++ b/source/documentation/0.14.0/reference/configuration-tutorial.md
@@ -0,0 +1,511 @@
+Aurora Configuration Tutorial
+=============================
+
+This document covers how to write Aurora configuration files, including feature descriptions
+and best practices. When writing a configuration file, make use of
+`aurora job inspect`. It takes the same job key and configuration file
+arguments as `aurora job create` or `aurora update start`. It first ensures the
+configuration parses, then outputs it in human-readable form.
+
+You should read this after going through the general [Aurora Tutorial](../../getting-started/tutorial/).
+
+- [The Basics](#the-basics)
+	- [Use Bottom-To-Top Object Ordering](#use-bottom-to-top-object-ordering)
+- [An Example Configuration File](#an-example-configuration-file)
+- [Defining Process Objects](#defining-process-objects)
+- [Getting Your Code Into The Sandbox](#getting-your-code-into-the-sandbox)
+- [Defining Task Objects](#defining-task-objects)
+	- [SequentialTask: Running Processes in Parallel or Sequentially](#sequentialtask-running-processes-in-parallel-or-sequentially)
+	- [SimpleTask](#simpletask)
+	- [Combining tasks](#combining-tasks)
+- [Defining Job Objects](#defining-job-objects)
+- [The jobs List](#the-jobs-list)
+- [Basic Examples](#basic-examples)
+
+
+The Basics
+----------
+
+To run a job on Aurora, you must specify a configuration file that tells
+Aurora what it needs to know to schedule the job, what Mesos needs to
+run the tasks the job is made up of, and what Thermos needs to run the
+processes that make up the tasks. This file must have
+a `.aurora` suffix.
+
+A configuration file defines a collection of objects, along with parameter
+values for their attributes. An Aurora configuration file contains the
+following three types of objects:
+
+- Job
+- Task
+- Process
+
+A configuration also specifies a list of `Job` objects assigned
+to the variable `jobs`.
+
+- jobs (list of defined Jobs to run)
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a type-checked dictionary
+templating library called *Pystachio*, a powerful tool for
+configuration specification and reuse. Pystachio objects are tailored
+via {{}} surrounded templates.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the [*Aurora Configuration
+Reference*](../configuration/), without `import` statements - the
+Aurora config loader injects them automatically. Other than that, an `.aurora`
+file works like any other Python script.
+
+[*Aurora Configuration Reference*](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+### Use Bottom-To-Top Object Ordering
+
+A well-structured configuration starts with structural templates (if
+any). Structural templates encapsulate in their attributes all the
+differences between Jobs in the configuration that are not directly
+manipulated at the `Job` level, but typically at the `Process` or `Task`
+level, for example, when certain processes are invoked with slightly
+different settings or input.
+
+After structural templates, define, in order, `Process`es, `Task`s, and
+`Job`s.
+
+Structural template names should be *UpperCamelCased* and their
+instantiations are typically *UPPER\_SNAKE\_CASED*. `Process`, `Task`,
+and `Job` names are typically *lower\_snake\_cased*. Indentation is typically 2
+spaces.
+
+An Example Configuration File
+-----------------------------
+
+The following is a typical configuration file. Don't worry if there are
+parts you don't understand yet, but you may want to refer back to this
+as you read about its individual parts. Note that names surrounded by
+curly braces {{}} are template variables, which the system replaces with
+bound values for the variables.
+
+    # --- templates here ---
+	class Profile(Struct):
+	  package_version = Default(String, 'live')
+	  java_binary = Default(String, '/usr/lib/jvm/java-1.7.0-openjdk/bin/java')
+	  extra_jvm_options = Default(String, '')
+	  parent_environment = Default(String, 'prod')
+	  parent_serverset = Default(String,
+                                 '/foocorp/service/bird/{{parent_environment}}/bird')
+
+	# --- processes here ---
+	main = Process(
+	  name = 'application',
+	  cmdline = '{{profile.java_binary}} -server -Xmx1792m '
+	            '{{profile.extra_jvm_options}} '
+	            '-jar application.jar '
+	            '-upstreamService {{profile.parent_serverset}}'
+	)
+
+	# --- tasks ---
+	base_task = SequentialTask(
+	  name = 'application',
+	  processes = [
+	    Process(
+	      name = 'fetch',
+              cmdline = 'curl -O '
+                        'https://packages.foocorp.com/{{profile.package_version}}/application.jar'),
+	  ]
+	)
+
+        # not always necessary but often useful to have separate task
+        # resource classes
+        staging_task = base_task(resources =
+                         Resources(cpu = 1.0,
+                                   ram = 2048*MB,
+                                   disk = 1*GB))
+	production_task = base_task(resources =
+                            Resources(cpu = 4.0,
+                                      ram = 2560*MB,
+                                      disk = 10*GB))
+
+	# --- job template ---
+	job_template = Job(
+	  name = 'application',
+	  role = 'myteam',
+	  contact = 'myteam-team@foocorp.com',
+	  instances = 20,
+	  service = True,
+	  task = production_task
+	)
+
+	# -- profile instantiations (if any) ---
+	PRODUCTION = Profile()
+	STAGING = Profile(
+	  extra_jvm_options = '-Xloggc:gc.log',
+	  parent_environment = 'staging'
+	)
+
+	# -- job instantiations --
+	jobs = [
+          job_template(cluster = 'cluster1', environment = 'prod')
+	               .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster2', environment = 'prod')
+	                .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster1',
+                        environment = 'staging',
+			service = False,
+			task = staging_task,
+			instances = 2)
+			.bind(profile = STAGING),
+	]
+
+## Defining Process Objects
+
+Processes are handled by the Thermos system. A process is a single
+executable step run as a part of an Aurora task, which consists of a
+bash-executable statement.
+
+The key (and required) `Process` attributes are:
+
+-   `name`: Any string which is a valid Unix filename (no slashes,
+    NULLs, or leading periods). The `name` value must be unique relative
+    to other Processes in a `Task`.
+-   `cmdline`: A command line run in a bash subshell, so you can use
+    bash scripts. Nothing is supplied for command-line arguments,
+    so `$*` is unspecified.
+
+Many tiny processes make managing configurations more difficult. For
+example, the following is a bad way to define processes.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip'
+    )
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+Since `cmdline` runs in a bash subshell, you can chain commands
+with `&&` or `||`.
+
+When defining a `Task` that is just a list of Processes run in a
+particular order, use `SequentialTask`, as described in the
+[*Defining Task Objects*](#defining-task-objects) section. The following simplifies and combines the
+above multiple `Process` definitions into just two.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip && '
+                'unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+`Process` also has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#process-objects).
+
+
+## Getting Your Code Into The Sandbox
+
+When using Aurora, you need to get your executable code into its "sandbox", specifically
+the Task sandbox where the code executes for the Processes that make up that Task.
+
+Each Task has a sandbox created when the Task starts and garbage
+collected when it finishes. All of a Task's processes run in its
+sandbox, so processes can share state by using a shared current
+working directory.
+
+Typically, you save this code somewhere. You then need to define a Process
+in your `.aurora` configuration file that fetches the code from that somewhere
+to where the agent can see it. For a public cloud, that can be anywhere public on
+the Internet, such as S3. For a private cloud internal storage, you need to put in
+on an accessible HDFS cluster or similar storage.
+
+The template for this Process is:
+
+    <name> = Process(
+      name = '<name>',
+      cmdline = '<command to copy and extract code archive into current working directory>'
+    )
+
+Note: Be sure the extracted code archive has an executable.
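+
+As a minimal sketch (the package URL is hypothetical), a fetch Process
+typically mirrors the `stage` Process shown earlier:
+
+    fetch_app = Process(
+      name = 'fetch_app',
+      cmdline = 'curl -O https://packages.example.com/myapp/app.zip && unzip app.zip'
+    )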
+
+## Defining Task Objects
+
+Tasks are handled by Mesos. A task is a collection of processes that
+runs in a shared sandbox. It's the fundamental unit Aurora uses to
+schedule the datacenter; essentially what Aurora does is find places
+in the cluster to run tasks.
+
+The key (and required) parts of a Task are:
+
+-   `name`: A string giving the Task's name. By default, if a Task is
+    not given a name, it inherits the first name in its Process list.
+
+-   `processes`: An unordered list of Process objects bound to the Task.
+    The value of the optional `constraints` attribute affects the
+    contents as a whole. Currently, the only constraint, `order`, determines if
+    the processes run in parallel or sequentially.
+
+-   `resources`: A `Resource` object defining the Task's resource
+    footprint. A `Resource` object has three attributes:
+    -   `cpu`: A Float, the fractional number of cores the Task
+        requires.
+    -   `ram`: An Integer, RAM bytes the Task requires.
+    -   `disk`: An Integer, disk bytes the Task requires.
+
+A basic Task definition looks like:
+
+    Task(
+        name="hello_world",
+        processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+        resources=Resources(cpu = 1.0,
+                            ram = 1*GB,
+                            disk = 1*GB))
+
+A Task has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#task-object).
+
+
+### SequentialTask: Running Processes in Parallel or Sequentially
+
+By default, a Task with several Processes runs them in parallel. There
+are two ways to run Processes sequentially:
+
+-   Include an `order` constraint in the Task definition's `constraints`
+    attribute whose arguments specify the processes' run order:
+
+        Task( ... processes=[process1, process2, process3],
+              constraints = order(process1, process2, process3), ...)
+
+-   Use `SequentialTask` instead of `Task`; it automatically runs
+    processes in the order specified in the `processes` attribute. No
+    `constraint` parameter is needed:
+
+        SequentialTask( ... processes=[process1, process2, process3] ...)
+
+### SimpleTask
+
+For quickly creating simple tasks, use the `SimpleTask` helper. It
+creates a basic task from a provided name and command line using a
+default set of resources. For example, in a `.aurora` configuration
+file:
+
+    SimpleTask(name="hello_world", command="echo hello world")
+
+is equivalent to
+
+    Task(name="hello_world",
+         processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+         resources=Resources(cpu = 1.0,
+                             ram = 1*GB,
+                             disk = 1*GB))
+
+The simplest idiomatic Job configuration thus becomes:
+
+    import os
+    hello_world_job = Job(
+      task=SimpleTask(name="hello_world", command="echo hello world"),
+      role=os.getenv('USER'),
+      cluster="cluster1")
+
+When written to `hello_world.aurora`, you invoke it with a simple
+`aurora job create cluster1/$USER/test/hello_world hello_world.aurora`.
+
+### Combining tasks
+
+`Tasks.concat` (synonym: `concat_tasks`) and
+`Tasks.combine` (synonym: `combine_tasks`) merge multiple Task definitions
+into a single Task. It may be easier to define complex Jobs
+as smaller constituent Tasks. But since a Job only includes a single
+Task, the subtasks must be combined before using them in a Job.
+Smaller Tasks can also be reused between Jobs, instead of having to
+repeat their definition for multiple Jobs.
+
+With both methods, the merged Task takes the first Task's name. The
+difference between the two is the result Task's process ordering.
+
+-   `Tasks.combine` runs its subtasks' processes in no particular order.
+    The new Task's resource consumption is the sum of all its subtasks'
+    consumption.
+
+-   `Tasks.concat` runs its subtasks in the order supplied, with each
+    subtask's processes run serially between tasks. It is analogous to
+    the `order` constraint helper, except at the Task level instead of
+    the Process level. The new Task's resource consumption is the
+    maximum value specified by any subtask for each Resource attribute
+    (cpu, ram and disk).
+
+For example, given the following:
+
+    setup_task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper],
+      # It is important to note that {{Tasks.concat}} has
+      # no effect on the ordering of the processes within a task;
+      # hence the necessity of the {{order}} statement below
+      # (otherwise, the order in which {{download_interpreter}}
+      # and {{update_zookeeper}} run will be non-deterministic)
+      constraints=order(download_interpreter, update_zookeeper),
+      ...
+    )
+
+    run_task = SequentialTask(
+      ...
+      processes=[download_application, start_application],
+      ...
+    )
+
+    combined_task = Tasks.concat(setup_task, run_task)
+
+The `Tasks.concat` command merges the two Tasks into a single Task and
+ensures all processes in `setup_task` run before the processes
+in `run_task`. Conceptually, the task is reduced to:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper,
+                        download_application, start_application),
+      ...
+    )
+
+In the case of `Tasks.combine`, the two schedules run in parallel:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper) +
+                        order(download_application, start_application),
+      ...
+    )
+
+In the latter case, each of the two sequences may operate in parallel.
+Of course, this may not be the intended behavior (for example, if
+the `start_application` Process implicitly relies
+upon `download_interpreter`). Make sure you understand the difference
+between using one or the other.
+
+## Defining Job Objects
+
+A job is a group of identical tasks that Aurora can run in a Mesos cluster.
+
+A `Job` object is defined by the values of several attributes, some
+required and some optional. The required attributes are:
+
+-   `task`: Task object to bind to this job. Note that a Job can
+    only take a single Task.
+
+-   `role`: Job's role account; in other words, the user account to run
+    the job as on a Mesos cluster machine. A common value is
+    `os.getenv('USER')`, which uses Python to get the user who
+    submits the job request. The other common value is the service
+    account that runs the job, e.g. `www-data`.
+
+-   `environment`: Job's environment, typical values
+    are `devel`, `test`, or `prod`.
+
+-   `cluster`: Aurora cluster to schedule the job in, defined in
+    `/etc/aurora/clusters.json` or `~/.clusters.json`. You can specify
+    jobs where the only difference is the `cluster`, then at run time
+    only run the Job whose job key includes your desired cluster's name.
+
+You usually see a `name` parameter. By default, `name` inherits its
+value from the Job's associated Task object, but you can override this
+default. For these four parameters, a Job definition might look like:
+
+    foo_job = Job( name = 'foo', cluster = 'cluster1',
+              role = os.getenv('USER'), environment = 'prod',
+              task = foo_task)
+
+In addition to the required attributes, there are several optional
+attributes. Details can be found in the [Aurora Configuration Reference](../configuration/#job-objects).
+
+
+## The jobs List
+
+At the end of your `.aurora` file, you need to specify a list of the
+file's defined Jobs. For example, the following exports the jobs `job1`,
+`job2`, and `job3`.
+
+    jobs = [job1, job2, job3]
+
+This allows the aurora client to invoke commands on those jobs, such as
+starting, updating, or killing them.
+
+
+
+Basic Examples
+==============
+
+These are provided to give a basic understanding of simple Aurora jobs.
+
+### hello_world.aurora
+
+Put the following in a file named `hello_world.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    import os
+    hello_world_process = Process(name = 'hello_world', cmdline = 'echo hello world')
+
+    hello_world_task = Task(
+      resources = Resources(cpu = 0.1, ram = 16 * MB, disk = 16 * MB),
+      processes = [hello_world_process])
+
+    hello_world_job = Job(
+      cluster = 'cluster1',
+      role = os.getenv('USER'),
+      task = hello_world_task)
+
+    jobs = [hello_world_job]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world hello_world.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world
+
+### Environment Tailoring
+
+Put the following in a file named `hello_world_productionized.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    include('hello_world.aurora')
+
+    production_resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 2 * GB)
+    staging_resources = Resources(cpu = 0.1, ram = 32 * MB, disk = 512 * MB)
+    hello_world_template = hello_world_job(
+        name = "hello_world-{{cluster}}",
+        task = hello_world_task(resources=production_resources))
+
+    jobs = [
+      # production jobs
+      hello_world_template(cluster = 'cluster1', instances = 25),
+      hello_world_template(cluster = 'cluster2', instances = 15),
+
+      # staging jobs
+      hello_world_template(
+        cluster = 'local',
+        instances = 1,
+        task = hello_world_task(resources=staging_resources)),
+    ]
+
+Then issue the following commands to create and kill the job, using your own values for the job key
+
+    aurora job create cluster1/$USER/test/hello_world-cluster1 hello_world_productionized.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world-cluster1
diff --git a/source/documentation/0.14.0/reference/configuration.md b/source/documentation/0.14.0/reference/configuration.md
new file mode 100644
index 0000000..8a70a92
--- /dev/null
+++ b/source/documentation/0.14.0/reference/configuration.md
@@ -0,0 +1,605 @@
+Aurora Configuration Reference
+==============================
+
+Don't know where to start? The Aurora configuration schema is very
+powerful, and configurations can become quite complex for advanced use
+cases.
+
+For examples of simple configurations to get something up and running
+quickly, check out the [Tutorial](../../getting-started/tutorial/). When you feel comfortable with the basics, move
+on to the [Configuration Tutorial](../configuration-tutorial/) for more in-depth coverage of
+configuration design.
+
+- [Process Schema](#process-schema)
+    - [Process Objects](#process-objects)
+- [Task Schema](#task-schema)
+    - [Task Object](#task-object)
+    - [Constraint Object](#constraint-object)
+    - [Resource Object](#resource-object)
+- [Job Schema](#job-schema)
+    - [Job Objects](#job-objects)
+    - [UpdateConfig Objects](#updateconfig-objects)
+    - [HealthCheckConfig Objects](#healthcheckconfig-objects)
+    - [Announcer Objects](#announcer-objects)
+    - [Container Objects](#container)
+    - [LifecycleConfig Objects](#lifecycleconfig-objects)
+- [Specifying Scheduling Constraints](#specifying-scheduling-constraints)
+- [Template Namespaces](#template-namespaces)
+    - [mesos Namespace](#mesos-namespace)
+    - [thermos Namespace](#thermos-namespace)
+
+
+Process Schema
+==============
+
+Process objects consist of required `name` and `cmdline` attributes. You can customize Process
+behavior with its optional attributes. Remember, Processes are handled by Thermos.
+
+### Process Objects
+
+  **Attribute Name**  | **Type**    | **Description**
+  ------------------- | :---------: | ---------------------------------
+   **name**           | String      | Process name (Required)
+   **cmdline**        | String      | Command line (Required)
+   **max_failures**   | Integer     | Maximum process failures (Default: 1)
+   **daemon**         | Boolean     | When True, this is a daemon process. (Default: False)
+   **ephemeral**      | Boolean     | When True, this is an ephemeral process. (Default: False)
+   **min_duration**   | Integer     | Minimum duration between process restarts in seconds. (Default: 15)
+   **final**          | Boolean     | When True, this process is a finalizing one that should run last. (Default: False)
+   **logger**         | Logger      | Struct defining the log behavior for the process. (Default: Empty)
+
+#### name
+
+The name is any valid UNIX filename string (specifically no
+slashes, NULLs or leading periods). Within a Task object, each Process name
+must be unique.
+
+#### cmdline
+
+The command line run by the process. The command line is invoked in a bash
+subshell, so can involve fully-blown bash scripts. However, nothing is
+supplied for command-line arguments so `$*` is unspecified.
+
+#### max_failures
+
+The maximum number of failures (non-zero exit statuses) this process can
+have before being marked permanently failed and not retried. If a
+process permanently fails, Thermos looks at the failure limit of the task
+containing the process (usually 1) to determine if the task has
+failed as well.
+
+Setting `max_failures` to 0 makes the process retry
+indefinitely until it achieves a successful (zero) exit status.
+It retries at most once every `min_duration` seconds to prevent
+an effective denial of service attack on the coordinating Thermos scheduler.
+
+#### daemon
+
+By default, Thermos processes are non-daemon. If `daemon` is set to True, a
+successful (zero) exit status does not prevent future process runs.
+Instead, the process reinvokes after `min_duration` seconds.
+However, the maximum failure limit still applies. A combination of
+`daemon=True` and `max_failures=0` causes a process to retry
+indefinitely regardless of exit status. This should be avoided
+for very short-lived processes because of the accumulation of
+checkpointed state for each process run. When running in Mesos
+specifically, `max_failures` is capped at 100.
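+
+A minimal sketch of such a process (the name and command line are
+illustrative only):
+
+        heartbeat = Process(
+          name = 'heartbeat',
+          cmdline = './send_heartbeat.sh',
+          daemon = True,
+          max_failures = 0,
+          min_duration = 30
+        )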
+
+#### ephemeral
+
+By default, Thermos processes are non-ephemeral. If `ephemeral` is set to
+True, the process' status is not used to determine if its containing task
+has completed. For example, consider a task with a non-ephemeral
+webserver process and an ephemeral logsaver process
+that periodically checkpoints its log files to a centralized data store.
+The task is considered finished once the webserver process has
+completed, regardless of the logsaver's current status.
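+
+A minimal sketch of that scenario (the command lines are illustrative only):
+
+        webserver = Process(name = 'webserver', cmdline = './run_server.sh')
+        logsaver = Process(
+          name = 'logsaver',
+          cmdline = './checkpoint_logs.sh',
+          ephemeral = True
+        )
+        task = Task(
+          processes = [webserver, logsaver],
+          resources = Resources(cpu = 1.0, ram = 1*GB, disk = 1*GB)
+        )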
+
+#### min_duration
+
+Processes may succeed or fail multiple times during a single task's
+duration. Each of these is called a *process run*. `min_duration` is
+the minimum number of seconds the scheduler waits before running the
+same process.
+
+#### final
+
+Processes can be grouped into two classes: ordinary processes and
+finalizing processes. By default, Thermos processes are ordinary. They
+run as long as the task is considered healthy (i.e., no failure
+limits have been reached.) But once all regular Thermos processes
+finish or the task reaches a certain failure threshold, it
+moves into a "finalization" stage and runs all finalizing
+processes. These are typically processes necessary for cleaning up the
+task, such as log checkpointers, or perhaps e-mail notifications that
+the task completed.
+
+Finalizing processes may not depend upon ordinary processes or
+vice-versa; however, finalizing processes may depend upon other
+finalizing processes and otherwise run as a typical process
+schedule.
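+
+A minimal sketch of a finalizing cleanup process (the command line is
+illustrative only):
+
+        cleanup = Process(
+          name = 'cleanup',
+          cmdline = './archive_logs.sh',
+          final = True
+        )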
+
+#### logger
+
+The default behavior of Thermos is to store stderr/stdout logs in files which grow unbounded.
+In the event that you have large log volume, you may want to configure Thermos to automatically rotate logs
+after they grow to a certain size, which can prevent your job from using more than its allocated
+disk space.
+
+A Logger union consists of a destination enum, a mode enum, and a rotation policy.
+Use `destination` to set where the process logs should be sent. The default
+option is `file`. It's also possible to specify `console` to send logs
+to stdout/stderr, `none` to suppress log output, or `both` to send logs to files and
+console output. When using `none` or `console`, rotation attributes are ignored.
+Rotation policies only apply to loggers whose mode is `rotate`. The acceptable values
+for the LoggerMode enum are `standard` and `rotate`. The rotation policy applies to both
+stderr and stdout.
+
+By default, all processes use the `standard` LoggerMode.
+
+  **Attribute Name**  | **Type**          | **Description**
+  ------------------- | :---------------: | ---------------------------------
+   **destination**    | LoggerDestination | Destination of logs. (Default: `file`)
+   **mode**           | LoggerMode        | Mode of the logger. (Default: `standard`)
+   **rotate**         | RotatePolicy      | An optional rotation policy.
+
+A RotatePolicy describes log rotation behavior for when `mode` is set to `rotate`. It is ignored
+otherwise.
+
+  **Attribute Name**  | **Type**     | **Description**
+  ------------------- | :----------: | ---------------------------------
+   **log_size**       | Integer      | Maximum size (in bytes) of an individual log file. (Default: 100 MiB)
+   **backups**        | Integer      | The maximum number of backups to retain. (Default: 5)
+
+An example process configuration is as follows:
+
+        process = Process(
+          name='process',
+          logger=Logger(
+            destination=LoggerDestination('both'),
+            mode=LoggerMode('rotate'),
+            rotate=RotatePolicy(log_size=5*MB, backups=5)
+          )
+        )
+
+Task Schema
+===========
+
+Tasks fundamentally consist of a `name` and a list of Process objects stored as the
+value of the `processes` attribute. Processes can be further constrained with
+`constraints`. By default, `name`'s value inherits from the first Process in the
+`processes` list, so for simple `Task` objects with one Process, `name`
+can be omitted. In Mesos, `resources` is also required.
+
+### Task Object
+
+   **param**               | **type**                         | **description**
+   ---------               | :---------:                      | ---------------
+   ```name```              | String                           | Task name (Required) (Default: ```processes[0].name```)
+   ```processes```         | List of ```Process``` objects    | List of ```Process``` objects bound to this task. (Required)
+   ```constraints```       | List of ```Constraint``` objects | List of ```Constraint``` objects constraining processes.
+   ```resources```         | ```Resource``` object            | Resource footprint. (Required)
+   ```max_failures```      | Integer                          | Maximum process failures before being considered failed (Default: 1)
+   ```max_concurrency```   | Integer                          | Maximum number of concurrent processes (Default: 0, unlimited concurrency.)
+   ```finalization_wait``` | Integer                          | Amount of time allocated for finalizing processes, in seconds. (Default: 30)
+
+#### name
+`name` is a string denoting the name of this task. It defaults to the name of the first Process in
+the list of Processes associated with the `processes` attribute.
+
+#### processes
+
+`processes` is an unordered list of `Process` objects. To constrain the order
+in which they run, use `constraints`.
+
+##### constraints
+
+A list of `Constraint` objects. Currently it supports only one type,
+the `order` constraint. `order` is a list of process names
+that should run in the order given. For example,
+
+        process = Process(cmdline = "echo hello {{name}}")
+        task = Task(name = "echoes",
+                    processes = [process(name = "jim"), process(name = "bob")],
+                    constraints = [Constraint(order = ["jim", "bob"])])
+
+Constraints can be supplied ad-hoc and in duplicate. Not all
+Processes need be constrained; however, Tasks with cycles are
+rejected by the Thermos scheduler.
+
+Use the `order` function as shorthand to generate `Constraint` lists.
+The following:
+
+        order(process1, process2)
+
+is shorthand for
+
+        [Constraint(order = [process1.name(), process2.name()])]
+
+The `order` function accepts Process name strings `('foo', 'bar')` or the processes
+themselves, e.g. `foo=Process(name='foo', ...)`, `bar=Process(name='bar', ...)`,
+`constraints=order(foo, bar)`.
+
+#### resources
+
+Takes a `Resource` object, which specifies the amounts of CPU, memory, and disk space resources
+to allocate to the Task.
+
+#### max_failures
+
+`max_failures` is the number of failed processes needed for the `Task` to be
+marked as failed.
+
+For example, assume a Task has two Processes and a `max_failures` value of `2`:
+
+        template = Process(max_failures=10)
+        task = Task(
+          name = "fail",
+          processes = [
+             template(name = "failing", cmdline = "exit 1"),
+             template(name = "succeeding", cmdline = "exit 0")
+          ],
+          max_failures=2)
+
+The `failing` Process could fail 10 times before being marked as permanently
+failed, and the `succeeding` Process could succeed on the first run. However,
+the task would succeed despite only allowing for two failed processes. To be more
+specific, there would be 10 failed process runs but only 1 permanently failed
+process. Both processes would have to fail permanently for the Task to fail.
+
+#### max_concurrency
+
+For Tasks with a number of expensive but otherwise independent
+processes, you may want to limit the amount of concurrency
+the Thermos scheduler provides rather than artificially constraining
+it via `order` constraints. For example, a test framework may
+generate a task with 100 test run processes that need to run on
+a machine with only 4 cores. You can limit the amount of parallelism to
+4 by setting `max_concurrency=4` in your task configuration.
+
+For example, the following task spawns 180 Processes ("mappers")
+to compute individual elements of a 180 degree sine table, all dependent
+upon one final Process ("reducer") to tabulate the results:
+
+    def make_mapper(id):
+      return Process(
+        name = "mapper%03d" % id,
+        cmdline = "echo 'scale=50;s(%d*4*a(1)/180)' | bc -l > temp.sine_table.%03d"
+                  % (id, id))
+
+    def make_reducer():
+      return Process(
+        name = "reducer",
+        cmdline = "cat temp.* | nl > sine_table.txt && rm -f temp.*")
+
+    processes = map(make_mapper, range(180))
+
+    task = Task(
+      name = "mapreduce",
+      processes = processes + [make_reducer()],
+      constraints = [Constraint(order = [mapper.name(), 'reducer'])
+                     for mapper in processes],
+      max_concurrency = 8)
+
+#### finalization_wait
+
+Process execution is organized into three active stages: `ACTIVE`,
+`CLEANING`, and `FINALIZING`. The `ACTIVE` stage is when ordinary processes run.
+This stage lasts as long as Processes are running and the Task is healthy.
+The moment either all Processes have finished successfully or the Task has reached a
+maximum Process failure limit, it goes into the `CLEANING` stage and sends
+SIGTERMs to all currently running Processes and their process trees.
+Once all Processes have terminated, the Task goes into the `FINALIZING` stage
+and invokes the schedule of all Processes with the "final" attribute set to True.
+
+This whole process, from the end of the `ACTIVE` stage to the end of the
+`FINALIZING` stage, must happen within `finalization_wait` seconds. If it does not
+finish during that time, all remaining Processes are sent SIGKILLs
+(or if they depend upon uncompleted Processes, are
+never invoked.)
+
+When running on Aurora, the `finalization_wait` is capped at 60 seconds.
+
+### Constraint Object
+
+Current constraint objects only support a single ordering constraint, `order`,
+which specifies its processes run sequentially in the order given. By
+default, all processes run in parallel when bound to a `Task` without
+ordering constraints.
+
+   param | type           | description
+   ----- | :----:         | -----------
+   order | List of String | List of processes by name (String) that should be run serially.
+
+### Resource Object
+
+Specifies the amount of CPU, RAM, and disk resources the task needs. See the
+[Resource Isolation document](../../features/resource-isolation/) for suggested values and to understand how
+resources are allocated.
+
+  param      | type    | description
+  -----      | :----:  | -----------
+  ```cpu```  | Float   | Fractional number of cores required by the task.
+  ```ram```  | Integer | Bytes of RAM required by the task.
+  ```disk``` | Integer | Bytes of disk required by the task.
+  ```gpu```  | Integer | Number of GPU cores required by the task.
+
+
+Job Schema
+==========
+
+### Job Objects
+
+*Note: Specifying a ```Container``` object as the value of the ```container``` property is
+  deprecated in favor of setting its value directly to the appropriate ```Docker``` or ```Mesos```
+  container type*
+
+   name | type | description
+   ------ | :-------: | -------
+  ```task``` | Task | The Task object to bind to this job. Required.
+  ```name``` | String | Job name. (Default: inherited from the task attribute's name)
+  ```role``` | String | Job role account. Required.
+  ```cluster``` | String | Cluster in which this job is scheduled. Required.
+  ```environment``` | String | Job environment, default ```devel```. Must be one of ```prod```, ```devel```, ```test``` or ```staging<number>```.
+  ```contact``` | String | Best email address to reach the owner of the job. For production jobs, this is usually a team mailing list.
+  ```instances```| Integer | Number of instances (sometimes referred to as replicas or shards) of the task to create. (Default: 1)
+  ```cron_schedule``` | String | Cron schedule in cron format. May only be used with non-service jobs. See [Cron Jobs](../../features/cron-jobs/) for more information. Default: None (not a cron job.)
+  ```cron_collision_policy``` | String | Policy to use when a cron job is triggered while a previous run is still active. KILL_EXISTING: kill the previous run and schedule the new run. CANCEL_NEW: let the previous run continue and cancel the new run. (Default: KILL_EXISTING)
+  ```update_config``` | ```UpdateConfig``` object | Parameters for controlling the rate and policy of rolling updates.
+  ```constraints``` | dict | Scheduling constraints for the tasks. See the section on the [constraint specification language](#specifying-scheduling-constraints)
+  ```service``` | Boolean | If True, restart tasks regardless of success or failure. (Default: False)
+  ```max_task_failures``` | Integer | Maximum number of failures after which the task is considered to have failed (Default: 1) Set to -1 to allow for infinite failures
+  ```priority``` | Integer | Preemption priority to give the task (Default 0). Tasks with higher priorities may preempt tasks at lower priorities.
+  ```production``` | Boolean |  Whether or not this is a production task that may [preempt](../../features/multitenancy/#preemption) other tasks (Default: False). Production job role must have the appropriate [quota](../../features/multitenancy/#preemption).
+  ```health_check_config``` | ```HealthCheckConfig``` object | Parameters for controlling a task's health checks. HTTP health check is only used if a  health port was assigned with a command line wildcard.
+  ```container``` | Choice of ```Container```, ```Docker``` or ```Mesos``` object | An optional container to run all processes inside of.
+  ```lifecycle``` | ```LifecycleConfig``` object | An optional task lifecycle configuration that dictates commands to be executed on startup/teardown.  HTTP lifecycle is enabled by default if the "health" port is requested.  See [LifecycleConfig Objects](#lifecycleconfig-objects) for more information.
+  ```tier``` | String | Task tier type. The default scheduler tier configuration allows for 3 tiers: `revocable`, `preemptible`, and `preferred`. The `revocable` tier requires the task to run with Mesos revocable resources. Setting the task's tier to `preemptible` allows for the possibility of that task being preempted by other tasks when cluster is running low on resources. The `preferred` tier prevents the task from using revocable resources and from being preempted. Since it is possible that a cluster is configured with a custom tier configuration, users should consult their cluster administrator to be informed of the tiers supported by the cluster. Attempts to schedule jobs with an unsupported tier will be rejected by the scheduler.
+
+
+### UpdateConfig Objects
+
+Parameters for controlling the rate and policy of rolling updates.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+| ```watch_secs```             | Integer  | Minimum number of seconds a shard must remain in ```RUNNING``` state before considered a success (Default: 45)
+| ```max_per_shard_failures``` | Integer  | Maximum number of restarts per shard during update. Increments total failure count when this limit is exceeded. (Default: 0)
+| ```max_total_failures```     | Integer  | Maximum number of shard failures to be tolerated in total during an update. Cannot be greater than or equal to the total number of tasks in a job. (Default: 0)
+| ```rollback_on_failure```    | boolean  | When False, prevents auto rollback of a failed update (Default: True)
+| ```wait_for_batch_completion```| boolean | When True, all threads from a given batch will be blocked from picking up new instances until the entire batch is updated. This essentially simulates the legacy sequential updater algorithm. (Default: False)
+| ```pulse_interval_secs```    | Integer  |  Indicates a [coordinated update](../../features/job-updates/#coordinated-job-updates). If no pulses are received within the provided interval the update will be blocked. Beta-updater only. Will fail on submission when used with client updater. (Default: None)
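+
+A minimal sketch of the object as it would be assigned to a Job's
+`update_config` attribute (the values are illustrative only):
+
+        update_config = UpdateConfig(
+          batch_size = 3,
+          watch_secs = 60,
+          max_per_shard_failures = 2,
+          max_total_failures = 5
+        )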
+
+### HealthCheckConfig Objects
+
+Parameters for controlling a task's health checks via HTTP or a shell command.
+
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```health_checker```           | HealthCheckerConfig | Configure what kind of health check to use.
+| ```initial_interval_secs```    | Integer   | Initial delay for performing a health check. (Default: 15)
+| ```interval_secs```            | Integer   | Interval on which to check the task's health. (Default: 10)
+| ```max_consecutive_failures``` | Integer   | Maximum number of consecutive failures that will be tolerated before considering a task unhealthy (Default: 0)
+| ```timeout_secs```             | Integer   | Health check timeout. (Default: 1)
+
+### HealthCheckerConfig Objects
+| param                          | type                | description
+| -------                        | :-------:           | --------
+| ```http```                     | HttpHealthChecker  | Configure health check to use HTTP. (Default)
+| ```shell```                    | ShellHealthChecker | Configure health check via a shell command.
+
+### HttpHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```endpoint```                 | String    | HTTP endpoint to check (Default: /health)
+| ```expected_response```        | String    | If not empty, fail the HTTP health check if the response differs. Case insensitive. (Default: ok)
+| ```expected_response_code```   | Integer   | If not zero, fail the HTTP health check if the response code differs. (Default: 0)
+
+### ShellHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```shell_command```            | String    | An alternative to HTTP health checking. Specifies a shell command that will be executed. Any non-zero exit status will be interpreted as a health check failure.
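+
+A minimal sketch combining the three objects above into a Job's
+`health_check_config` attribute, using a shell-based check (the command is
+illustrative only):
+
+        health_check_config = HealthCheckConfig(
+          health_checker = HealthCheckerConfig(
+            shell = ShellHealthChecker(shell_command = 'test -f /tmp/healthy')
+          ),
+          interval_secs = 30
+        )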
+
+
+### Announcer Objects
+
+If the `announce` field in the Job configuration is set, each task will be
+registered in the ServerSet `/aurora/role/environment/jobname` in the
+zookeeper ensemble configured by the executor (which can be optionally overridden by specifying
+the `zk_path` parameter).  If no Announcer object is specified,
+no announcement will take place.  For more information about ServerSets, see the [Service Discovery](../../features/service-discovery/)
+documentation.
+
+By default, the hostname in the registered endpoints will be the `--hostname` parameter
+that is passed to the mesos agent. To override the hostname value, the executor can be started
+with `--announcer-hostname=<overridden_value>`. If you decide to use `--announcer-hostname` and if
+the overridden value needs to change for every executor, then the executor has to be started inside a wrapper; see [Executor Wrapper](../../operations/configuration/#thermos-executor-wrapper).
+
+For example, if you want the hostname in the endpoint to be an IP address instead of the hostname,
+the `--hostname` parameter to the mesos agent can be set to the machine IP or the executor can
+be started with `--announcer-hostname=<host_ip>` while wrapping the executor inside a script.
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```primary_port```             | String    | Which named port to register as the primary endpoint in the ServerSet (Default: `http`)
+| ```portmap```                  | dict      | A mapping of additional endpoints to be announced in the ServerSet (Default: `{ 'aurora': '{{primary_port}}' }`)
+| ```zk_path```                  | String    | Zookeeper serverset path override (executor must be started with the `--announcer-allow-custom-serverset-path` parameter)
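+
+A minimal sketch of enabling announcement on a Job (attribute values are
+illustrative only):
+
+        job = Job(
+          ...
+          announce = Announcer(primary_port = 'http'),
+          ...
+        )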
+
+#### Port aliasing with the Announcer `portmap`
+
+The primary endpoint registered in the ServerSet is the one allocated to the port
+specified by the `primary_port` in the `Announcer` object, by default
+the `http` port.  This port can be referenced from anywhere within a configuration
+as `{{thermos.ports[http]}}`.
+
+Without the port map, each named port would be allocated a unique port number.
+The `portmap` allows two different named ports to be aliased together.  The default
+`portmap` aliases the `aurora` port (i.e. `{{thermos.ports[aurora]}}`) to
+the `http` port.  Even though the two ports can be referenced independently,
+only one port is allocated by Mesos.  Any port referenced in a `Process` object
+but which is not in the portmap will be allocated dynamically by Mesos and announced as well.
+
+It is possible to use the portmap to alias names to static port numbers, e.g.
+`{'http': 80, 'https': 443, 'aurora': 'http'}`.  In this case, referencing
+`{{thermos.ports[aurora]}}` would look up `{{thermos.ports[http]}}` then
+find a static port 80.  No port would be requested of or allocated by Mesos.
+
+Static ports should be used cautiously as Aurora does nothing to prevent two
+tasks with the same static port allocations from being co-scheduled.
+External constraints such as agent attributes should be used to enforce such
+guarantees should they be needed.
+
+### Container Objects
+
+*Note: Both Docker and Mesos unified-container support are currently EXPERIMENTAL.*
+*Note: In order to correctly execute processes inside a job, the Docker container must have python 2.7 installed.*
+
+*Note: For private docker registry, mesos mandates the docker credential file to be named as `.dockercfg`, even though docker may create a credential file with a different name on various platforms. Also, the `.dockercfg` file needs to be copied into the sandbox using the `-thermos_executor_resources` flag, specified while starting Aurora.*
+
+Describes the container the job's processes will run inside. If not using Docker or the Mesos
+unified-container, the container can be omitted from your job config.
+
+  param          | type           | description
+  -----          | :----:         | -----------
+  ```docker```   | Docker         | A docker container to use.
+  ```mesos```    | Mesos          | A mesos container to use.
+
+### Docker Object
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```image```      | String          | The name of the docker image to execute.  If the image does not exist locally it will be pulled with ```docker pull```.
+  ```parameters``` | List(Parameter) | Additional parameters to pass to the docker containerizer.
+
+### Docker Parameter Object
+
+Docker CLI parameters. This needs to be enabled by the scheduler `allow_docker_parameters` option.
+See [Docker Command Line Reference](https://docs.docker.com/reference/commandline/run/) for valid parameters.
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```name```       | String          | The name of the docker parameter. E.g. volume
+  ```value```      | String          | The value of the parameter. E.g. /usr/local/bin:/usr/bin:rw
+
+### Mesos Object
+
+  param            | type                           | description
+  -----            | :----:                         | -----------
+  ```image```      | Choice(AppcImage, DockerImage) | An optional filesystem image to use within this container.
+
+### AppcImage
+
+*Note: In order to correctly execute processes inside a job, the filesystem image must include python 2.7.*
+
+Describes an AppC filesystem image.
+
+  param          | type   | description
+  -----          | :----: | -----------
+  ```name```     | String | The name of the appc image.
+  ```image_id``` | String | The [image id](https://github.com/appc/spec/blob/master/spec/aci.md#image-id) of the appc image.
+
+### DockerImage
+
+*Note: In order to correctly execute processes inside a job, the filesystem image must include python 2.7.*
+
+Describes a Docker filesystem image.
+
+  param      | type   | description
+  -----      | :----: | -----------
+  ```name``` | String | The name of the docker image.
+  ```tag```  | String | The tag that identifies the docker image.
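+
+A minimal sketch of running a Job's processes inside a Docker filesystem image
+via the Mesos container (the image name and tag are illustrative only):
+
+        job = Job(
+          ...
+          container = Mesos(image = DockerImage(name = 'python', tag = '2.7')),
+          ...
+        )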
+
+### LifecycleConfig Objects
+
+*Note: The only lifecycle configuration supported is the HTTP lifecycle via the HttpLifecycleConfig.*
+
+  param          | type                | description
+  -----          | :----:              | -----------
+  ```http```     | HttpLifecycleConfig | Configure the lifecycle manager to send lifecycle commands to the task via HTTP.
+
+### HttpLifecycleConfig Objects
+
+  param          | type            | description
+  -----          | :----:          | -----------
+  ```port```     | String          | The named port to send POST commands (Default: health)
+  ```graceful_shutdown_endpoint``` | String | Endpoint to hit to indicate that a task should gracefully shutdown. (Default: /quitquitquit)
+  ```shutdown_endpoint``` | String | Endpoint to hit to give a task its final warning before being killed. (Default: /abortabortabort)
+
+#### graceful_shutdown_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint to request that the task gracefully shut itself down.  This is a
+courtesy call before the `shutdown_endpoint` is invoked a fixed amount of
+time later.
+
+#### shutdown_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint as a final warning before the task is shut down.  If the task
+does not shut down on its own after this, it will be forcefully killed.
+
+
+Specifying Scheduling Constraints
+=================================
+
+In the `Job` object there is a map `constraints` from String to String
+allowing the user to tailor the schedulability of tasks within the job.
+
+The constraint map's key value is the attribute name in which we
+constrain Tasks within our Job. The value is how we constrain them.
+There are two types of constraints: *limit constraints* and *value
+constraints*.
+
+| constraint    | description
+| ------------- | --------------
+| Limit         | A string that specifies a limit for a constraint. Starts with <code>'limit:</code> followed by an Integer and closing single quote, such as ```'limit:1'```.
+| Value         | A string that specifies a value for a constraint. To include a list of values, separate the values using commas. To negate the values of a constraint, start with a ```!```.
+
+Further details can be found in the [Scheduling Constraints](../../features/constraints/) feature
+description.
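+
+A minimal sketch combining both constraint types (this assumes the cluster's
+agents expose `host` and `rack` attributes):
+
+        job = Job(
+          ...
+          constraints = {
+            'rack': 'limit:1',        # at most one instance per rack
+            'host': '!special-host',  # avoid agents whose host attribute is 'special-host'
+          },
+          ...
+        )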
+
+
+Template Namespaces
+===================
+
+Currently, a few Pystachio namespaces have special semantics. Using them
+in your configuration allows you to tailor application behavior
+through environment introspection or to interact in special ways with the
+Aurora client or Aurora-provided services.
+
+### mesos Namespace
+
+The `mesos` namespace contains variables which relate to the `mesos` agent
+which launched the task. The `instance` variable can be used
+to distinguish between Task replicas.
+
+| variable name     | type       | description
+| --------------- | :--------: | -------------
+| ```instance```    | Integer    | The instance number of the created task. A job with 5 replicas has instance numbers 0, 1, 2, 3, and 4.
+| ```hostname``` | String | The instance hostname that the task was launched on.
+
+Please note, there is no uniqueness guarantee for `instance` in the presence of
+network partitions. If that is required, it should be baked in at the application
+level using a distributed coordination service such as Zookeeper.
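+
+A minimal sketch of using the namespace inside a Process (the command line is
+illustrative only):
+
+        shard_dir = Process(
+          name = 'shard_dir',
+          cmdline = 'mkdir -p /var/data/shard-{{mesos.instance}}'
+        )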
+
+### thermos Namespace
+
+The `thermos` namespace contains variables that work directly on the
+Thermos platform in addition to Aurora. This namespace is fully
+compatible with Tasks invoked via the `thermos` CLI.
+
+| variable      | type                     | description                        |
+| :----------:  | ---------                | ------------                       |
+| ```ports```   | map of string to Integer | A map of names to port numbers     |
+| ```task_id``` | string                   | The task ID assigned to this task. |
+
+The `thermos.ports` namespace is automatically populated by Aurora when
+invoking tasks on Mesos. When running the `thermos` command directly,
+these ports must be explicitly mapped with the `-P` option.
+
+For example, if '{{`thermos.ports[http]`}}' is specified in a `Process`
+configuration, it is automatically extracted and auto-populated by
+Aurora, but must be specified with, for example, `thermos -P http:12345`
+to map `http` to port 12345 when running via the CLI.
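+
+A minimal sketch of binding a server to an Aurora-assigned port inside a
+Process (the command line is illustrative only):
+
+        server = Process(
+          name = 'server',
+          cmdline = './run_server --port={{thermos.ports[http]}}'
+        )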
diff --git a/source/documentation/0.14.0/reference/scheduler-configuration.md b/source/documentation/0.14.0/reference/scheduler-configuration.md
new file mode 100644
index 0000000..92ff474
--- /dev/null
+++ b/source/documentation/0.14.0/reference/scheduler-configuration.md
@@ -0,0 +1,234 @@
+# Scheduler Configuration Reference
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+A list of the available options can be seen by running `aurora-scheduler -help`.
+
+Please refer to the [Operator Configuration Guide](../../operations/configuration/) for details on how
+to properly set the most important options.
+
+```
+$ aurora-scheduler -help
+-------------------------------------------------------------------------
+-h or -help to print this help message
+
+Required flags:
+-backup_dir [not null]
+	Directory to store backups under. Will be created if it does not exist.
+-cluster_name [not null]
+	Name to identify the cluster being served.
+-db_max_active_connection_count [must be > 0]
+	Max number of connections to use with database via MyBatis
+-db_max_idle_connection_count [must be > 0]
+	Max number of idle connections to the database via MyBatis
+-framework_authentication_file
+	Properties file which contains framework credentials to authenticate with Mesosmaster. Must contain the properties 'aurora_authentication_principal' and 'aurora_authentication_secret'.
+-mesos_master_address [not null]
+	Address for the mesos master, can be a socket address or zookeeper path.
+-mesos_role
+	The Mesos role this framework will register as. The default is to left this empty, and the framework will register without any role and only receive unreserved resources in offer.
+-serverset_path [not null, must be non-empty]
+	ZooKeeper ServerSet path to register at.
+-shiro_after_auth_filter
+	Fully qualified class name of the servlet filter to be applied after the shiro auth filters are applied.
+-thermos_executor_path
+	Path to the thermos executor entry point.
+-tier_config [file must be readable]
+	Configuration file defining supported task tiers, task traits and behaviors.
+-zk_endpoints [must have at least 1 item]
+	Endpoint specification for the ZooKeeper servers.
+
+Optional flags:
+-allow_docker_parameters (default false)
+	Allow to pass docker container parameters in the job.
+-allowed_container_types (default [MESOS])
+	Container types that are allowed to be used by jobs.
+-async_slot_stat_update_interval (default (1, mins))
+	Interval on which to try to update open slot stats.
+-async_task_stat_update_interval (default (1, hrs))
+	Interval on which to try to update resource consumption stats.
+-async_worker_threads (default 8)
+	The number of worker threads to process async task operations with.
+-backup_interval (default (1, hrs))
+	Minimum interval on which to write a storage backup.
+-cron_scheduler_num_threads (default 100)
+	Number of threads to use for the cron scheduler thread pool.
+-cron_start_initial_backoff (default (1, secs))
+	Initial backoff delay while waiting for a previous cron run to be killed.
+-cron_start_max_backoff (default (1, mins))
+	Max backoff delay while waiting for a previous cron run to be killed.
+-cron_timezone (default GMT)
+	TimeZone to use for cron predictions.
+-custom_executor_config [file must exist, file must be readable]
+	Path to custom executor settings configuration file.
+-db_lock_timeout (default (1, mins))
+	H2 table lock timeout
+-db_row_gc_interval (default (2, hrs))
+	Interval on which to scan the database for unused row references.
+-default_docker_parameters (default {})
+	Default docker parameters for any job that does not explicitly declare parameters.
+-dlog_max_entry_size (default (512, KB))
+	Specifies the maximum entry size to append to the log. Larger entries will be split across entry Frames.
+-dlog_shutdown_grace_period (default (2, secs))
+	Specifies the maximum time to wait for scheduled checkpoint and snapshot actions to complete before forcibly shutting down.
+-dlog_snapshot_interval (default (1, hrs))
+	Specifies the frequency at which snapshots of local storage are taken and written to the log.
+-enable_cors_for
+	List of domains for which CORS support should be enabled.
+-enable_h2_console (default false)
+	Enable H2 DB management console.
+-enable_preemptor (default true)
+	Enable the preemptor and preemption
+-executor_user (default root)
+	User to start the executor. Defaults to "root". Set this to an unprivileged user if the mesos master was started with "--no-root_submissions". If set to anything other than "root", the executor will ignore the "role" setting for jobs since it can't use setuid() anymore. This means that all your jobs will run under the specified user and the user has to exist on the mesos slaves.
+-first_schedule_delay (default (1, ms))
+	Initial amount of time to wait before first attempting to schedule a PENDING task.
+-flapping_task_threshold (default (5, mins))
+	A task that repeatedly runs for less than this time is considered to be flapping.
+-framework_announce_principal (default false)
+	When 'framework_authentication_file' flag is set, the FrameworkInfo registered with the mesos master will also contain the principal. This is necessary if you intend to use mesos authorization via mesos ACLs. The default will change in a future release. Changing this value is backwards incompatible. For details, see MESOS-703.
+-framework_failover_timeout (default (21, days))
+	Time after which a framework is considered deleted.  SHOULD BE VERY HIGH.
+-framework_name (default TwitterScheduler)
+	Name used to register the Aurora framework with Mesos. Changing this value can be backwards incompatible. For details, see MESOS-703.
+-global_container_mounts (default [])
+	A comma separated list of mount points (in host:container form) to mount into all (non-mesos) containers.
+-history_max_per_job_threshold (default 100)
+	Maximum number of terminated tasks to retain in a job history.
+-history_min_retention_threshold (default (1, hrs))
+	Minimum guaranteed time for task history retention before any pruning is attempted.
+-history_prune_threshold (default (2, days))
+	Time after which the scheduler will prune terminated task history.
+-hostname
+	The hostname to advertise in ZooKeeper instead of the locally-resolved hostname.
+-http_authentication_mechanism (default NONE)
+	HTTP Authentication mechanism to use.
+-http_port (default 0)
+	The port to start an HTTP server on.  Default value will choose a random port.
+-initial_flapping_task_delay (default (30, secs))
+	Initial amount of time to wait before attempting to schedule a flapping task.
+-initial_schedule_penalty (default (1, secs))
+	Initial amount of time to wait before attempting to schedule a task that has failed to schedule.
+-initial_task_kill_retry_interval (default (5, secs))
+	When killing a task, retry after this delay if mesos has not responded, backing off up to transient_task_state_timeout
+-job_update_history_per_job_threshold (default 10)
+	Maximum number of completed job updates to retain in a job update history.
+-job_update_history_pruning_interval (default (15, mins))
+	Job update history pruning interval.
+-job_update_history_pruning_threshold (default (30, days))
+	Time after which the scheduler will prune completed job update history.
+-kerberos_debug (default false)
+	Produce additional Kerberos debugging output.
+-kerberos_server_keytab
+	Path to the server keytab.
+-kerberos_server_principal
+	Kerberos server principal to use, usually of the form HTTP/aurora.example.com@EXAMPLE.COM
+-max_flapping_task_delay (default (5, mins))
+	Maximum delay between attempts to schedule a flapping task.
+-max_leading_duration (default (1, days))
+	After leading for this duration, the scheduler should commit suicide.
+-max_registration_delay (default (1, mins))
+	Max allowable delay to allow the driver to register before aborting
+-max_reschedule_task_delay_on_startup (default (30, secs))
+	Upper bound of random delay for pending task rescheduling on scheduler startup.
+-max_saved_backups (default 48)
+	Maximum number of backups to retain before deleting the oldest backups.
+-max_schedule_attempts_per_sec (default 40.0)
+	Maximum number of scheduling attempts to make per second.
+-max_schedule_penalty (default (1, mins))
+	Maximum delay between attempts to schedule a PENDING task.
+-max_status_update_batch_size (default 1000) [must be > 0]
+	The maximum number of status updates that can be processed in a batch.
+-max_tasks_per_job (default 4000) [must be > 0]
+	Maximum number of allowed tasks in a single job.
+-max_update_instance_failures (default 20000) [must be > 0]
+	Upper limit on the number of failures allowed during a job update. This helps cap potentially unbounded entries into storage.
+-min_offer_hold_time (default (5, mins))
+	Minimum amount of time to hold a resource offer before declining.
+-native_log_election_retries (default 20)
+	The maximum number of attempts to obtain a new log writer.
+-native_log_election_timeout (default (15, secs))
+	The timeout for a single attempt to obtain a new log writer.
+-native_log_file_path
+	Path to a file to store the native log data in.  If the parent directory does not exist it will be created.
+-native_log_quorum_size (default 1)
+	The size of the quorum required for all log mutations.
+-native_log_read_timeout (default (5, secs))
+	The timeout for doing log reads.
+-native_log_write_timeout (default (3, secs))
+	The timeout for doing log appends and truncations.
+-native_log_zk_group_path
+	A zookeeper node for use by the native log to track the master coordinator.
+-offer_filter_duration (default (5, secs))
+	Duration after which we expect Mesos to re-offer unused resources. A short duration improves scheduling performance in smaller clusters, but might lead to resource starvation for other frameworks if you run many frameworks in your cluster.
+-offer_hold_jitter_window (default (1, mins))
+	Maximum amount of random jitter to add to the offer hold time window.
+-offer_reservation_duration (default (3, mins))
+	Time to reserve a slave's offers while trying to satisfy a task preempting another.
+-populate_discovery_info (default false)
+	If true, Aurora populates DiscoveryInfo field of Mesos TaskInfo.
+-preemption_delay (default (3, mins))
+	Time interval after which a pending task becomes eligible to preempt other tasks
+-preemption_slot_hold_time (default (5, mins))
+	Time to hold a preemption slot found before it is discarded.
+-preemption_slot_search_interval (default (1, mins))
+	Time interval between pending task preemption slot searches.
+-receive_revocable_resources (default false)
+	Allows receiving revocable resource offers from Mesos.
+-reconciliation_explicit_interval (default (60, mins))
+	Interval on which scheduler will ask Mesos for status updates of all non-terminal tasks known to scheduler.
+-reconciliation_implicit_interval (default (60, mins))
+	Interval on which scheduler will ask Mesos for status updates of all non-terminal tasks known to Mesos.
+-reconciliation_initial_delay (default (1, mins))
+	Initial amount of time to delay task reconciliation after scheduler start up.
+-reconciliation_schedule_spread (default (30, mins))
+	Difference between explicit and implicit reconciliation intervals intended to create a non-overlapping task reconciliation schedule.
+-require_docker_use_executor (default true)
+	If false, Docker tasks may run without an executor (EXPERIMENTAL)
+-shiro_ini_path
+	Path to shiro.ini for authentication and authorization configuration.
+-shiro_realm_modules (default [org.apache.aurora.scheduler.app.MoreModules$1@13c9d689])
+	Guice modules for configuring Shiro Realms.
+-sla_non_prod_metrics (default [])
+	Metric categories collected for non production tasks.
+-sla_prod_metrics (default [JOB_UPTIMES, PLATFORM_UPTIME, MEDIANS])
+	Metric categories collected for production tasks.
+-sla_stat_refresh_interval (default (1, mins))
+	The SLA stat refresh interval.
+-slow_query_log_threshold (default (25, ms))
+	Log all queries that take at least this long to execute.
+-stat_retention_period (default (1, hrs))
+	Time for a stat to be retained in memory before expiring.
+-stat_sampling_interval (default (1, secs))
+	Statistic value sampling interval.
+-thermos_executor_cpu (default 0.25)
+	The number of CPU cores to allocate for each instance of the executor.
+-thermos_executor_flags
+	Extra arguments to be passed to the thermos executor
+-thermos_executor_ram (default (128, MB))
+	The amount of RAM to allocate for each instance of the executor.
+-thermos_executor_resources (default [])
+	A comma separated list of additional resources to copy into the sandbox. Note: if thermos_executor_path is not the thermos_executor.pex file itself, this must include it.
+-thermos_home_in_sandbox (default false)
+	If true, changes HOME to the sandbox before running the executor. This primarily has the effect of causing the executor and runner to extract themselves into the sandbox.
+-transient_task_state_timeout (default (5, mins))
+	The amount of time after which to treat a task stuck in a transient state as LOST.
+-use_beta_db_task_store (default false)
+	Whether to use the experimental database-backed task store.
+-viz_job_url_prefix (default )
+	URL prefix for job container stats.
+-webhook_config [file must be readable]
+	File to configure an HTTP webhook to receive task state change events.
+-zk_chroot_path
+	chroot path to use for the ZooKeeper connections
+-zk_digest_credentials
+	user:password to use when authenticating with ZooKeeper.
+-zk_in_proc (default false)
+	Launches an embedded zookeeper server for local testing causing -zk_endpoints to be ignored if specified.
+-zk_session_timeout (default (4, secs))
+	The ZooKeeper session timeout.
+-zk_use_curator (default false)
+	Uses Apache Curator as the zookeeper client; otherwise a copy of Twitter commons/zookeeper (the legacy library) is used.
+-------------------------------------------------------------------------
+```
diff --git a/source/documentation/0.14.0/reference/scheduler-endpoints.md b/source/documentation/0.14.0/reference/scheduler-endpoints.md
new file mode 100644
index 0000000..d302e90
--- /dev/null
+++ b/source/documentation/0.14.0/reference/scheduler-endpoints.md
@@ -0,0 +1,19 @@
+# HTTP endpoints
+
+There are a number of HTTP endpoints that the Aurora scheduler exposes. These allow various
+operational tasks to be performed on the scheduler. Below is the list of all such endpoints
+and a brief explanation of what they do.
+
+## Leader health
+The /leaderhealth endpoint enables health checks on scheduler instances in order
+to forward requests to the leading scheduler. This is typically used by a load balancer such as
+HAProxy or AWS ELB.
+
+When an HTTP GET request is issued on this endpoint, it responds as follows:
+
+- If the instance that received the GET request is the leading scheduler, an HTTP status code of
+  200 (OK) is returned.
+- If the instance that received the GET request is not the leading scheduler but a leader does
+  exist, an HTTP status code of 503 (SERVICE_UNAVAILABLE) is returned.
+- If no leader currently exists or the leader is unknown, an HTTP status code of 502
+  (BAD_GATEWAY) is returned.
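+
+For illustration, a minimal probe for this endpoint might look like the following Python
+sketch (the host name and port below are assumptions; substitute those of your scheduler):
+
+    import urllib.error
+    import urllib.request
+
+    def is_leading_scheduler(host, port=8081):
+        """Return True only if the given scheduler instance is the current leader."""
+        try:
+            # urlopen raises HTTPError for the 502/503 responses described above.
+            resp = urllib.request.urlopen('http://%s:%d/leaderhealth' % (host, port))
+            return resp.getcode() == 200
+        except urllib.error.HTTPError:
+            return False
+
+    print(is_leading_scheduler('scheduler1.example.com'))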
\ No newline at end of file
diff --git a/source/documentation/0.14.0/reference/task-lifecycle.md b/source/documentation/0.14.0/reference/task-lifecycle.md
new file mode 100644
index 0000000..4dcb481
--- /dev/null
+++ b/source/documentation/0.14.0/reference/task-lifecycle.md
@@ -0,0 +1,146 @@
+# Task Lifecycle
+
+When Aurora reads a configuration file and finds a `Job` definition, it:
+
+1.  Evaluates the `Job` definition.
+2.  Splits the `Job` into its constituent `Task`s.
+3.  Sends those `Task`s to the scheduler.
+4.  The scheduler puts the `Task`s into `PENDING` state, starting each
+    `Task`'s life cycle.
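+
+As a sketch (the names and resource values are illustrative only), a `Job` with
+`instances = 2` evaluates to two `Task`s, each of which enters `PENDING`:
+
+    hello = Process(name = 'hello', cmdline = 'echo hello world')
+
+    jobs = [Job(
+      cluster = 'devcluster', role = 'www-data', environment = 'devel', name = 'hello',
+      instances = 2,  # split into two constituent Tasks
+      task = Task(name = 'hello', processes = [hello],
+                  resources = Resources(cpu = 0.1, ram = 16*MB, disk = 16*MB)),
+    )]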
+
+
+![Life of a task](../images/lifeofatask.png)
+
+Please note that a couple of the task states described below are missing from
+this state diagram.
+
+
+## PENDING to RUNNING states
+
+When a `Task` is in the `PENDING` state, the scheduler constantly
+searches for machines satisfying that `Task`'s resource request
+requirements (RAM, disk space, CPU time) while maintaining configuration
+constraints such as "a `Task` must run on machines  dedicated  to a
+particular role" or attribute limit constraints such as "at most 2
+`Task`s from the same `Job` may run on each rack". When the scheduler
+finds a suitable match, it assigns the `Task` to a machine and puts the
+`Task` into the `ASSIGNED` state.
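+
+As a sketch of how such constraints are expressed in a job configuration (the
+`rack` attribute and the `mesos/myservice` dedicated value are examples; agents
+advertise their own attributes):
+
+    jobs = [Job(
+      # ... other job fields ...
+      constraints = {
+        # At most 2 instances of this job per rack.
+        'rack': 'limit:2',
+        # Only run on machines dedicated to this role.
+        'dedicated': 'mesos/myservice',
+      },
+    )]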
+
+From the `ASSIGNED` state, the scheduler sends an RPC to the agent
+machine containing `Task` configuration, which the agent uses to spawn
+an executor responsible for the `Task`'s lifecycle. When the scheduler
+receives an acknowledgment that the machine has accepted the `Task`,
+the `Task` goes into `STARTING` state.
+
+`STARTING` state initializes a `Task` sandbox. When the sandbox is fully
+initialized, Thermos begins to invoke `Process`es. Also, the agent
+machine sends an update to the scheduler that the `Task` is
+in `RUNNING` state.
+
+
+
+## RUNNING to terminal states
+
+There are various ways that an active `Task` can transition into a terminal
+state. By definition, it can never leave this state. However, depending on
+the nature of the termination and the originating `Job` definition
+(e.g. `service`, `max_task_failures`), a replacement `Task` might be
+scheduled.
+
+### Natural Termination: FINISHED, FAILED
+
+A `RUNNING` `Task` can terminate without direct user interaction. For
+example, it may be a finite computation that finishes, even something as
+simple as `echo hello world`, or it could be an exceptional condition in
+a long-lived service. If the `Task` is successful (its underlying
+processes have succeeded with exit status `0` or finished without
+reaching failure limits) it moves into `FINISHED` state. If it finished
+after reaching a set of failure limits, it goes into `FAILED` state.
+
+A terminated `Task` that is subject to rescheduling will be temporarily
+`THROTTLED` if it is considered to be flapping. A task is flapping if its
+previous invocation was terminated after less than 5 minutes (the scheduler
+default, governed by `-flapping_task_threshold`). The time penalty a task
+must remain in the `THROTTLED` state before it is eligible for rescheduling
+increases with each consecutive failure.
+
+### Forceful Termination: KILLING, RESTARTING
+
+You can terminate a `Task` by issuing an `aurora job kill` command, which
+moves it into `KILLING` state. The scheduler then sends the agent a
+request to terminate the `Task`. If the scheduler receives a successful
+response, it moves the `Task` into `KILLED` state and never restarts it.
+
+If a `Task` is forced into the `RESTARTING` state via the `aurora job restart`
+command, the scheduler kills the underlying task but in parallel schedules
+an identical replacement for it.
+
+In any case, the responsible executor on the agent follows an escalation
+sequence when killing a running task:
+
+  1. If a `HttpLifecycleConfig` is not present, skip to (4).
+  2. Send a POST to the `graceful_shutdown_endpoint` and wait 5 seconds.
+  3. Send a POST to the `shutdown_endpoint` and wait 5 seconds.
+  4. Send SIGTERM (`kill`) and wait at most `finalization_wait` seconds.
+  5. Send SIGKILL (`kill -9`).
+
+If the executor notices that all `Process`es in a `Task` have aborted
+during this sequence, it will not proceed with subsequent steps.
+Note that graceful shutdown is best-effort, and due to the many
+inevitable realities of distributed systems, it may not be performed.
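+
+As a sketch, a job opts into the HTTP portion of this sequence by configuring an
+`HttpLifecycleConfig` (the values shown are assumed defaults; verify them against
+your Aurora version):
+
+    lifecycle = LifecycleConfig(
+      http = HttpLifecycleConfig(
+        port = 'health',  # named port the shutdown endpoints listen on
+        graceful_shutdown_endpoint = '/quitquitquit',
+        shutdown_endpoint = '/abortabortabort',
+      )
+    )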
+
+### Unexpected Termination: LOST
+
+If a `Task` stays in a transient task state for too long (such as `ASSIGNED`
+or `STARTING`), the scheduler forces it into `LOST` state, creating a new
+`Task` in its place that's sent into `PENDING` state.
+
+In addition, if the Mesos core tells the scheduler that an agent has
+become unhealthy (or outright disappeared), the `Task`s assigned to that
+agent go into `LOST` state and new `Task`s are created in their place.
+From `PENDING` state, there is no guarantee a `Task` will be reassigned
+to the same machine unless job constraints explicitly force it there.
+
+### Giving Priority to Production Tasks: PREEMPTING
+
+Sometimes a Task needs to be interrupted, such as when a non-production
+Task's resources are needed by a higher priority production Task. This
+type of interruption is called a *preemption*. When this happens in
+Aurora, the non-production Task is killed and moved into
+the `PREEMPTING` state when both of the following are true:
+
+- The task being killed is a non-production task.
+- The other task is a `PENDING` production task that hasn't been
+  scheduled due to a lack of resources.
+
+The scheduler UI shows that the non-production task was preempted in favor of
+the production task. At some point, tasks in `PREEMPTING` move to `KILLED`.
+
+Note that non-production tasks consuming many resources are likely to be
+preempted in favor of production tasks.
+
+### Making Room for Maintenance: DRAINING
+
+Cluster operators can set an agent into maintenance mode. This will transition
+all `Task`s running on this agent into `DRAINING` and eventually to `KILLED`.
+Drained `Task`s will be restarted on other agents for which no maintenance
+has been announced yet.
+
+
+
+## State Reconciliation
+
+Due to the many inevitable realities of distributed systems, there might
+be a mismatch of perceived and actual cluster state (e.g. a machine returns
+from a `netsplit` but the scheduler has already marked all its `Task`s as
+`LOST` and rescheduled them).
+
+Aurora regularly runs a state reconciliation process in order to detect
+and correct such issues (e.g. by killing the errant `RUNNING` tasks).
+By default, the proper detection of all failure scenarios and inconsistencies
+may take up to an hour.
+
+To emphasize this point: there is no uniqueness guarantee for a single
+instance of a job in the presence of network partitions. If a `Task`
+requires uniqueness, it should be enforced at the application level using a
+distributed coordination service such as ZooKeeper.
diff --git a/source/documentation/0.15.0/additional-resources/presentations.md b/source/documentation/0.15.0/additional-resources/presentations.md
new file mode 100644
index 0000000..307dbec
--- /dev/null
+++ b/source/documentation/0.15.0/additional-resources/presentations.md
@@ -0,0 +1,80 @@
+# Apache Aurora Presentations
+Video and slides from presentations and panel discussions about Apache Aurora.
+
+_(Listed in date descending order)_
+
+<table>
+
+	<tr>
+		<td><img src="/documentation/0.15.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png" alt="Mesos and Aurora on a Small Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=q5iIqhaCJ_o">Mesos &amp; Aurora on a Small Scale (Video)</a></strong>
+		<p>Presented by Florian Pfeiffer</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.15.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png" alt="SLA Aware Maintenance for Operators Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=tZ0-SISvCis">SLA Aware Maintenance for Operators (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.15.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png" alt="Shipping Code with Aurora Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=y1hi7K1lPkk">Shipping Code with Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.15.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png" alt="Twitter Production Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=nNrh-gdu9m4">Twitter’s Production Scale: Mesos and Aurora Operations (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.15.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png" alt="From Monolith to Microservices with Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=yXkOgnyK4Hw">From Monolith to Microservices w/ Aurora (Video)</a></strong>
+		<p>Presented by Thanos Baskous, Tony Dong, Dobromir Montauk</p>
+		<p>April 30, 2015 at <a href="http://www.meetup.com/Bay-Area-Apache-Aurora-Users-Group/events/221219480/">Bay Area Apache Aurora Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.15.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png" alt="Aurora + Mesos in Practice at Twitter Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=1XYJGX_qZVU">Aurora + Mesos in Practice at Twitter (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 07, 2015 at <a href="http://www.bigeng.io/aurora-mesos-in-practice-at-twitter">Bigcommerce TechTalk</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.15.0/images/presentations/02_28_2015_apache_aurora_thumb.png" alt="Apache Auroraの始めかた Slideshow Thumbnail" /></td>
+		<td><strong><a href="http://www.slideshare.net/zembutsu/apache-aurora-introduction-and-tutorial-osc15tk">Apache Auroraの始めかた (Slides)</a></strong>
+		<p>Presented by Masahito Zembutsu</p>
+		<p>February 28, 2015 at <a href="http://www.ospn.jp/osc2015-spring/">Open Source Conference 2015 Tokyo Spring</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.15.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png" alt="Apache Aurora Adopters Panel Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=2Jsj0zFdRlg">Apache Aurora Adopters Panel (Video)</a></strong>
+		<p>Panelists Ben Staffin, Josh Adams, Bill Farner, Berk Demir</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.15.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png" alt="Operating Apache Aurora and Mesos at Twitter Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=E4lxX6epM_U">Operating Apache Aurora and Mesos at Twitter (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.15.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png" alt="Apache Aurora and Mesos at TellApart" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=ZZXtXLvTXAE">Apache Aurora and Mesos at TellApart (Video)</a></strong>
+		<p>Presented by Steve Niemitz</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.15.0/images/presentations/08_21_2014_past_present_future_thumb.png" alt="Past, Present, and Future of the Aurora Scheduler Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=Dsc5CPhKs4o">Past, Present, and Future of the Aurora Scheduler (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 21, 2014 at <a href="http://events.linuxfoundation.org/events/archive/2014/mesoscon">#MesosCon 2014</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.15.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png" alt="Introduction to Apache Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=asd_h6VzaJc">Introduction to Apache Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 25, 2014 at <a href="https://www.eventbrite.com/e/aurora-and-mesosframeworksmeetup-tickets-10850994617">Aurora and Mesos Frameworks Meetup</a></p></td>
+	</tr>
+</table>
diff --git a/source/documentation/0.15.0/additional-resources/tools.md b/source/documentation/0.15.0/additional-resources/tools.md
new file mode 100644
index 0000000..044bad7
--- /dev/null
+++ b/source/documentation/0.15.0/additional-resources/tools.md
@@ -0,0 +1,21 @@
+# Tools
+
+Various tools integrate with Aurora. Is there a tool missing? Let us know, or submit a patch to add it!
+
+* Load-balancing technology used to direct traffic to services running on Aurora:
+  - [synapse](https://github.com/airbnb/synapse) based on HAProxy
+  - [aurproxy](https://github.com/tellapart/aurproxy) based on nginx
+  - [jobhopper](https://github.com/benley/aurora-jobhopper) performs HTTP redirects for easy developer and administrator access
+
+* RPC libraries that integrate with Aurora's [service discovery mechanism](../../features/service-discovery/):
+  - [linkerd](https://linkerd.io/) RPC proxy
+  - [finagle](https://twitter.github.io/finagle) (Scala)
+  - [scales](https://github.com/steveniemitz/scales) (Python)
+
+* Monitoring:
+  - [collectd-aurora](https://github.com/zircote/collectd-aurora) for cluster monitoring using collectd
+  - [Prometheus Aurora exporter](https://github.com/tommyulfsparre/aurora_exporter) for cluster monitoring using Prometheus
+  - [Prometheus service discovery integration](http://prometheus.io/docs/operating/configuration/#zookeeper-serverset-sd-configurations-serverset_sd_config) for discovering and monitoring services running on Aurora
+
+* Packaging and deployment:
+  - [aurora-packaging](https://github.com/apache/aurora-packaging), the source of the official Aurora packages
diff --git a/source/documentation/0.15.0/contributing.md b/source/documentation/0.15.0/contributing.md
new file mode 100644
index 0000000..0a9b9d4
--- /dev/null
+++ b/source/documentation/0.15.0/contributing.md
@@ -0,0 +1,91 @@
+## Get the Source Code
+
+First things first, you'll need the source! The Aurora source is available from Apache git:
+
+    git clone https://gitbox.apache.org/repos/asf/aurora
+
+## Read the Style Guides
+Aurora's codebase is primarily Java and Python and conforms to the Twitter Commons styleguides for
+both languages.
+
+- [Java Style Guide](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/styleguide.md)
+- [Python Style Guide](https://github.com/twitter/commons/blob/master/src/python/twitter/common/styleguide.md)
+
+## Find Something to Do
+
+There are issues in [Jira](https://issues.apache.org/jira/browse/AURORA) with the
+["newbie" label](https://issues.apache.org/jira/issues/?jql=project%20%3D%20AURORA%20AND%20labels%20%3D%20newbie%20and%20resolution%3Dunresolved)
+that are good starting places for new Aurora contributors; pick one of these and dive in! Once
+you've got a patch, the next step is to post a review.
+
+## Getting your ReviewBoard Account
+
+Go to https://reviews.apache.org and create an account.
+
+## Setting up your ReviewBoard Environment
+
+Run `./rbt status`. The first time this runs it will bootstrap and you will be asked to log in.
+Subsequent runs will cache your login credentials.
+
+## Submitting a Patch for Review
+
+Post a review with `rbt`, fill out the fields in your browser and hit Publish.
+
+    ./rbt post -o
+
+If you're unsure about who to add as a reviewer, you can default to adding Bill Farner (wfarner) and
+Joshua Cohen (jcohen). They will take care of finding an appropriate reviewer for the patch.
+
+Once you've done this, you probably want to mark the associated Jira issue as Reviewable.
+
+## Updating an Existing Review
+
+Incorporate review feedback, make some more commits, update your existing review, fill out the
+fields in your browser and hit Publish.
+
+    ./rbt post -o -r <RB_ID>
+
+## Getting Your Review Merged
+
+If you're not an Aurora committer, one of the committers will merge your change in as described
+below. Generally, the last reviewer to give the review a 'Ship It!' will be responsible.
+
+### Merging Your Own Review (Committers)
+
+Once you have shipits from the right committers, merge your changes in a single commit and mark
+the review as submitted. The typical workflow is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>  # Verify the automatically-generated commit message looks sane,
+                            # editing if necessary.
+    git show master         # Verify everything looks sane
+    git push origin master
+    ./rbt close <RB_ID>
+
+Note that even if you're developing using feature branches you will not use `git merge` - each
+commit will be an atomic change accompanied by a ReviewBoard entry.
+
+### Merging Someone Else's Review
+
+Sometimes you'll need to merge someone else's RB. The typical workflow for this is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>
+    git show master  # Verify everything looks sane, author is correct
+    git push origin master
+
+Note for committers: while we generally use the commit message generated by `./rbt patch`, some
+changes are often required:
+
+1. Ensure that the commit message does not exceed 100 characters per line.
+2. Remove the "Testing Done" section. It's generally redundant (can be seen by checking the linked
+  review) or entirely irrelevant to the commit itself.
+
+## Cleaning Up
+
+Your patch has landed, congratulations! The last thing you'll want to do before moving on to your
+next fix is to clean up your Jira and ReviewBoard. The former should be marked as
+"Resolved" while the latter should be marked as "Submitted".
diff --git a/source/documentation/0.15.0/development/client.md b/source/documentation/0.15.0/development/client.md
new file mode 100644
index 0000000..079c471
--- /dev/null
+++ b/source/documentation/0.15.0/development/client.md
@@ -0,0 +1,81 @@
+Developing the Aurora Client
+============================
+
+The client is written in Python, and uses the
+[Pants](http://pantsbuild.github.io/python-readme.html) build tool.
+
+
+Building and Testing
+--------------------
+
+Building and testing the client code are both done using Pants. The relevant targets to know about
+are:
+
+   * Build a client executable: `./pants binary src/main/python/apache/aurora/client:aurora`
+   * Test client code: `./pants test src/test/python/apache/aurora/client/cli:cli`
+
+If you want to build a source distribution of the client, you need to run `./build-support/release/make-python-sdists`.
+
+
+Running/Debugging
+------------------
+
+For manually testing client changes against a cluster, we use [Vagrant](https://www.vagrantup.com/).
+To start a virtual cluster, you need to install Vagrant, and then run `vagrant up` from the root of
+the aurora workspace. This will create a vagrant host named "devcluster", with a Mesos master, a set
+of Mesos agents, and an Aurora scheduler.
+
+If you have a change you would like to test in your local cluster, you'll need to rebuild the client:
+
+    vagrant ssh -c 'aurorabuild client'
+
+Once this completes, the `aurora` command will reflect your changes.
+
+
+Running/Debugging in PyCharm
+-----------------------------
+
+It's possible to use PyCharm to run and debug both the client and client tests in an IDE. In order
+to do this, first run:
+
+    build-support/python/make-pycharm-virtualenv
+
+This script will configure a virtualenv with all of our Python requirements. Once the script
+completes it will emit instructions for configuring PyCharm:
+
+    Your PyCharm environment is now set up.  You can open the project root
+    directory with PyCharm.
+
+    Once the project is loaded:
+      - open project settings
+      - click 'Project Interpreter'
+      - click the cog in the upper-right corner
+      - click 'Add Local'
+      - select 'build-support/python/pycharm.venv/bin/python'
+      - click 'OK'
+
+### Running/Debugging Tests
+After following these instructions, you should now be able to run/debug tests directly from the IDE
+by right-clicking on a test (or test class) and choosing to run or debug:
+
+[![Debug Client Test](../images/debug-client-test.png)](../images/debug-client-test.png)
+
+If you've set a breakpoint, you can see the run will now stop and let you debug:
+
+[![Debugging Client Test](../images/debugging-client-test.png)](../images/debugging-client-test.png)
+
+### Running/Debugging the Client
+Actually running and debugging the client is unfortunately a bit more complex. You'll need to create
+a Run configuration:
+
+* Go to Run → Edit Configurations
+* Click the + icon to add a new configuration.
+* Choose python and name the configuration 'client'.
+* Set the script path to `/your/path/to/aurora/src/main/python/apache/aurora/client/cli/client.py`
+* Set the script parameters to the command you want to run (e.g. `job status <job key>`)
+* Expand the Environment section and click the ellipsis to add a new environment variable
+* Click the + at the bottom to add a new variable named AURORA_CONFIG_ROOT whose value is the
+  path where your cluster configuration can be found. For example, to talk to the scheduler
+  running in the vagrant image, it would be set to `/your/path/to/aurora/examples/vagrant` (this
+  is the directory where our example clusters.json is found).
+* You should now be able to run and debug this configuration!
diff --git a/source/documentation/0.15.0/development/committers-guide.md b/source/documentation/0.15.0/development/committers-guide.md
new file mode 100644
index 0000000..7974f32
--- /dev/null
+++ b/source/documentation/0.15.0/development/committers-guide.md
@@ -0,0 +1,86 @@
+Committer's Guide
+=================
+
+Information for official Apache Aurora committers.
+
+Setting up your email account
+-----------------------------
+Once your Apache ID has been set up, you can configure your account, add SSH keys, and set up an
+email forwarding address at
+
+    http://id.apache.org
+
+Additional instructions for setting up your new committer email can be found at
+
+    http://www.apache.org/dev/user-email.html
+
+The recommended setup is to configure all services (mailing lists, JIRA, ReviewBoard) to send
+emails to your @apache.org email address.
+
+
+Creating a gpg key for releases
+-------------------------------
+In order to create a release candidate, you will need a gpg key published to an external key server
+and that key will need to be added to our KEYS file as well.
+
+1. Create a key:
+
+               gpg --gen-key
+
+2. Add your gpg key to the Apache Aurora KEYS file:
+
+               git clone https://gitbox.apache.org/repos/asf/aurora.git
+               (gpg --list-sigs <KEY ID> && gpg --armor --export <KEY ID>) >> KEYS
+               git add KEYS && git commit -m "Adding gpg key for <APACHE ID>"
+               ./rbt post -o -g
+
+3. Publish the key to an external key server:
+
+               gpg --keyserver pgp.mit.edu --send-keys <KEY ID>
+
+4. Propagate the changes to the KEYS file to the Apache Aurora svn dist locations listed below:
+
+               https://dist.apache.org/repos/dist/dev/aurora/KEYS
+               https://dist.apache.org/repos/dist/release/aurora/KEYS
+
+5. Add your key to git config for use with the release scripts:
+
+               git config --global user.signingkey <KEY ID>
+
+
+Creating a release
+------------------
+The following will guide you through the steps to create a release candidate, vote, and finally an
+official Apache Aurora release. Before starting, your gpg key should be in the KEYS file and you
+must have access to commit to the dist.a.o repositories.
+
+1. Ensure that all issues resolved for this release candidate are tagged with the correct Fix
+Version in JIRA, the changelog script will use this to generate the CHANGELOG in step #2.
+
+2. Create a release candidate. This will automatically update the CHANGELOG and commit it, create a
+branch and update the current version within the trunk. To create a minor version update and publish
+it, run:
+
+               ./build-support/release/release-candidate -l m -p
+
+3. Update, if necessary, the draft email created from the `release-candidate` script in step #2 and
+send the [VOTE] email to the dev@ mailing list. You can verify the release signature and checksums
+by running
+
+               ./build-support/release/verify-release-candidate
+
+4. Wait for the vote to complete. If the vote fails, close the vote by replying to the initial [VOTE]
+email sent in step #3, editing the subject to [RESULT][VOTE] ... and noting the failure reason
+(example [here](http://markmail.org/message/d4d6xtvj7vgwi76f)). Then address any issues, go back to
+step #1, and run again; this time use the -r flag to increment the release candidate
+version. This will automatically clean up the release candidate rc0 branch and source distribution.
+
+               ./build-support/release/release-candidate -l m -r 1 -p
+
+5. Once the vote has successfully passed, create the release:
+
+               ./build-support/release/release
+
+6. Update the draft email created from the `release` script in step #5 to include the Apache IDs for
+all binding votes and send the [RESULT][VOTE] email to the dev@ mailing list.
+
diff --git a/source/documentation/0.15.0/development/db-migration.md b/source/documentation/0.15.0/development/db-migration.md
new file mode 100644
index 0000000..10945d8
--- /dev/null
+++ b/source/documentation/0.15.0/development/db-migration.md
@@ -0,0 +1,34 @@
+DB Migrations
+=============
+
+Changes to the DB schema should be made in the form of migrations. This ensures that all changes
+are applied correctly after a DB dump from a previous version is restored.
+
+DB migrations are managed through a system built on top of
+[MyBatis Migrations](http://www.mybatis.org/migrations/). The migrations are run automatically when
+a snapshot is restored; no manual interaction is required from cluster operators.
+
+Upgrades
+--------
+When adding or altering tables or changing data, in addition to making the change in
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql), a new
+migration class should be created under the org.apache.aurora.scheduler.storage.db.migration
+package. The class should implement the [MigrationScript](https://github.com/mybatis/migrations/blob/master/src/main/java/org/apache/ibatis/migration/MigrationScript.java)
+interface (see [V001_TestMigration](https://github.com/apache/aurora/blob/rel/0.15.0/src/test/java/org/apache/aurora/scheduler/storage/db/testmigration/V001_TestMigration.java)
+as an example). The upgrade and downgrade scripts are defined in this class. When restoring a
+snapshot the list of migrations on the classpath is compared to the list of applied changes in the
+DB. Any changes that have not yet been applied are executed and their downgrade script is stored
+alongside the changelog entry in the database to facilitate downgrades in the event of a rollback.
+
+Downgrades
+----------
+If, while running migrations, a rollback is detected, i.e. a change exists in the DB changelog that
+does not exist on the classpath, the downgrade script associated with each affected change is
+applied.
+
+Baselines
+---------
+After enough time has passed (at least 1 official release), it should be safe to baseline migrations
+if desired. This can be accomplished by ensuring the changes from migrations have been applied to
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql) and then
+removing the corresponding migration classes and adding a migration to remove the changelog entries.
\ No newline at end of file
diff --git a/source/documentation/0.15.0/development/design-documents.md b/source/documentation/0.15.0/development/design-documents.md
new file mode 100644
index 0000000..5ada27f
--- /dev/null
+++ b/source/documentation/0.15.0/development/design-documents.md
@@ -0,0 +1,21 @@
+Design Documents
+================
+
+Since its inception as an Apache project, larger feature additions to the
+Aurora code base are discussed in the form of design documents. Design documents
+are living documents until a consensus has been reached to implement a feature
+in the proposed form.
+
+Current and past documents:
+
+* [Command Hooks for the Aurora Client](../design/command-hooks/)
+* [GPU Resources in Aurora](https://docs.google.com/document/d/1J9SIswRMpVKQpnlvJAMAJtKfPP7ZARFknuyXl-2aZ-M/edit)
+* [Health Checks for Updates](https://docs.google.com/document/d/1ZdgW8S4xMhvKW7iQUX99xZm10NXSxEWR0a-21FP5d94/edit)
+* [JobUpdateDiff thrift API](https://docs.google.com/document/d/1Fc_YhhV7fc4D9Xv6gJzpfooxbK4YWZcvzw6Bd3qVTL8/edit)
+* [REST API RFC](https://docs.google.com/document/d/11_lAsYIRlD5ETRzF2eSd3oa8LXAHYFD8rSetspYXaf4/edit)
+* [Revocable Mesos offers in Aurora](https://docs.google.com/document/d/1r1WCHgmPJp5wbrqSZLsgtxPNj3sULfHrSFmxp2GyPTo/edit)
+* [Supporting the Mesos Universal Containerizer](https://docs.google.com/document/d/111T09NBF2zjjl7HE95xglsDpRdKoZqhCRM5hHmOfTLA/edit?usp=sharing)
+* [Tier Management In Apache Aurora](https://docs.google.com/document/d/1erszT-HsWf1zCIfhbqHlsotHxWUvDyI2xUwNQQQxLgs/edit?usp=sharing)
+* [Ubiquitous Jobs](https://docs.google.com/document/d/12hr6GnUZU3mc7xsWRzMi3nQILGB-3vyUxvbG-6YmvdE/edit)
+
+Design documents can be found in the Aurora issue tracker via the query [`project = AURORA AND text ~ "docs.google.com" ORDER BY created`](https://issues.apache.org/jira/browse/AURORA-1528?jql=project%20%3D%20AURORA%20AND%20text%20~%20%22docs.google.com%22%20ORDER%20BY%20created).
diff --git a/source/documentation/0.15.0/development/design/command-hooks.md b/source/documentation/0.15.0/development/design/command-hooks.md
new file mode 100644
index 0000000..3f3f70f
--- /dev/null
+++ b/source/documentation/0.15.0/development/design/command-hooks.md
@@ -0,0 +1,102 @@
+# Command Hooks for the Aurora Client
+
+## Introduction/Motivation
+
+We've got hooks in the client that surround API calls. These are
+pretty awkward, because they don't correlate with user actions. For
+example, suppose we wanted a policy that said users weren't allowed to
+kill all instances of a production job at once.
+
+Right now, all that we could hook would be the "killJob" api call. But
+kill (at least in newer versions of the client) normally runs in
+batches. If a user called killall, what we would see on the API level
+is a series of "killJob" calls, each of which specified a batch of
+instances. We wouldn't be able to distinguish between really killing
+all instances of a job (which is forbidden under this policy), and
+carefully killing in batches (which is permitted). In each case, the
+hook would just see a series of API calls, and couldn't find out what
+the actual command being executed was!
+
+For most policy enforcement, what we really want to be able to do is
+look at and vet the commands that a user is performing, not the API
+calls that the client uses to implement those commands.
+
+So I propose that we add a new kind of hooks, which surround noun/verb
+commands. A hook will register itself to handle a collection of (noun,
+verb) pairs. Whenever any of those noun/verb commands are invoked, the
+hook's methods will be called around the execution of the verb. A
+pre-hook will have the ability to reject a command, preventing the
+verb from being executed.
+
+## Registering Hooks
+
+These hooks will be registered via configuration plugins. A configuration plugin
+can register hooks using an API. Hooks registered this way are, effectively,
+hardwired into the client executable.
+
+The order of execution of hooks is unspecified: they may be called in
+any order. There is no way to guarantee that one hook will execute
+before some other hook.
+
+
+### Global Hooks
+
+Hooks registered by the Python call are called _global_ hooks,
+because they will run for all configurations, whether or not they
+specify any hooks in the configuration file.
+
+In the implementation, hooks are registered in the module
+`apache.aurora.client.cli.command_hooks`, using the class
+`GlobalCommandHookRegistry`. A global hook can be registered by calling
+`GlobalCommandHookRegistry.register_command_hook` in a configuration plugin.
+
+### The API
+
+    class CommandHook(object):
+      @property
+      def name(self):
+        """Returns a name for the hook."
+
+      def get_nouns(self):
+        """Return the nouns that have verbs that should invoke this hook."""
+
+      def get_verbs(self, noun):
+        """Return the verbs for a particular noun that should invoke his hook."""
+
+      @abstractmethod
+      def pre_command(self, noun, verb, context, commandline):
+        """Execute a hook before invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook.
+        * commandline: the original argv collection used to invoke the client.
+        Returns: True if the command should be allowed to proceed; False if the command
+        should be rejected.
+        """
+
+      def post_command(self, noun, verb, context, commandline, result):
+        """Execute a hook after invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook.
+        * commandline: the original argv collection used to invoke the client.
+        * result: the result code returned by the verb.
+        Returns: nothing
+        """
+
+    class GlobalCommandHookRegistry(object):
+      @classmethod
+      def register_command_hook(cls, hook):
+        pass
+
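+As an illustrative sketch (not part of the proposal itself), a hook that enforces
+the earlier example policy by rejecting `job killall` could be written against
+this API as:
+
+    class BlockKillAllHook(CommandHook):
+      @property
+      def name(self):
+        return 'block_killall'
+
+      def get_nouns(self):
+        return ['job']
+
+      def get_verbs(self, noun):
+        return ['killall'] if noun == 'job' else []
+
+      def pre_command(self, noun, verb, context, commandline):
+        # Returning False rejects the command before the verb executes.
+        # A real policy could inspect the context instead of always rejecting.
+        return False
+
+      def post_command(self, noun, verb, context, commandline, result):
+        pass
+
+    # Registered from a configuration plugin:
+    GlobalCommandHookRegistry.register_command_hook(BlockKillAllHook())
+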
+### Skipping Hooks
+
+To skip a hook, a user uses a command-line option, `--skip-hooks`. The option can either
+specify specific hooks to skip, or "all":
+
+* `aurora --skip-hooks=all job create east/bozo/devel/myjob` will create a job
+  without running any hooks.
+* `aurora --skip-hooks=test,iq job create east/bozo/devel/myjob` will create a job,
+  and will skip only the hooks named "test" and "iq".
diff --git a/source/documentation/0.15.0/development/scheduler.md b/source/documentation/0.15.0/development/scheduler.md
new file mode 100644
index 0000000..fc0836a
--- /dev/null
+++ b/source/documentation/0.15.0/development/scheduler.md
@@ -0,0 +1,118 @@
+Developing the Aurora Scheduler
+===============================
+
+The Aurora scheduler is written in Java and built with [Gradle](http://gradle.org).
+
+
+Prerequisite
+============
+
+When using Apache Aurora checked out from the source repository or the binary
+distribution, the Gradle wrapper and JavaScript dependencies are provided.
+However, you need to manually install them when using the source release
+downloads:
+
+1. Install Gradle following the instructions on the [Gradle web site](http://gradle.org)
+2. From the root directory of the Apache Aurora project generate the Gradle
+wrapper by running:
+
+    gradle wrapper
+
+
+Getting Started
+===============
+
+You will need Java 8 installed and on your `PATH` or unzipped somewhere with `JAVA_HOME` set. Then
+
+    ./gradlew tasks
+
+will bootstrap the build system and show available tasks. This can take a while the first time you
+run it but subsequent runs will be much faster due to cached artifacts.
+
+Running the Tests
+-----------------
+Aurora has a comprehensive unit test suite. To run the tests use
+
+    ./gradlew build
+
+Gradle will only re-run tests when dependencies of them have changed. To force a re-run of all
+tests use
+
+    ./gradlew clean build
+
+Running the build with code quality checks
+------------------------------------------
+To speed up development iteration, the plain gradle commands will not run static analysis tools.
+However, you should run them before posting a review diff, and **always** run them before pushing a
+commit to origin/master.
+
+    ./gradlew build -Pq
+
+Running integration tests
+-------------------------
+To run the same tests that are run in the Apache Aurora continuous integration
+environment:
+
+    ./build-support/jenkins/build.sh
+
+In addition, there is an end-to-end test that runs a suite of aurora commands
+using a virtual cluster:
+
+    ./src/test/sh/org/apache/aurora/e2e/test_end_to_end.sh
+
+Creating a bundle for deployment
+--------------------------------
+Gradle can create a zip file containing Aurora, all of its dependencies, and a launch script with
+
+    ./gradlew distZip
+
+or a tar file containing the same files with
+
+    ./gradlew distTar
+
+The output file will be written to `dist/distributions/aurora-scheduler.zip` or
+`dist/distributions/aurora-scheduler.tar`.
+
+
+
+Developing Aurora Java code
+===========================
+
+Setting up an IDE
+-----------------
+Gradle can generate project files for your IDE. To generate an IntelliJ IDEA project run
+
+    ./gradlew idea
+
+and import the generated `aurora.ipr` file.
+
+Adding or Upgrading a Dependency
+--------------------------------
+New dependencies can be added from Maven central by adding a `compile` dependency to `build.gradle`.
+For example, to add a dependency on `com.example`'s `example-lib` 1.0 add this block:
+
+    compile 'com.example:example-lib:1.0'
+
+NOTE: Anyone thinking about adding a new dependency should first familiarize themselves with the
+Apache Foundation's third-party licensing
+[policy](http://www.apache.org/legal/resolved.html#category-x).
+
+
+
+Developing the Aurora Build System
+==================================
+
+Bootstrapping Gradle
+--------------------
+The following files were autogenerated by `gradle wrapper` using gradle's
+[Wrapper](http://www.gradle.org/docs/current/dsl/org.gradle.api.tasks.wrapper.Wrapper.html) plugin and
+should not be modified directly:
+
+    ./gradlew
+    ./gradlew.bat
+    ./gradle/wrapper/gradle-wrapper.jar
+    ./gradle/wrapper/gradle-wrapper.properties
+
+To upgrade Gradle unpack the new version somewhere, run `/path/to/new/gradle wrapper` in the
+repository root and commit the changed files.
+
diff --git a/source/documentation/0.15.0/development/thermos.md b/source/documentation/0.15.0/development/thermos.md
new file mode 100644
index 0000000..0efe130
--- /dev/null
+++ b/source/documentation/0.15.0/development/thermos.md
@@ -0,0 +1,126 @@
+The Python components of Aurora are built using [Pants](https://pantsbuild.github.io).
+
+
+Python Build Conventions
+========================
+The Python code is laid out according to the following conventions:
+
+1. 1 `BUILD` per 3rd level directory. For a list of current top-level packages run:
+
+        % find src/main/python -maxdepth 3 -mindepth 3 -type d |\
+        while read dname; do echo $dname |\
+            sed 's@src/main/python/\(.*\)/\(.*\)/\(.*\).*@\1.\2.\3@'; done
+
+2.  Each `BUILD` file exports 1
+    [`python_library`](https://pantsbuild.github.io/build_dictionary.html#bdict_python_library)
+    that provides a
+    [`setup_py`](https://pantsbuild.github.io/build_dictionary.html#setup_py)
+    containing each
+    [`python_binary`](https://pantsbuild.github.io/build_dictionary.html#python_binary)
+    in the `BUILD` file, named the same as the directory it's in so that it can be referenced
+    without a ':' character. The `sources` field in the `python_library` will almost always be
+    `rglobs('*.py')`.
+
+3.  Other BUILD files may only depend on this single public `python_library`
+    target. Any other target is considered a private implementation detail and
+    should be prefixed with an `_`.
+
+4.  `python_binary` targets are always named the same as the exported console script.
+
+5.  `python_binary` targets must have identical `dependencies` to the `python_library` exported
+    by the package and must use `entry_point`.
+
+    This means a PEX file generated by Pants will contain exactly the same files that will be
+    available on the `PYTHONPATH` in the case of `pip install` of the corresponding library
+    target. This will help our migration off of Pants in the future.
+
+Annotated example - apache.thermos.runner
+-----------------------------------------
+
+    % find src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner/__init__.py
+    src/main/python/apache/thermos/runner/thermos_runner.py
+    src/main/python/apache/thermos/runner/BUILD
+    % cat src/main/python/apache/thermos/runner/BUILD
+    # License boilerplate omitted
+    import os
+
+
+    # Private target so that a setup_py can exist without a circular dependency. Only targets within
+    # this file should depend on this.
+    python_library(
+      name = '_runner',
+      # The target covers every python file under this directory and subdirectories.
+      sources = rglobs('*.py'),
+      dependencies = [
+        '3rdparty/python:twitter.common.app',
+        '3rdparty/python:twitter.common.log',
+        # Source dependencies are always referenced without a ':'.
+        'src/main/python/apache/thermos/common',
+        'src/main/python/apache/thermos/config',
+        'src/main/python/apache/thermos/core',
+      ],
+    )
+
+    # Binary target for thermos_runner.pex. Nothing should depend on this - it's only used as an
+    # argument to ./pants binary.
+    python_binary(
+      name = 'thermos_runner',
+      # Use entry_point, not source so the files used here are the same ones tests see.
+      entry_point = 'apache.thermos.bin.thermos_runner',
+      dependencies = [
+        # Notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+    )
+
+    # The public library that everyone importing the runner symbols uses.
+    # The test targets and any other dependent source code should depend on this.
+    python_library(
+      name = 'runner',
+      dependencies = [
+        # Again, notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+      # We always provide a setup_py. This will cause any dependee libraries to automatically
+      # reference this library in their requirements.txt rather than copy the source files into their
+      # sdist.
+      provides = setup_py(
+        # Conventionally named and versioned.
+        name = 'apache.thermos.runner',
+        version = open(os.path.join(get_buildroot(), '.auroraversion')).read().strip().upper(),
+      ).with_binaries({
+        # Every binary in this file should also be repeated here.
+        # Always use the dict-form of .with_binaries so that commands with dashes in their names are
+        # supported.
+        # The console script name is always the same as the PEX with .pex stripped.
+        'thermos_runner': ':thermos_runner',
+      }),
+    )
+
+
+
+Thermos Test resources
+======================
+
+The Aurora source repository and distributions contain several
+[binary files](../../src/test/resources/org/apache/thermos/root/checkpoints) to
+qualify the backwards-compatibility of thermos with checkpoint data. Since
+thermos persists state to disk (to be read by the thermos observer), it is important that we have
+tests that prevent regressions affecting the ability to parse previously-written data.
+
+The files included represent persisted checkpoints that exercise different
+features of thermos. The existing files should not be modified unless
+we are accepting backwards incompatibility, such as with a major release.
+
+It is not practical to write source code to generate these files on the fly,
+as source would be vulnerable to drift (e.g. due to refactoring) in ways
+that would undermine the goal of ensuring backwards compatibility.
+
+The most common reason to add a new checkpoint file would be to provide
+coverage for new thermos features that alter the data format. This is
+accomplished by writing and running a
+[job configuration](../../reference/configuration/) that exercises the feature, and
+copying the checkpoint file from the sandbox directory; by default this is
+`/var/run/thermos/checkpoints/<aurora task id>`.
diff --git a/source/documentation/0.15.0/development/thrift.md b/source/documentation/0.15.0/development/thrift.md
new file mode 100644
index 0000000..33f3674
--- /dev/null
+++ b/source/documentation/0.15.0/development/thrift.md
@@ -0,0 +1,54 @@
+Thrift
+======
+
+Aurora uses [Apache Thrift](https://thrift.apache.org/) for representing structured data in
+the client/server RPC protocol as well as for internal data storage. While Thrift is capable of
+correctly handling additions and renames of existing members, field removals must be done
+carefully to ensure backwards compatibility and provide a predictable deprecation cycle. This
+document describes general guidelines for making Thrift schema changes to the existing fields in
+[api.thrift](https://github.com/apache/aurora/blob/rel/0.15.0/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+It is highly recommended to go through the
+[Thrift: The Missing Guide](http://diwakergupta.github.io/thrift-missing-guide/) first to refresh on
+basic Thrift schema concepts.
+
+Checklist
+---------
+Every existing Thrift schema modification is unique in its requirements and must be analyzed
+carefully to identify its scope and expected consequences. The following checklist may help in that
+analysis:
+* Is this a new field/struct? If yes, go ahead
+* Is this a pure field/struct rename without any type/structure change? If yes, go ahead and rename
+* Anything else, read further to make sure your change is properly planned
+
+Deprecation cycle
+-----------------
+Any time a breaking change (e.g. field replacement or removal) is required, the following cycle
+must be followed:
+
+### vCurrent
+The change is applied in a way that does not prevent a scheduler/client at this version from
+communicating with a scheduler/client at vCurrent-1.
+* Do not remove or rename the old field
+* Add a new field as an eventual replacement of the old one and implement a dual read/write
+anywhere the old field is used. If a thrift struct is mapped in the DB store, make sure both columns
+are marked as `NOT NULL`
+* Check [storage.thrift](https://github.com/apache/aurora/blob/rel/0.15.0/api/src/main/thrift/org/apache/aurora/gen/storage.thrift) to see if
+the affected struct is stored in Aurora scheduler storage. If so, it's almost certainly also
+necessary to perform a [DB migration](../db-migration/).
+* Add a deprecation jira ticket into the vCurrent+1 release candidate
+* Add a TODO for the deprecated field mentioning the jira ticket
+
+### vCurrent+1
+Finalize the change by removing the deprecated fields from the Thrift schema.
+* Drop any dual read/write routines added in the previous version
+* Remove Thrift backfilling in the scheduler
+* Remove the deprecated Thrift field
+
+Testing
+-------
+It's always advisable to test your changes in the local Vagrant environment to build more
+confidence that your change is backwards compatible. It's easy to simulate different
+client/scheduler versions by playing with the `aurorabuild` command. See [this document](../../getting-started/vagrant/)
+for more.
+
diff --git a/source/documentation/0.15.0/development/ui.md b/source/documentation/0.15.0/development/ui.md
new file mode 100644
index 0000000..f41614e
--- /dev/null
+++ b/source/documentation/0.15.0/development/ui.md
@@ -0,0 +1,46 @@
+Developing the Aurora Scheduler UI
+==================================
+
+Installing bower (optional)
+----------------------------
+Third party JS libraries used in Aurora (located at 3rdparty/javascript/bower_components) are
+managed by bower, a JS dependency manager. Bower is only required if you plan to add, remove or
+update JS libraries. Bower can be installed using the following command:
+
+    npm install -g bower
+
+Bower depends on node.js and npm. The easiest way to install node on a Mac is via brew:
+
+    brew install node
+
+For more node.js installation options refer to https://github.com/joyent/node/wiki/Installation.
+
+More info on installing and using bower can be found at http://bower.io/. Once installed, you can
+use the following commands to view and modify the bower repo at
+3rdparty/javascript/bower_components:
+
+    bower list
+    bower install <library name>
+    bower remove <library name>
+    bower update <library name>
+    bower help
+
+
+Faster Iteration in Vagrant
+---------------------------
+The scheduler serves UI assets from the classpath. For production deployments this means the assets
+are served from within a jar. However, for faster development iteration, the vagrant image is
+configured to add the `scheduler` subtree of `/vagrant/dist/resources/main` to the head of
+`CLASSPATH`. This path is configured as a shared filesystem to the path on the host system where
+your Aurora repository lives. This means that any updates under `dist/resources/main/scheduler` in
+your checkout will be reflected immediately in the UI served from within the vagrant image.
+
+The one caveat to this is that this path is under `dist`, not `src`. This is because the assets must
+be processed by gradle before they can be served. So, unfortunately, you cannot just save your local
+changes and see them reflected in the UI; you must first run `./gradlew processResources`. This is
+less than ideal, but better than having to restart the scheduler after every change. Additionally,
+gradle makes this process somewhat easier with the use of the `--continuous` flag. If you run
+`./gradlew processResources --continuous`, gradle will monitor the filesystem for changes and run the
+task automatically as necessary. This doesn't quite provide hot-reload capabilities, but it does
+allow for <5s from save to changes being visible in the UI with no further action required on the
+part of the developer.
diff --git a/source/documentation/0.15.0/features/constraints.md b/source/documentation/0.15.0/features/constraints.md
new file mode 100644
index 0000000..4ade848
--- /dev/null
+++ b/source/documentation/0.15.0/features/constraints.md
@@ -0,0 +1,126 @@
+Scheduling Constraints
+======================
+
+By default, Aurora will pick any random agent with sufficient resources
+in order to schedule a task. This scheduling choice can be further
+restricted with the help of constraints.
+
+
+Mesos Attributes
+----------------
+
+Data centers are often organized with hierarchical failure domains.  Common failure domains
+include hosts, racks, rows, and PDUs.  If you have this information available, it is wise to tag
+the Mesos agent with them as
+[attributes](https://mesos.apache.org/documentation/attributes-resources/).
+
+The Mesos agent `--attributes` command line argument can be used to mark agents with
+static key/value pairs, so called attributes (not to be confused with `--resources`, which are
+dynamic and accounted).
+
+For example, consider the host `cluster1-aaa-03-sr2` and its following attributes (given in
+key:value format): `host:cluster1-aaa-03-sr2` and `rack:aaa`.
+
+Aurora makes these attributes available for matching with scheduling constraints.
+
+
+Limit Constraints
+-----------------
+
+Limit constraints allow you to control machine diversity. The constraint below
+ensures that no more than two instances of your job may run on a single host.
+Think of this as a "group by" limit.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'host': 'limit:2',
+      }
+      ...
+    )
+
+
+Likewise, you can use constraints to control rack diversity, e.g. at
+most one task per rack:
+
+    constraints = {
+      'rack': 'limit:1',
+    }
+
+Use these constraints sparingly as they can dramatically reduce Tasks' schedulability.
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+
+Value Constraints
+-----------------
+
+Value constraints can be used to express that a certain attribute with a certain value
+should be present on a Mesos agent. For example, the following job would only be
+scheduled on nodes that claim to have an `SSD` as their disk.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'disk': 'SSD',
+      }
+      ...
+    )
+
+
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+Running stateful services
+-------------------------
+
+Aurora is best suited to run stateless applications, but it also accommodates for stateful services
+like databases, or services that otherwise need to always run on the same machines.
+
+### Dedicated attribute
+
+Most Mesos attributes are arbitrary and available for custom use.  There is one exception,
+though: the `dedicated` attribute.  Aurora treats this attribute specially: only jobs with a matching
+dedicated constraint may run on these machines, and such jobs will only be scheduled on these machines.
+
+
+#### Syntax
+The dedicated attribute has semantic meaning. The format is `$role(/.*)?`. When a job is created,
+the scheduler requires that the `$role` component matches the `role` field in the job
+configuration, and will reject the job creation otherwise.  The remainder of the attribute is
+free-form. We've developed the idiom of formatting this attribute as `$role/$job`, but do not
+enforce this. For example: a job `devcluster/www-data/prod/hello` with a dedicated constraint set as
+`www-data/web.multi` will have its tasks scheduled only on Mesos agents configured with:
+`--attributes=dedicated:www-data/web.multi`.
+
+A wildcard (`*`) may be used for the role portion of the dedicated attribute, which will allow any
+owner to elect for a job to run on the host(s). For example: tasks from both
+`devcluster/www-data/prod/hello` and `devcluster/vagrant/test/hello` with a dedicated constraint
+formatted as `*/web.multi` will be scheduled only on Mesos agents configured with
+`--attributes=dedicated:*/web.multi`. This may be useful when assembling a virtual cluster of
+machines sharing the same set of traits or requirements.
+
+##### Example
+Consider the following agent command line:
+
+    mesos-slave --attributes="dedicated:db_team/redis" ...
+
+And this job configuration:
+
+    Service(
+      name = 'redis',
+      role = 'db_team',
+      constraints = {
+        'dedicated': 'db_team/redis'
+      }
+      ...
+    )
+
+The job configuration is indicating that it should only be scheduled on agents with the attribute
+`dedicated:db_team/redis`.  Additionally, Aurora will prevent any tasks that do _not_ have that
+constraint from running on those agents.
+
diff --git a/source/documentation/0.15.0/features/containers.md b/source/documentation/0.15.0/features/containers.md
new file mode 100644
index 0000000..15c8a59
--- /dev/null
+++ b/source/documentation/0.15.0/features/containers.md
@@ -0,0 +1,60 @@
+Containers
+==========
+
+Docker
+------
+
+Aurora has optional support for launching Docker containers, if correctly [configured by an Operator](../../operations/configuration/#docker-containers).
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+
+    $ cat /vagrant/examples/jobs/docker/hello_docker.aurora
+    hello_world_proc = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    hello_world_docker = Task(
+      name = 'hello docker',
+      processes = [hello_world_proc],
+      resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB)
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'docker-test',
+        name = 'hello_docker',
+        task = hello_world_docker,
+        container = Container(docker = Docker(image = 'python:2.7'))
+      )
+    ]
+
+
+In order to correctly execute processes inside a job, the docker container must have Python 2.7
+installed. Further details of how to use Docker can be found in the
+[Reference Documentation](../../reference/configuration/#docker-object).
+
+Mesos
+-----
+
+*Note: In order to use filesystem images with Aurora, you must be running at least Mesos 0.28.x*
+
+Aurora supports specifying a task filesystem image to use with the [Mesos containerizer](http://mesos.apache.org/documentation/latest/container-image/).
+This is done by setting the `container` property of the Job to a `Mesos` container object
+that includes the image to use. Both [AppC](https://github.com/appc/spec/blob/master/SPEC.md) and 
+[Docker](https://github.com/docker/docker/blob/master/image/spec/v1.md) images are supported.
+
+```
+job = Job(
+   ...
+   container = Mesos(image=DockerImage(name='my-image', tag='my-tag'))
+   ...
+)
+```
diff --git a/source/documentation/0.15.0/features/cron-jobs.md b/source/documentation/0.15.0/features/cron-jobs.md
new file mode 100644
index 0000000..bcc94dc
--- /dev/null
+++ b/source/documentation/0.15.0/features/cron-jobs.md
@@ -0,0 +1,124 @@
+# Cron Jobs
+
+Aurora supports execution of scheduled jobs on a Mesos cluster using cron-style syntax.
+
+- [Overview](#overview)
+- [Collision Policies](#collision-policies)
+- [Failure recovery](#failure-recovery)
+- [Interacting with cron jobs via the Aurora CLI](#interacting-with-cron-jobs-via-the-aurora-cli)
+	- [cron schedule](#cron-schedule)
+	- [cron deschedule](#cron-deschedule)
+	- [cron start](#cron-start)
+	- [job killall, job restart, job kill](#job-killall-job-restart-job-kill)
+- [Technical Note About Syntax](#technical-note-about-syntax)
+- [Caveats](#caveats)
+	- [Failovers](#failovers)
+	- [Collision policy is best-effort](#collision-policy-is-best-effort)
+	- [Timezone Configuration](#timezone-configuration)
+
+## Overview
+
+A job is identified as a cron job by the presence of a
+`cron_schedule` attribute containing a cron-style schedule in the
+[`Job`](../../reference/configuration/#job-objects) object. Examples of cron schedules
+include "every 5 minutes" (`*/5 * * * *`), "Fridays at 17:00" (`* 17 * * FRI`), and
+"the 1st and 15th day of the month at 03:00" (`0 3 1,15 *`).
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/cron_hello_world.aurora
+    # A cron job that runs every 5 minutes.
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]
+
+## Collision Policies
+
+The `cron_collision_policy` field specifies the scheduler's behavior when a new cron job is
+triggered while an older run hasn't finished. The scheduler has two policies available:
+
+* `KILL_EXISTING`: The default policy - on a collision the old instances are killed and instances with the current
+configuration are started.
+* `CANCEL_NEW`: On a collision the new run is cancelled.
+
+Note that the use of `CANCEL_NEW` is likely a code smell - interrupted cron jobs should be able
+to recover their progress on a subsequent invocation, otherwise they risk having their work queue
+grow faster than they can process it.
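+
+The policy is set directly on the [`Job`](../../reference/configuration/#job-objects) object, e.g.
+(a sketch; the `...` elides the rest of the job definition shown above):
+
+    Job(
+      ...
+      cron_schedule = '*/5 * * * *',
+      cron_collision_policy = 'CANCEL_NEW',
+      ...
+    )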
+
+## Failure recovery
+
+Unlike with services, which Aurora will always re-execute regardless of exit status, instances of
+cron jobs retry according to the `max_task_failures` attribute of the
+[Task](../../reference/configuration/#task-object) object. To get "run-until-success" semantics,
+set `max_task_failures` to `-1`.
+
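+For example, a minimal sketch of a cron task that retries until it succeeds (the process name and
+command line are placeholders):
+
+    task = Task(
+      name = 'nightly_batch',
+      processes = [Process(name = 'batch', cmdline = './run_batch.sh')],
+      resources = Resources(cpu = 1.0, ram = 256*MB, disk = 256*MB),
+      max_task_failures = -1
+    )
+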
+## Interacting with cron jobs via the Aurora CLI
+
+Most interaction with cron jobs takes place using the `cron` subcommand. See `aurora cron -h`
+for up-to-date usage instructions.
+
+### cron schedule
+Schedules a new cron job on the Aurora cluster for later runs or replaces the existing cron template
+with a new one. Only future runs will be affected; any existing active tasks are left intact.
+
+    $ aurora cron schedule devcluster/www-data/test/cron_hello_world /vagrant/examples/jobs/cron_hello_world.aurora
+
+### cron deschedule
+Deschedules a cron job, preventing future runs but allowing current runs to complete.
+
+    $ aurora cron deschedule devcluster/www-data/test/cron_hello_world
+
+### cron start
+Start a cron job immediately, outside of its normal cron schedule.
+
+    $ aurora cron start devcluster/www-data/test/cron_hello_world
+
+### job killall, job restart, job kill
+Cron jobs create instances running on the cluster that you can interact with like normal Aurora
+tasks with `job kill` and `job restart`.
+
+
+## Technical Note About Syntax
+
+`cron_schedule` uses a restricted subset of BSD crontab syntax. While the
+execution engine currently uses Quartz, the schedule parsing is a custom implementation of a subset
+of FreeBSD [crontab(5)](http://www.freebsd.org/cgi/man.cgi?crontab(5)) syntax. See
+[the source](https://github.com/apache/aurora/blob/master/src/main/java/org/apache/aurora/scheduler/cron/CrontabEntry.java#L106-L124)
+for details.
+
+
+## Caveats
+
+### Failovers
+No failover recovery. Aurora does not record the latest minute it fired
+triggers for across failovers. Therefore it's possible to miss triggers
+on failover. Note that this behavior may change in the future.
+
+It's necessary to sync time between schedulers with something like `ntpd`.
+Clock skew could cause double or missed triggers in the case of a failover.
+
+### Collision policy is best-effort
+Aurora aims to always have *at least one copy* of a given instance running at a time - it's
+an AP system, meaning it chooses Availability and Partition Tolerance at the expense of
+Consistency.
+
+If your collision policy is `CANCEL_NEW` and a task has terminated but
+Aurora has not yet noticed this, Aurora will go ahead and create your new
+task.
+
+If your collision policy is `KILL_EXISTING` and a task was marked `LOST`
+but has not yet been GCed, Aurora will go ahead and create your new task without
+attempting to kill the old one (outside the GC interval).
+
+### Timezone Configuration
+Cron timezone is configured independently of JVM timezone with the `-cron_timezone` flag and
+defaults to UTC.
diff --git a/source/documentation/0.15.0/features/job-updates.md b/source/documentation/0.15.0/features/job-updates.md
new file mode 100644
index 0000000..b027513
--- /dev/null
+++ b/source/documentation/0.15.0/features/job-updates.md
@@ -0,0 +1,111 @@
+Aurora Job Updates
+==================
+
+`Job` configurations can be updated at any point in their lifecycle.
+Usually updates are done incrementally using a process called a *rolling
+upgrade*, in which Tasks are upgraded in small groups, one group at a
+time.  Updates are done using various Aurora Client commands.
+
+
+Rolling Job Updates
+-------------------
+
+There are several sub-commands to manage job updates:
+
+    aurora update start <job key> <configuration file>
+    aurora update info <job key>
+    aurora update pause <job key>
+    aurora update resume <job key>
+    aurora update abort <job key>
+    aurora update list <cluster>
+
+When you `start` a job update, the command will return once it has sent the
+instructions to the scheduler.  At that point, you may view detailed
+progress for the update with the `info` subcommand, in addition to viewing
+graphical progress in the web browser.  You may also get a full listing of
+in-progress updates in a cluster with `list`.
+
+Once an update has been started, you can `pause` to keep the update but halt
+progress.  This can be useful for doing things like debugging a partially-updated
+job to determine whether you would like to proceed.  You can `resume` to
+proceed.
+
+You may `abort` a job update regardless of the state it is in. This will
+instruct the scheduler to completely abandon the job update and leave the job
+in the current (possibly partially-updated) state.
+
+For a configuration update, the Aurora Client calculates required changes
+by examining the current job config state and the new desired job config.
+It then starts a *rolling batched update process* by going through every batch
+and performing these operations:
+
+- If an instance is present in the scheduler but isn't in the new config,
+  then that instance is killed.
+- If an instance is not present in the scheduler but is present in
+  the new config, then the instance is created.
+- If an instance is present in both the scheduler and the new config, then
+  the client diffs both task configs. If it detects any changes, it
+  performs an instance update by killing the old config instance and adding
+  the new config instance.
+
+The Aurora client continues through the instance list until all tasks are
+updated, in `RUNNING`, and healthy for a configurable amount of time.
+If the client determines the update is not going well (a percentage of health
+checks have failed), it cancels the update.
+
+Update cancellation runs a procedure similar to the update sequence described
+above, but in reverse order. New instance configs are swapped
+with old instance configs and batch updates proceed backwards
+from the point where the update failed. E.g., (0,1,2) (3,4,5) (6,7,8-FAIL)
+results in a rollback in order (8,7,6) (5,4,3) (2,1,0).
+
+For details on how to control a job update, please see the
+[UpdateConfig](../../reference/configuration/#updateconfig-objects) configuration object.
+
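+As an illustrative sketch (the values are arbitrary; see the
+[UpdateConfig](../../reference/configuration/#updateconfig-objects) reference for all fields and
+defaults), the update behaviour is typically tuned directly in the job definition:
+
+    Service(
+      ...
+      update_config = UpdateConfig(
+        batch_size = 3,              # instances updated per batch
+        watch_secs = 45,             # how long an updated instance must stay healthy
+        max_per_shard_failures = 0,
+        max_total_failures = 0
+      ),
+      ...
+    )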
+
+Coordinated Job Updates
+------------------------
+
+Some Aurora services may benefit from having more control over updates by explicitly
+acknowledging ("heartbeating") job update progress. This may be helpful for mission-critical
+service updates where explicit job health monitoring is vital during the entire job update
+lifecycle. Such job updates would rely on an external service (or a custom client) periodically
+pulsing an active coordinated job update via a
+[pulseJobUpdate RPC](https://github.com/apache/aurora/blob/rel/0.15.0/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+A coordinated update is defined by setting a positive
+[pulse_interval_secs](../../reference/configuration/#updateconfig-objects) value in job configuration
+file. If no pulses are received within specified interval the update will be blocked. A blocked
+update is unable to continue rolling forward (or rolling back) but retains its active status.
+It may only be unblocked by a fresh `pulseJobUpdate` call.
+
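+A sketch of how this might look in the job configuration (the interval value is illustrative):
+
+    Service(
+      ...
+      update_config = UpdateConfig(
+        pulse_interval_secs = 60
+      ),
+      ...
+    )
+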
+NOTE: A coordinated update starts in `ROLL_FORWARD_AWAITING_PULSE` state and will not make any
+progress until the first pulse arrives. However, a paused update (`ROLL_FORWARD_PAUSED` or
+`ROLL_BACK_PAUSED`) is still considered active and upon resuming will immediately make progress
+provided the pulse interval has not expired.
+
+
+Canary Deployments
+------------------
+
+Canary deployments are a pattern for rolling out updates to a subset of job instances,
+in order to test different code versions alongside the actual production job.
+It is a risk-mitigation strategy for job owners and commonly used in a form where
+job instance 0 runs with a different configuration than the instances 1-N.
+
+For example, consider a job with 4 instances that each
+request 1 core of cpu, 1 GB of RAM, and 1 GB of disk space as specified
+in the configuration file `hello_world.aurora`. If you want to
+update it so it requests 2 GB of RAM instead of 1, you can create a new
+configuration file called `new_hello_world.aurora` and
+issue
+
+    aurora update start <job_key_value>/0-1 new_hello_world.aurora
+
+This results in instances 0 and 1 having 1 cpu, 2 GB of RAM, and 1 GB of disk space,
+while instances 2 and 3 have 1 cpu, 1 GB of RAM, and 1 GB of disk space. If instance 3
+dies and restarts, it restarts with 1 cpu, 1 GB RAM, and 1 GB disk space.
+
+This means there are two task configurations for the same job
+at the same time, each valid for a different range of instances. While this isn't a recommended
+pattern, it is valid and supported by the Aurora scheduler.
diff --git a/source/documentation/0.15.0/features/mesos-fetcher.md b/source/documentation/0.15.0/features/mesos-fetcher.md
new file mode 100644
index 0000000..967a905
--- /dev/null
+++ b/source/documentation/0.15.0/features/mesos-fetcher.md
@@ -0,0 +1,46 @@
+Mesos Fetcher
+=============
+
+Mesos has support for downloading resources into the sandbox through the
+use of the [Mesos Fetcher](http://mesos.apache.org/documentation/latest/fetcher/)
+
+Aurora supports passing URIs to the Mesos Fetcher dynamically by including
+a list of URIs in job submissions.
+
+How to use
+----------
+The scheduler flag `-enable_mesos_fetcher` must be set to true.
+
+Currently only the scheduler side of this feature has been implemented,
+so a modification to the existing client or a custom Thrift client is required
+to make use of this feature.
+
+If using a custom Thrift client, the list of URIs must be included in TaskConfig
+as the `mesosFetcherUris` field.
+
+Each Mesos Fetcher URI has the following data members:
+
+|Property | Description|
+|---------|------|
+|value (required)  |Path to the resource needed in the sandbox.|
+|extract (optional)|Extract files from packed or compressed archives into the sandbox.|
+|cache (optional) | Use caching mechanism provided by Mesos for resources.|
+
+Note that this structure is very similar to the one provided for downloading
+resources needed for a [custom executor](../../operations/configuration/).
+
+This is because both features use the Mesos fetcher to retrieve resources into
+the sandbox. However, the custom executor feature uses a static set of URIs
+configured on the server side, while the Mesos Fetcher feature uses a dynamic set
+of URIs provided at the time of job submission.
+
+Security Implications
+---------------------
+There are security implications that must be taken into account when enabling this feature.
+**Enabling this feature may potentially enable any job submitting user to perform a privilege escalation.**
+
+Until a more thorough solution is created, one step that has been taken to mitigate this issue
+is to statically mark every user-submitted URI as non-executable. This is in contrast to the set of URIs
+configured for the custom executor feature, which may mark any URI as executable.
+
+If the need arises to mark a downloaded URI as executable, please consider using the custom executor feature.
\ No newline at end of file
diff --git a/source/documentation/0.15.0/features/multitenancy.md b/source/documentation/0.15.0/features/multitenancy.md
new file mode 100644
index 0000000..924e928
--- /dev/null
+++ b/source/documentation/0.15.0/features/multitenancy.md
@@ -0,0 +1,62 @@
+Multitenancy
+============
+
+Aurora is a multi-tenant system that can run jobs of multiple clients/tenants.
+Going beyond the [resource isolation on an individual host](../resource-isolation/), it is
+crucial to prevent those jobs from stepping on each other's toes.
+
+
+Job Namespaces
+--------------
+
+The namespace for jobs in Aurora follows a hierarchical structure. This is meant to make it easier
+to differentiate between different jobs. A job key consists of four parts. The four parts are
+`<cluster>/<role>/<environment>/<jobname>` in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts.
+* Environment names are namespaces.
+* Jobname is the custom name of your job.
+
+Role names correspond to user accounts. They are used for
+[authentication](../../operations/security/), as the Linux user used to run jobs, and for the
+assignment of [quota](#preemption). If you don't know what accounts are available, contact your
+sysadmin.
+
+The environment component in the job key serves as a namespace. The values for
+environment are validated in the client and the scheduler to allow only `devel`, `test`,
+`production`, or any value matching the regular expression `staging[0-9]*`.
+
+None of the values imply any difference in the scheduling behavior. Conventionally, the
+"environment" is set so as to indicate a certain level of stability in the behavior of the job
+by ensuring that an appropriate level of testing has been performed on the application code. e.g.
+in the case of a typical Job, releases may progress through the following phases in order of
+increasing level of stability: `devel`, `test`, `staging`, `production`.
+
+
+Preemption
+----------
+
+In order to guarantee that important production jobs are always running, Aurora supports
+preemption.
+
+Consider a pending job that is a candidate for scheduling but a resource shortage
+prevents this. Active tasks can become the victim of preemption if:
+
+ - both candidate and victim are owned by the same role and the
+   [priority](../../reference/configuration/#job-objects) of a victim is lower than the
+   [priority](../../reference/configuration/#job-objects) of the candidate.
+ - OR a victim is non-[production](../../reference/configuration/#job-objects) and the candidate is
+   [production](../../reference/configuration/#job-objects).
+
+In other words, tasks from [production](../../reference/configuration/#job-objects) jobs may preempt
+tasks from any non-production job. However, a production task may only be preempted by tasks from
+production jobs in the same role with higher [priority](../../reference/configuration/#job-objects).
+
+Aurora requires resource quotas for [production non-dedicated jobs](../../reference/configuration/#job-objects).
+Quota is enforced at the job role level and when set, defines a non-preemptible pool of compute resources within
+that role. All job types (service, adhoc or cron) require role resource quota unless a job has a
+[dedicated constraint](../constraints/#dedicated-attribute) set.
+
+To grant quota to a particular role in production, an operator can use the command
+`aurora_admin set_quota`.
diff --git a/source/documentation/0.15.0/features/resource-isolation.md b/source/documentation/0.15.0/features/resource-isolation.md
new file mode 100644
index 0000000..a0b57a8
--- /dev/null
+++ b/source/documentation/0.15.0/features/resource-isolation.md
@@ -0,0 +1,179 @@
+Resources Isolation and Sizing
+==============================
+
+- [Isolation](#isolation)
+- [Sizing](#sizing)
+- [Oversubscription](#oversubscription)
+
+
+Isolation
+---------
+
+Aurora is a multi-tenant system; a single software instance runs on a
+server, serving multiple clients/tenants. To share resources among
+tenants, it implements isolation of:
+
+* CPU
+* memory
+* disk space
+
+CPU is a soft limit, and handled differently from memory and disk space.
+Too low a CPU value results in throttling your application and
+slowing it down. Memory and disk space are both hard limits; when your
+application goes over these values, it's killed.
+
+### CPU Isolation
+
+Mesos uses a quota based CPU scheduler (the *Completely Fair Scheduler*)
+to provide consistent and predictable performance.  This is effectively
+a guarantee of resources -- you receive at least what you requested, but
+also no more than you've requested.
+
+The scheduler gives applications a CPU quota for every 100 ms interval.
+When an application uses its quota for an interval, it is throttled for
+the rest of the 100 ms. Usage resets for each interval and unused
+quota does not carry over.
+
+For example, an application specifying 4.0 CPU has access to 400 ms of
+CPU time every 100 ms. This CPU quota can be used in different ways,
+depending on the application and available resources. Consider the
+scenarios shown in this diagram.
+
+![CPU Availability](../images/CPUavailability.png)
+
+* *Scenario A*: the application can use up to 4 cores continuously for
+every 100 ms interval. It is never throttled and starts processing
+new requests immediately.
+
+* *Scenario B* : the application uses up to 8 cores (depending on
+availability) but is throttled after 50 ms. The CPU quota resets at the
+start of each new 100 ms interval.
+
+* *Scenario C* : is like Scenario A, but there is a garbage collection
+event in the second interval that consumes all CPU quota. The
+application throttles for the remaining 75 ms of that interval and
+cannot service requests until the next interval. In this example, the
+garbage collection finished in one interval but, depending on how much
+garbage needs collecting, it may take more than one interval and further
+delay service of requests.
+
+*Technical Note*: Mesos considers logical cores, also known as
+hyperthreading or SMT cores, as the unit of CPU.
+
+### Memory Isolation
+
+Mesos uses dedicated memory allocation. Your application always has
+access to the amount of memory specified in your configuration. The
+application's memory use is defined as the sum of the resident set size
+(RSS) of all processes in a shard. Each shard is considered
+independently.
+
+In other words, say you specified a memory size of 10GB. Each shard
+would receive 10GB of memory. If an individual shard's memory demands
+exceed 10GB, that shard is killed, but the other shards continue
+working.
+
+*Technical note*: Total memory size is not enforced at allocation time,
+so your application can request more than its allocation without getting
+an ENOMEM. However, it will be killed shortly after.
+
+### Disk Space
+
+Disk space used by your application is defined as the sum of the files'
+disk space in your application's directory, including the `stdout` and
+`stderr` logged from your application. Each shard is considered
+independently. You should use off-node storage for your application's
+data whenever possible.
+
+In other words, say you specified disk space size of 100MB. Each shard
+would receive 100MB of disk space. If an individual shard's disk space
+demands exceed 100MB, that shard is killed, but the other shards
+continue working.
+
+After your application finishes running, its allocated disk space is
+reclaimed. Thus, your job's final action should move any disk content
+that you want to keep, such as logs, to your home file system or other
+less transitory storage. Disk reclamation takes place an undefined
+period after the application finish time; until then, the disk contents
+are still available but you shouldn't count on them being so.
+
+*Technical note* : Disk space is not enforced at write so your
+application can write above its quota without getting an ENOSPC, but it
+will be killed shortly after. This is subject to change.
+
+### GPU Isolation
+
+GPU isolation will be supported for Nvidia devices starting from Mesos 0.29.0.
+Access to the allocated units will be exclusive with no sharing between tasks
+allowed (e.g. no fractional GPU allocation). Until official documentation is released,
+see [Mesos design document](https://docs.google.com/document/d/10GJ1A80x4nIEo8kfdeo9B11PIbS1xJrrB4Z373Ifkpo/edit#heading=h.w84lz7p4eexl)
+for more details.
+
+### Other Resources
+
+Other resources, such as network bandwidth, do not have any performance
+guarantees. For some resources, such as memory bandwidth, there are no
+practical sharing methods so some application combinations collocated on
+the same host may cause contention.
+
+
+Sizing
+-------
+
+### CPU Sizing
+
+To correctly size Aurora-run Mesos tasks, specify a per-shard CPU value
+that lets the task run at its desired performance when at peak load
+distributed across all shards. Include reserve capacity of at least 50%,
+possibly more, depending on how critical your service is (or how
+confident you are about your original estimate :-)), ideally by
+increasing the number of shards to also improve resiliency. When running
+your application, observe its CPU stats over time. If consistently at or
+near your quota during peak load, you should consider increasing either
+per-shard CPU or the number of shards.
+
+### Memory Sizing
+
+Size for your application's peak requirement. Observe the per-instance
+memory statistics over time, as memory requirements can vary over
+different periods. Remember that if your application exceeds its memory
+value, it will be killed, so you should also add a safety margin of
+around 10-20%. If you have the ability to do so, you may also want to
+put alerts on the per-instance memory.
+
+### Disk Space Sizing
+
+Size for your application's peak requirement. Rotate and discard log
+files as needed to stay within your quota. When running a Java process,
+add the maximum size of the Java heap to your disk space requirement, in
+order to account for an out of memory error dumping the heap
+into the application's sandbox space.
+
+### GPU Sizing
+
+GPU sizing is highly dependent on your application requirements and is only limited
+by the number of physical GPU units available on a target box.
+
+Oversubscription
+----------------
+
+**WARNING**: This feature is currently in alpha status. Do not use it in production clusters!
+
+Mesos [supports a concept of revocable tasks](http://mesos.apache.org/documentation/latest/oversubscription/)
+by oversubscribing machine resources by the amount deemed safe to not affect the existing
+non-revocable tasks. Aurora now supports revocable jobs via the `tier` setting, which must be set
+to the value `revocable`.
+
+The Aurora scheduler must be configured to receive revocable offers from Mesos and accept revocable
+jobs. If not configured properly, revocable tasks will never get assigned to hosts and will stay in
+`PENDING`. Set this scheduler flag to allow receiving revocable Mesos offers:
+
+    -receive_revocable_resources=true
+
+Specify a tier configuration file path (unless you want to use the [default](https://github.com/apache/aurora/blob/rel/0.15.0/src/main/resources/org/apache/aurora/scheduler/tiers.json)):
+
+    -tier_config=path/to/tiers/config.json
+
+
+See the [Configuration Reference](../../reference/configuration/) for details on how to mark a job
+as being revocable.
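+
+As a sketch (only the relevant line is shown; see the reference above for the exact field), a
+revocable job would set the `tier` field in its configuration:
+
+    Service(
+      ...
+      tier = 'revocable',
+      ...
+    )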
diff --git a/source/documentation/0.15.0/features/service-discovery.md b/source/documentation/0.15.0/features/service-discovery.md
new file mode 100644
index 0000000..5a97eaf
--- /dev/null
+++ b/source/documentation/0.15.0/features/service-discovery.md
@@ -0,0 +1,42 @@
+Service Discovery
+=================
+
+It is possible for the Aurora executor to announce tasks into ServerSets for
+the purpose of service discovery.  ServerSets use the Zookeeper [group membership pattern](http://zookeeper.apache.org/doc/trunk/recipes.html#sc_outOfTheBox)
+of which there are several reference implementations:
+
+  - [C++](https://github.com/apache/mesos/blob/master/src/zookeeper/group.cpp)
+  - [Java](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/zookeeper/ServerSetImpl.java#L221)
+  - [Python](https://github.com/twitter/commons/blob/master/src/python/twitter/common/zookeeper/serverset/serverset.py#L51)
+
+These can also be used natively in Finagle using the [ZookeeperServerSetCluster](https://github.com/twitter/finagle/blob/master/finagle-serversets/src/main/scala/com/twitter/finagle/zookeeper/ZookeeperServerSetCluster.scala).
+
+For more information about how to configure announcing, see the [Configuration Reference](../../reference/configuration/).
+
+Using Mesos DiscoveryInfo
+-------------------------
+Experimental support for populating DiscoveryInfo in Mesos is introduced in Aurora. This can be used to build
+a custom service discovery system that does not rely on ZooKeeper. Please see the `Service Discovery` section in the
+[Mesos Framework Development guide](http://mesos.apache.org/documentation/latest/app-framework-development-guide/) for an
+explanation of the protobuf message in Mesos.
+
+To use this feature, please enable the `--populate_discovery_info` flag on the scheduler. All jobs started by the scheduler
+afterwards will have their portmap populated to Mesos and discoverable via the `/state` endpoint on the Mesos master and agents.
+
+### Using Mesos DNS
+An example is using [Mesos-DNS](https://github.com/mesosphere/mesos-dns), which is able to generate multiple DNS
+records. With the current implementation, the example job with key `devcluster/vagrant/test/http-example` generates at
+least the following:
+
+1. An A record for `http_example.test.vagrant.aurora.mesos` (which only includes the IP address);
+2. A [SRV record](https://en.wikipedia.org/wiki/SRV_record) for
+ `_http_example.test.vagrant._tcp.aurora.mesos`, which includes the IP address and every port. This should only
+  be used if the service has one port.
+3. A SRV record `_{port-name}._http_example.test.vagrant._tcp.aurora.mesos` for each port name
+  defined. This should be used when the service has multiple ports.
+
+Things to note:
+
+1. The domain part (".mesos" in above example) can be configured in [Mesos DNS](http://mesosphere.github.io/mesos-dns/docs/configuration-parameters.html);
+2. Right now, portmap and port aliases in announcer object are not reflected in DiscoveryInfo, therefore not visible in
+   Mesos DNS records either. This is because they are only resolved in thermos executors.
diff --git a/source/documentation/0.15.0/features/services.md b/source/documentation/0.15.0/features/services.md
new file mode 100644
index 0000000..7d7d5e1
--- /dev/null
+++ b/source/documentation/0.15.0/features/services.md
@@ -0,0 +1,99 @@
+Long-running Services
+=====================
+
+Jobs that are always restarted on completion, whether successful or unsuccessful,
+are called services. This is useful for long-running processes
+such as webservices that should always be running, unless stopped explicitly.
+
+
+Service Specification
+---------------------
+
+A job is identified as a service by the presence of the flag
+`service=True` in the [`Job`](../../reference/configuration/#job-objects) object.
+The `Service` alias can be used as shorthand for `Job` with `service=True`.
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/hello_world.aurora
+    hello = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = SequentialTask(
+      processes = [hello],
+      resources = Resources(cpu = 1.0, ram = 128*MB, disk = 128*MB)
+    )
+
+    jobs = [
+      Service(
+        task = task,
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'prod',
+        name = 'hello'
+      )
+    ]
+
+
+Jobs without the service bit set only restart up to `max_task_failures` times and only if they
+terminated unsuccessfully either due to human error or machine failure (see the
+[`Job`](../../reference/configuration/#job-objects) object for details).
+
+
+Ports
+-----
+
+In order to be useful, most services have to bind to one or more ports. Aurora enables this
+use case via the [`thermos.ports` namespace](../../reference/configuration/#thermos-namespace), which
+allows you to request arbitrarily named ports:
+
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[http]}}'
+    )
+
+
+When this process is included in a job, the job will be allocated a port, and the command line
+will be replaced with something like:
+
+    ./run_nginx.sh -port 42816
+
+Where 42816 happens to be the allocated port.
+
+For details on how to enable clients to discover this dynamically assigned port, see our
+[Service Discovery](../service-discovery/) documentation.
+
+
+Health Checking
+---------------
+
+Typically, the Thermos executor monitors processes within a task only by liveness of the forked
+process. In addition to that, Aurora has support for rudimentary health checking: either via HTTP
+or via custom shell scripts.
+
+For example, simply by requesting a `health` port, a process can request to be health checked
+via repeated calls to the `/health` endpoint:
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[health]}}'
+    )
+
+Please see the
+[configuration reference](../../reference/configuration/#healthcheckconfig-objects)
+for configuration options for this feature.
+
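+As a rough sketch, health check behaviour can be tuned per job via a `HealthCheckConfig` (the
+values below are illustrative only):
+
+    Service(
+      ...
+      health_check_config = HealthCheckConfig(
+        initial_interval_secs = 15,   # grace period before the first health check
+        interval_secs = 10,           # time between consecutive checks
+        max_consecutive_failures = 3  # consecutive failures tolerated before the task is killed
+      ),
+      ...
+    )
+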
+You can pause health checking by touching a file inside of your sandbox, named `.healthchecksnooze`.
+As long as that file is present, health checks will be disabled, enabling users to gather core
+dumps or other performance measurements without worrying about Aurora's health check killing
+their process.
+
+WARNING: Remember to remove this when you are done, otherwise your instance will have permanently
+disabled health checks.
diff --git a/source/documentation/0.15.0/features/sla-metrics.md b/source/documentation/0.15.0/features/sla-metrics.md
new file mode 100644
index 0000000..4fd14b4
--- /dev/null
+++ b/source/documentation/0.15.0/features/sla-metrics.md
@@ -0,0 +1,178 @@
+Aurora SLA Measurement
+======================
+
+- [Overview](#overview)
+- [Metric Details](#metric-details)
+  - [Platform Uptime](#platform-uptime)
+  - [Job Uptime](#job-uptime)
+  - [Median Time To Assigned (MTTA)](#median-time-to-assigned-\(mtta\))
+  - [Median Time To Running (MTTR)](#median-time-to-running-\(mttr\))
+- [Limitations](#limitations)
+
+## Overview
+
+The primary goal of the feature is collection and monitoring of Aurora job SLA (Service Level
+Agreements) metrics that define a contractual relationship between the Aurora/Mesos platform
+and hosted services.
+
+The Aurora SLA feature is by default only enabled for service (non-cron)
+production jobs (`"production=True"` in your `.aurora` config). It can be enabled for
+non-production services by an operator via the scheduler command line flag `-sla_non_prod_metrics`.
+
+Counters that track SLA measurements are computed periodically within the scheduler.
+The individual instance metrics are refreshed every minute (configurable via
+`sla_stat_refresh_interval`). The instance counters are subsequently aggregated by
+relevant grouping types before being exported to the scheduler `/vars` endpoint (when using `vagrant`,
+that would be `http://192.168.33.7:8081/vars`).
+
+
+## Metric Details
+
+### Platform Uptime
+
+*Aggregate amount of time a job spends in a non-runnable state due to platform unavailability
+or scheduling delays. This metric tracks Aurora/Mesos uptime performance and reflects on any
+system-caused downtime events (tasks LOST or DRAINED). Any user-initiated task kills/restarts
+will not degrade this metric.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_platform_uptime_percent`
+* Per cluster - `sla_cluster_platform_uptime_percent`
+
+**Units:** percent
+
+A fault in the task environment may cause Aurora/Mesos to have different views of the task state
+or to lose track of the task's existence. In such cases, the service task is marked as LOST and
+rescheduled by Aurora. For example, this may happen when the task stays in ASSIGNED or STARTING
+for too long or the Mesos agent becomes unhealthy (or disappears completely). The time between
+task entering LOST and its replacement reaching RUNNING state is counted towards platform downtime.
+
+Another example of a platform downtime event is the administrator-requested task rescheduling. This
+happens during planned Mesos agent maintenance when all agent tasks are marked as DRAINED and
+rescheduled elsewhere.
+
+To accurately calculate Platform Uptime, we must separate platform incurred downtime from user
+actions that put a service instance in a non-operational state. It is simpler to isolate
+user-incurred downtime and treat all other downtime as platform incurred.
+
+Currently, a user can cause a healthy service (task) downtime in only two ways: via `killTasks`
+or `restartShards` RPCs. For both, their affected tasks leave an audit state transition trail
+relevant to uptime calculations. By applying a special "SLA meaning" to exposed task state
+transition records, we can build a deterministic downtime trace for every given service instance.
+
+A task going through a state transition carries one of three possible SLA meanings
+(see [SlaAlgorithm.java](https://github.com/apache/aurora/blob/rel/0.15.0/src/main/java/org/apache/aurora/scheduler/sla/SlaAlgorithm.java) for
+sla-to-task-state mapping):
+
+* Task is UP: starts a period where the task is considered to be up and running from the Aurora
+  platform standpoint.
+
+* Task is DOWN: starts a period where the task cannot reach the UP state for some
+  non-user-related reason. Counts towards instance downtime.
+
+* Task is REMOVED from SLA: starts a period where the task is not expected to be UP due to
+  user initiated action or failure. We ignore this period for the uptime calculation purposes.
+
+This metric is recalculated over the last sampling period (last minute) to account for
+any UP/DOWN/REMOVED events. It ignores any UP/DOWN events not immediately adjacent to the
+sampling interval as well as adjacent REMOVED events.
+
+### Job Uptime
+
+*Percentage of the job instances considered to be in RUNNING state for the specified duration
+relative to request time. This is a purely application side metric that is considering aggregate
+uptime of all RUNNING instances. Any user- or platform initiated restarts directly affect
+this metric.*
+
+**Collection scope:** We currently expose job uptime values at 5 pre-defined
+percentiles (50th, 75th, 90th, 95th and 99th):
+
+* `sla_<job_key>_job_uptime_50_00_sec`
+* `sla_<job_key>_job_uptime_75_00_sec`
+* `sla_<job_key>_job_uptime_90_00_sec`
+* `sla_<job_key>_job_uptime_95_00_sec`
+* `sla_<job_key>_job_uptime_99_00_sec`
+
+**Units:** seconds
+
+You can also get customized real-time stats from the Aurora client. See `aurora sla -h` for
+more details.
+
+### Median Time To Assigned (MTTA)
+
+*Median time a job spends waiting for its tasks to be assigned to a host. This is a combined
+metric that helps track the dependency of scheduling performance on the requested resources
+(user scope) as well as the internal scheduler bin-packing algorithm efficiency (platform scope).*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtta_ms`
+* Per cluster - `sla_cluster_mtta_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceAggregates.java](https://github.com/apache/aurora/blob/rel/0.15.0/src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java)
+  * By CPU:
+    * `sla_cpu_small_mtta_ms`
+    * `sla_cpu_medium_mtta_ms`
+    * `sla_cpu_large_mtta_ms`
+    * `sla_cpu_xlarge_mtta_ms`
+    * `sla_cpu_xxlarge_mtta_ms`
+  * By RAM:
+    * `sla_ram_small_mtta_ms`
+    * `sla_ram_medium_mtta_ms`
+    * `sla_ram_large_mtta_ms`
+    * `sla_ram_xlarge_mtta_ms`
+    * `sla_ram_xxlarge_mtta_ms`
+  * By DISK:
+    * `sla_disk_small_mtta_ms`
+    * `sla_disk_medium_mtta_ms`
+    * `sla_disk_large_mtta_ms`
+    * `sla_disk_xlarge_mtta_ms`
+    * `sla_disk_xxlarge_mtta_ms`
+
+**Units:** milliseconds
+
+MTTA only considers instances that have already reached ASSIGNED state and ignores those
+that are still PENDING. This ensures straggler instances (e.g. with unreasonable resource
+constraints) do not affect metric curves.
+
+### Median Time To Running (MTTR)
+
+*Median time a job waits for its tasks to reach RUNNING state. This is a comprehensive metric
+reflecting the overall time it takes for Aurora/Mesos to start executing user content.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mttr_ms`
+* Per cluster - `sla_cluster_mttr_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceAggregates.java](https://github.com/apache/aurora/blob/rel/0.15.0/src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java)
+  * By CPU:
+    * `sla_cpu_small_mttr_ms`
+    * `sla_cpu_medium_mttr_ms`
+    * `sla_cpu_large_mttr_ms`
+    * `sla_cpu_xlarge_mttr_ms`
+    * `sla_cpu_xxlarge_mttr_ms`
+  * By RAM:
+    * `sla_ram_small_mttr_ms`
+    * `sla_ram_medium_mttr_ms`
+    * `sla_ram_large_mttr_ms`
+    * `sla_ram_xlarge_mttr_ms`
+    * `sla_ram_xxlarge_mttr_ms`
+  * By DISK:
+    * `sla_disk_small_mttr_ms`
+    * `sla_disk_medium_mttr_ms`
+    * `sla_disk_large_mttr_ms`
+    * `sla_disk_xlarge_mttr_ms`
+    * `sla_disk_xxlarge_mttr_ms`
+
+**Units:** milliseconds
+
+MTTR only considers instances in RUNNING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+## Limitations
+
+* The availability of Aurora SLA metrics is bound by the scheduler availability.
+
+* All metrics are calculated at a pre-defined interval (currently set at 1 minute).
+  Scheduler restarts may result in missed collections.
diff --git a/source/documentation/0.15.0/features/webhooks.md b/source/documentation/0.15.0/features/webhooks.md
new file mode 100644
index 0000000..075aeec
--- /dev/null
+++ b/source/documentation/0.15.0/features/webhooks.md
@@ -0,0 +1,80 @@
+Webhooks
+========
+
+Aurora has an optional feature which allows an operator to specify a file to configure an HTTP webhook
+to receive task state change events. It can be enabled with a scheduler flag, e.g.
+`-webhook_config=/path/to/webhook.json`. At this point, webhooks are still considered *experimental*.
+
+Below is a sample configuration:
+
+```json
+{
+  "headers": {
+    "Content-Type": "application/vnd.kafka.json.v1+json",
+    "Producer-Type": "reliable"
+  },
+  "targetURL": "http://localhost:5000/",
+  "timeoutMsec": 5
+}
+```
+
+And an example of a response that you will get back:
+```json
+{
+    "task":
+    {
+        "cachedHashCode":0,
+        "assignedTask": {
+            "cachedHashCode":0,
+            "taskId":"vagrant-test-http_example-8-a6cf7ec5-d793-49c7-b10f-0e14ab80bfff",
+            "task": {
+                "cachedHashCode":-1819348376,
+                "job": {
+                    "cachedHashCode":803049425,
+                    "role":"vagrant",
+                    "environment":"test",
+                    "name":"http_example"
+                    },
+                "owner": {
+                    "cachedHashCode":226895216,
+                    "user":"vagrant"
+                    },
+                "isService":true,
+                "numCpus":0.1,
+                "ramMb":16,
+                "diskMb":8,
+                "priority":0,
+                "maxTaskFailures":1,
+                "production":false,
+                "resources":[
+                    {"cachedHashCode":729800451,"setField":"NUM_CPUS","value":0.1},
+                    {"cachedHashCode":552899914,"setField":"RAM_MB","value":16},
+                    {"cachedHashCode":-1547868317,"setField":"DISK_MB","value":8},
+                    {"cachedHashCode":1957328227,"setField":"NAMED_PORT","value":"http"},
+                    {"cachedHashCode":1954229436,"setField":"NAMED_PORT","value":"tcp"}
+                    ],
+                "constraints":[],
+                "requestedPorts":["http","tcp"],
+                "taskLinks":{"http":"http://%host%:%port:http%"},
+                "contactEmail":"vagrant@localhost",
+                "executorConfig": {
+                    "cachedHashCode":-1194797325,
+                    "name":"AuroraExecutor",
+                    "data": "{\"environment\": \"test\", \"health_check_config\": {\"initial_interval_secs\": 5.0, \"health_checker\": { \"http\": {\"expected_response_code\": 0, \"endpoint\": \"/health\", \"expected_response\": \"ok\"}}, \"max_consecutive_failures\": 0, \"timeout_secs\": 1.0, \"interval_secs\": 1.0}, \"name\": \"http_example\", \"service\": true, \"max_task_failures\": 1, \"cron_collision_policy\": \"KILL_EXISTING\", \"enable_hooks\": false, \"cluster\": \"devcluster\", \"task\": {\"processes\": [{\"daemon\": false, \"name\": \"echo_ports\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"echo \\\"tcp port: {{thermos.ports[tcp]}}; http port: {{thermos.ports[http]}}; alias: {{thermos.ports[alias]}}\\\"\", \"final\": false}, {\"daemon\": false, \"name\": \"stage_server\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"cp /vagrant/src/test/sh/org/apache/aurora/e2e/http_example.py .\", \"final\": false}, {\"daemon\": false, \"name\": \"run_server\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"python http_example.py {{thermos.ports[http]}}\", \"final\": false}], \"name\": \"http_example\", \"finalization_wait\": 30, \"max_failures\": 1, \"max_concurrency\": 0, \"resources\": {\"disk\": 8388608, \"ram\": 16777216, \"cpu\": 0.1}, \"constraints\": [{\"order\": [\"echo_ports\", \"stage_server\", \"run_server\"]}]}, \"production\": false, \"role\": \"vagrant\", \"contact\": \"vagrant@localhost\", \"announce\": {\"primary_port\": \"http\", \"portmap\": {\"alias\": \"http\"}}, \"lifecycle\": {\"http\": {\"graceful_shutdown_endpoint\": \"/quitquitquit\", \"port\": \"health\", \"shutdown_endpoint\": \"/abortabortabort\"}}, \"priority\": 0}"},
+                    "metadata":[],
+                    "container":{
+                        "cachedHashCode":-1955376216,
+                        "setField":"MESOS",
+                        "value":{"cachedHashCode":31}}
+                    },
+                    "assignedPorts":{},
+                    "instanceId":8
+        },
+        "status":"PENDING",
+        "failureCount":0,
+        "taskEvents":[
+            {"cachedHashCode":0,"timestamp":1464992060258,"status":"PENDING","scheduler":"aurora"}]
+        },
+        "oldState":{}}
+```
+
diff --git a/source/documentation/0.15.0/getting-started/overview.md b/source/documentation/0.15.0/getting-started/overview.md
new file mode 100644
index 0000000..84c5916
--- /dev/null
+++ b/source/documentation/0.15.0/getting-started/overview.md
@@ -0,0 +1,112 @@
+Aurora System Overview
+======================
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+
+Components
+----------
+
+It is important to have an understanding of the components that make up
+a functioning Aurora cluster.
+
+![Aurora Components](../images/components.png)
+
+* **Aurora scheduler**
+  The scheduler is your primary interface to the work you run in your cluster.  You will
+  instruct it to run jobs, and it will manage them in Mesos for you.  You will also frequently use
+  the scheduler's read-only web interface as a heads-up display for what's running in your cluster.
+
+* **Aurora client**
+  The client (`aurora` command) is a command line tool that exposes primitives that you can use to
+  interact with the scheduler. The client operates on jobs.
+
+  Aurora also provides an admin client (`aurora_admin` command) that contains commands built for
+  cluster administrators.  You can use this tool to do things like manage user quotas and manage
+  graceful maintenance on machines in the cluster.
+
+* **Aurora executor**
+  The executor (a.k.a. Thermos executor) is responsible for carrying out the workloads described in
+  the Aurora DSL (`.aurora` files).  The executor is what actually executes user processes.  It will
+  also perform health checking of tasks and register tasks in ZooKeeper for the purposes of dynamic
+  service discovery.
+
+* **Aurora observer**
+  The observer provides browser-based access to the status of individual tasks executing on worker
+  machines.  It gives insight into the processes executing, and facilitates browsing of task sandbox
+  directories.
+
+* **ZooKeeper**
+  [ZooKeeper](http://zookeeper.apache.org) is a distributed consensus system.  In an Aurora cluster
+  it is used for reliable election of the leading Aurora scheduler and Mesos master.  It is also
+  used as a vehicle for service discovery, see [Service Discovery](../../features/service-discovery/)
+
+* **Mesos master**
+  The master is responsible for tracking worker machines and performing accounting of their
+  resources.  The scheduler interfaces with the master to control the cluster.
+
+* **Mesos agent**
+  The agent receives work assigned by the scheduler and executes it.  It interfaces with Linux
+  isolation systems like cgroups, namespaces and Docker to manage the resource consumption of tasks.
+  When a user task is launched, the agent will launch the executor (in the context of a Linux cgroup
+  or Docker container depending upon the environment), which will in turn fork user processes.
+
+  In earlier versions of Mesos and Aurora, the Mesos agent was known as the Mesos slave.
+
+
+Jobs, Tasks and Processes
+--------------------------
+
+Aurora is a Mesos framework used to schedule *jobs* onto Mesos. Mesos
+cares about individual *tasks*, but typical jobs consist of dozens or
+hundreds of task replicas. Aurora provides a layer on top of Mesos with
+its `Job` abstraction. An Aurora `Job` consists of a task template and
+instructions for creating near-identical replicas of that task (modulo
+things like "instance id" or specific port numbers which may differ from
+machine to machine).
+
+The near-identical replicas of a task within a Job are also referred to as "instances" or
+"shards".
+
+A task can merely be a single *process* corresponding to a single
+command line, such as `python2.7 my_script.py`. However, a task can also
+consist of many separate processes that all run within a single
+sandbox: for example, cooperating processes such as a `logrotate`
+helper, an `installer`, and a master or agent process. This is
+where Thermos comes in. While Aurora provides a `Job` abstraction on
+top of Mesos `Task`s, Thermos provides a `Process` abstraction
+underneath Mesos `Task`s and serves as part of the Aurora framework's
+executor.
+
+You define `Job`s, `Task`s, and `Process`es in a configuration file.
+Configuration files are written in Python, and make use of the
+[Pystachio](https://github.com/wickman/pystachio) templating language,
+along with specific Aurora, Mesos, and Thermos commands and methods.
+The configuration files typically end with a `.aurora` extension.
+
+Summary:
+
+* Aurora manages jobs made of tasks.
+* Mesos manages tasks made of processes.
+* Thermos manages processes.
+* All of the above is defined in `.aurora` configuration files.
+
+![Aurora hierarchy](../images/aurora_hierarchy.png)
+
+Each `Task` has a *sandbox* created when the `Task` starts and garbage
+collected when it finishes. All of a `Task`'s processes run in its
+sandbox, so processes can share state by using a shared current working
+directory.
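+
+For example, an earlier process can stage a file that a later process consumes. A minimal
+sketch using the `Process`/`SequentialTask` primitives of the Aurora DSL (names and commands
+here are illustrative only, not a complete job definition):
+
+```python
+# Two processes cooperating through the shared sandbox working directory.
+producer = Process(
+  name = 'producer',
+  cmdline = 'echo "state to share" > shared_state.txt')
+
+consumer = Process(
+  name = 'consumer',
+  cmdline = 'cat shared_state.txt')
+
+# SequentialTask runs the processes in order, inside one sandbox.
+share_task = SequentialTask(
+  processes = [producer, consumer],
+  resources = Resources(cpu = 0.1, ram = 16*MB, disk = 8*MB))
+```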
+
+The sandbox garbage collection policy considers many factors, most
+importantly age and size. It makes a best-effort attempt to keep
+sandboxes around as long as possible post-task in order for service
+owners to inspect data and logs, should the `Task` have completed
+abnormally. But you can't design your applications assuming sandboxes
+will be around forever; instead, build log saving or other
+checkpointing mechanisms directly into your application or into your
+`Job` description.
+
diff --git a/source/documentation/0.15.0/getting-started/tutorial.md b/source/documentation/0.15.0/getting-started/tutorial.md
new file mode 100644
index 0000000..1f037fc
--- /dev/null
+++ b/source/documentation/0.15.0/getting-started/tutorial.md
@@ -0,0 +1,258 @@
+# Aurora Tutorial
+
+This tutorial shows how to use the Aurora scheduler to run (and "`printf-debug`")
+a hello world program on Mesos. This is the recommended document for new Aurora users
+to start getting up to speed on the system.
+
+- [Prerequisite](#prerequisite)
+- [The Script](#the-script)
+- [Aurora Configuration](#aurora-configuration)
+- [Creating the Job](#creating-the-job)
+- [Watching the Job Run](#watching-the-job-run)
+- [Cleanup](#cleanup)
+- [Next Steps](#next-steps)
+
+
+## Prerequisite
+
+This tutorial assumes you are running [Aurora locally using Vagrant](../vagrant/).
+However, in general the instructions are also applicable to any other
+[Aurora installation](../../operations/installation/).
+
+Unless otherwise stated, all commands are to be run from the root of the aurora
+repository clone.
+
+
+## The Script
+
+Our "hello world" application is a simple Python script that loops
+forever, displaying the time every few seconds. Copy the code below and
+put it in a file named `hello_world.py` in the root of your Aurora repository clone
+(Note: this directory is the same as `/vagrant` inside the Vagrant VMs).
+
+The script has an intentional bug, which we will explain later on.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+import time
+
+def main():
+  SLEEP_DELAY = 10
+  # Python experts - ignore this blatant bug.
+  for i in xrang(100):
+    print("Hello world! The time is now: %s. Sleeping for %d secs" % (
+      time.asctime(), SLEEP_DELAY))
+    time.sleep(SLEEP_DELAY)
+
+if __name__ == "__main__":
+  main()
+```
+
+## Aurora Configuration
+
+Once we have our script/program, we need to create a *configuration
+file* that tells Aurora how to manage and launch our Job. Save the below
+code in the file `hello_world.aurora`.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+pkg_path = '/vagrant/hello_world.py'
+
+# we use a trick here to make the configuration change with
+# the contents of the file, for simplicity.  in a normal setting, packages would be
+# versioned, and the version number would be changed in the configuration.
+import hashlib
+with open(pkg_path, 'rb') as f:
+  pkg_checksum = hashlib.md5(f.read()).hexdigest()
+
+# copy hello_world.py into the local sandbox
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s . && echo %s && chmod +x hello_world.py' % (pkg_path, pkg_checksum))
+
+# run the script
+hello_world = Process(
+  name = 'hello_world',
+  cmdline = 'python -u hello_world.py')
+
+# describe the task
+hello_world_task = SequentialTask(
+  processes = [install, hello_world],
+  resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB))
+
+jobs = [
+  Service(cluster = 'devcluster',
+          environment = 'devel',
+          role = 'www-data',
+          name = 'hello_world',
+          task = hello_world_task)
+]
+```
+
+There is a lot going on in that configuration file:
+
+1. From a "big picture" viewpoint, it first defines two
+Processes. Then it defines a Task that runs the two Processes in the
+order specified in the Task definition, as well as specifying what
+computational and memory resources are available for them.  Finally,
+it defines a Job that will schedule the Task on available and suitable
+machines. This Job is the sole member of a list of Jobs; you can
+specify more than one Job in a config file.
+
+2. At the Process level, it specifies how to get your code into the
+local sandbox in which it will run. It then specifies how the code is
+actually run once the second Process starts.
+
+For more about Aurora configuration files, see the [Configuration
+Tutorial](../../reference/configuration-tutorial/) and the [Configuration
+Reference](../../reference/configuration/) (preferably after finishing this
+tutorial).
+
+
+## Creating the Job
+
+We're ready to launch our job! To do so, we use the Aurora Client to
+issue a Job creation request to the Aurora scheduler.
+
+Many Aurora Client commands take a *job key* argument, which uniquely
+identifies a Job. A job key consists of four parts, each separated by a
+"/". The four parts are  `<cluster>/<role>/<environment>/<jobname>`
+in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts existing on the agent machines. If you
+don't know what accounts are available, contact your sysadmin.
+* Environment names are namespaces; you can count on `test`, `devel`,
+`staging` and `prod` existing.
+* Jobname is the custom name of your job.
+
+When comparing two job keys, if any of the four parts is different from
+its counterpart in the other key, then the two job keys identify two separate
+jobs. If all four values are identical, the job keys identify the same job.
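+
+The comparison rule is simple enough to express as a short Python sketch (purely
+illustrative; this helper is not part of the Aurora client):
+
+```python
+def same_job(key_a, key_b):
+  """Two job keys identify the same job only if all four '/'-separated parts match."""
+  return key_a.split('/') == key_b.split('/')
+
+# These differ only in the environment part, so they identify two separate jobs:
+assert not same_job('devcluster/www-data/devel/hello_world',
+                    'devcluster/www-data/prod/hello_world')
+```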
+
+The `clusters.json` [client configuration](../../reference/client-cluster-configuration/)
+for the Aurora scheduler defines the available cluster names.
+For Vagrant, from the top-level of your Aurora repository clone, do:
+
+    $ vagrant ssh
+
+Followed by:
+
+    vagrant@aurora:~$ cat /etc/aurora/clusters.json
+
+You'll see something like the following. The `name` value shown here corresponds to a job key's cluster value.
+
+```javascript
+[{
+  "name": "devcluster",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler",
+  "auth_mechanism": "UNAUTHENTICATED",
+  "slave_run_directory": "latest",
+  "slave_root": "/var/lib/mesos"
+}]
+```
+
+The Aurora Client command that actually runs our Job is `aurora job create`. It creates a Job as
+specified by its job key and configuration file arguments and runs it.
+
+    aurora job create <cluster>/<role>/<environment>/<jobname> <config_file>
+
+Or for our example:
+
+    aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+After entering our virtual machine using `vagrant ssh`, this returns:
+
+    vagrant@aurora:~$ aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Creating job hello_world
+     INFO] Checking status of devcluster/www-data/devel/hello_world
+    Job create succeeded: job url=http://aurora.local:8081/scheduler/www-data/devel/hello_world
+
+
+## Watching the Job Run
+
+Now that our job is running, let's see what it's doing. Access the
+scheduler web interface at `http://$scheduler_hostname:$scheduler_port/scheduler`,
+or, when using Vagrant, at `http://192.168.33.7:8081/scheduler`.
+First we see what Jobs are scheduled:
+
+![Scheduled Jobs](../images/ScheduledJobs.png)
+
+Click on your user name, `www-data` in this case, and we see the Jobs associated
+with that role:
+
+![Role Jobs](../images/RoleJobs.png)
+
+If you click on your `hello_world` Job, you'll see:
+
+![hello_world Job](../images/HelloWorldJob.png)
+
+Oops, looks like our first job didn't quite work! The task is temporarily throttled for
+having failed on every attempt of the Aurora scheduler to run it. We have to figure out
+what is going wrong.
+
+On the Completed tasks tab, we see all past attempts of the Aurora scheduler to run our job.
+
+![Completed tasks tab](../images/CompletedTasks.png)
+
+We can navigate to the Task page of a failed run by clicking on the host link.
+
+![Task page](../images/TaskBreakdown.png)
+
+Once there, we see that the `hello_world` process failed. The Task page
+captures the standard error and standard output streams and makes them available.
+Clicking through to `stderr` on the failed `hello_world` process, we see what happened.
+
+![stderr page](../images/stderr.png)
+
+It looks like we made a typo in our Python script. We wanted `xrange`,
+not `xrang`. Edit the `hello_world.py` script to use the correct function
+and save it as `hello_world_v2.py`. Then update the `hello_world.aurora`
+configuration to pick up the new version, as sketched below.
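+
+Assuming the configuration shown earlier, one way to do that is to point `pkg_path` at the
+new script and copy it into the sandbox under the name the other processes expect (an
+illustrative edit, not the only option):
+
+```python
+pkg_path = '/vagrant/hello_world_v2.py'
+
+# Copy the fixed script into the sandbox as hello_world.py so the
+# hello_world process can stay unchanged.  The configuration still
+# changes with the file contents via the checksum trick above.
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s ./hello_world.py && echo %s && chmod +x hello_world.py' % (
+    pkg_path, pkg_checksum))
+```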
+
+In order to try again, we can now instruct the scheduler to update our job:
+
+    vagrant@aurora:~$ aurora update start devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Starting update for: hello_world
+    Job update has started. View your update progress at http://aurora.local:8081/scheduler/www-data/devel/hello_world/update/8ef38017-e60f-400d-a2f2-b5a8b724e95b
+
+This time, the task comes up.
+
+![Running Job](../images/RunningJob.png)
+
+By again clicking on the host, we inspect the Task page, and see that the
+`hello_world` process is running.
+
+![Running Task page](../images/runningtask.png)
+
+We then inspect the output by clicking on `stdout` and see our process'
+output:
+
+![stdout page](../images/stdout.png)
+
+## Cleanup
+
+Now that we're done, we kill the job using the Aurora client:
+
+    vagrant@aurora:~$ aurora job killall devcluster/www-data/devel/hello_world
+     INFO] Killing tasks for job: devcluster/www-data/devel/hello_world
+     INFO] Instances to be killed: [0]
+    Successfully killed instances [0]
+    Job killall succeeded
+
+The job page now shows the `hello_world` tasks as completed.
+
+![Killed Task page](../images/killedtask.png)
+
+## Next Steps
+
+Now that you've finished this Tutorial, you should read or do the following:
+
+- [The Aurora Configuration Tutorial](../../reference/configuration-tutorial/), which provides more examples
+  and best practices for writing Aurora configurations. You should also look at
+  the [Aurora Configuration Reference](../../reference/configuration/).
+- Explore the Aurora Client - use `aurora -h`, and read the
+  [Aurora Client Commands](../../reference/client-commands/) document.
diff --git a/source/documentation/0.15.0/getting-started/vagrant.md b/source/documentation/0.15.0/getting-started/vagrant.md
new file mode 100644
index 0000000..26b9a80
--- /dev/null
+++ b/source/documentation/0.15.0/getting-started/vagrant.md
@@ -0,0 +1,154 @@
+A Local Cluster with Vagrant
+============================
+
+This document shows you how to configure a complete cluster using a virtual machine. This setup
+replicates a real cluster in your development machine as closely as possible. After you complete
+the steps outlined here, you will be ready to create and run your first Aurora job.
+
+The following sections describe these steps in detail:
+
+1. [Overview](#overview)
+1. [Install VirtualBox and Vagrant](#install-virtualbox-and-vagrant)
+1. [Clone the Aurora repository](#clone-the-aurora-repository)
+1. [Start the local cluster](#start-the-local-cluster)
+1. [Log onto the VM](#log-onto-the-vm)
+1. [Run your first job](#run-your-first-job)
+1. [Rebuild components](#rebuild-components)
+1. [Shut down or delete your local cluster](#shut-down-or-delete-your-local-cluster)
+1. [Troubleshooting](#troubleshooting)
+
+
+Overview
+--------
+
+The Aurora distribution includes a set of scripts that enable you to create a local cluster in
+your development machine. These scripts use [Vagrant](https://www.vagrantup.com/) and
+[VirtualBox](https://www.virtualbox.org/) to run and configure a virtual machine. Once the
+virtual machine is running, the scripts install and initialize Aurora and any required components
+to create the local cluster.
+
+
+Install VirtualBox and Vagrant
+------------------------------
+
+First, download and install [VirtualBox](https://www.virtualbox.org/) on your development machine.
+
+Then download and install [Vagrant](https://www.vagrantup.com/). To verify that the installation
+was successful, open a terminal window and type the `vagrant` command. You should see a list of
+common commands for this tool.
+
+
+Clone the Aurora repository
+---------------------------
+
+To obtain the Aurora source distribution, clone its Git repository using the following command:
+
+     git clone git://git.apache.org/aurora.git
+
+
+Start the local cluster
+-----------------------
+
+Now change into the `aurora/` directory, which contains the Aurora source code and
+other scripts and tools:
+
+     cd aurora/
+
+To start the local cluster, type the following command:
+
+     vagrant up
+
+This command uses the configuration scripts in the Aurora distribution to:
+
+* Download a Linux system image.
+* Start a virtual machine (VM) and configure it.
+* Install the required build tools on the VM.
+* Install Aurora's requirements (like [Mesos](http://mesos.apache.org/) and
+[ZooKeeper](http://zookeeper.apache.org/)) on the VM.
+* Build and install Aurora from source on the VM.
+* Start Aurora's services on the VM.
+
+This process takes several minutes to complete.
+
+You may notice a warning that guest additions in the VM don't match your version of VirtualBox.
+This should generally be harmless, but you may wish to install a Vagrant plugin to take care of
+mismatches like this for you:
+
+     vagrant plugin install vagrant-vbguest
+
+With this plugin installed, whenever you `vagrant up`, the plugin will upgrade the guest additions
+for you when a version mismatch is detected. You can read more about the plugin
+[here](https://github.com/dotless-de/vagrant-vbguest).
+
+To verify that Aurora is running on the cluster, visit the following URLs:
+
+* Scheduler - http://192.168.33.7:8081
+* Observer - http://192.168.33.7:1338
+* Mesos Master - http://192.168.33.7:5050
+* Mesos Agent - http://192.168.33.7:5051
+
+
+Log onto the VM
+---------------
+
+To SSH into the VM, run the following command in your development machine:
+
+     vagrant ssh
+
+To verify that Aurora is installed in the VM, type the `aurora` command. You should see a list
+of arguments and possible commands.
+
+The `/vagrant` directory on the VM is mapped to the `aurora/` local directory
+from which you started the cluster. You can edit files inside this directory in your development
+machine and access them from the VM under `/vagrant`.
+
+A pre-installed `clusters.json` file refers to your local cluster as `devcluster`, which you
+will use in client commands.
+
+
+Run your first job
+------------------
+
+Now that your cluster is up and running, you are ready to define and run your first job in Aurora.
+For more information, see the [Aurora Tutorial](../tutorial/).
+
+
+Rebuild components
+------------------
+
+If you are changing Aurora code and would like to rebuild a component, you can use the `aurorabuild`
+command on the VM to build and restart a component.  This is considerably faster than destroying
+and rebuilding your VM.
+
+`aurorabuild` accepts a list of components to build and update; invoke it with no arguments to
+see the supported components. For example, to rebuild and restart the client:
+
+     vagrant ssh -c 'aurorabuild client'
+
+
+Shut down or delete your local cluster
+--------------------------------------
+
+To shut down your local cluster, run the `vagrant halt` command in your development machine. To
+start it again, run the `vagrant up` command.
+
+Once you are finished with your local cluster, or if you would otherwise like to start from scratch,
+you can use the command `vagrant destroy` to turn off and delete the virtual machine.
+
+
+Troubleshooting
+---------------
+
+Most Vagrant-related problems can be fixed with the following steps:
+
+* Destroying the Vagrant environment with `vagrant destroy`
+* Killing any orphaned VMs (see AURORA-499) with the VirtualBox UI or the `VBoxManage` command line tool
+* Cleaning the repository of build artifacts and other intermediate output with `git clean -fdx`
+* Bringing up the Vagrant environment again with `vagrant up`
+
+If that still doesn't solve your problem, make sure to inspect the log files:
+
+* Scheduler: `/var/log/upstart/aurora-scheduler.log`
+* Observer: `/var/log/upstart/aurora-thermos-observer.log`
+* Mesos Master: `/var/log/mesos/mesos-master.INFO` (also see `.WARNING` and `.ERROR`)
+* Mesos Agent: `/var/log/mesos/mesos-slave.INFO` (also see `.WARNING` and `.ERROR`)
diff --git a/publish/documentation/latest/images/CPUavailability.png b/source/documentation/0.15.0/images/CPUavailability.png
similarity index 100%
copy from publish/documentation/latest/images/CPUavailability.png
copy to source/documentation/0.15.0/images/CPUavailability.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/CompletedTasks.png b/source/documentation/0.15.0/images/CompletedTasks.png
new file mode 100644
index 0000000..343e111
--- /dev/null
+++ b/source/documentation/0.15.0/images/CompletedTasks.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/HelloWorldJob.png b/source/documentation/0.15.0/images/HelloWorldJob.png
new file mode 100644
index 0000000..4977227
--- /dev/null
+++ b/source/documentation/0.15.0/images/HelloWorldJob.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/RoleJobs.png b/source/documentation/0.15.0/images/RoleJobs.png
new file mode 100644
index 0000000..95ffa49
--- /dev/null
+++ b/source/documentation/0.15.0/images/RoleJobs.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/RunningJob.png b/source/documentation/0.15.0/images/RunningJob.png
new file mode 100644
index 0000000..4e7a045
--- /dev/null
+++ b/source/documentation/0.15.0/images/RunningJob.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/ScheduledJobs.png b/source/documentation/0.15.0/images/ScheduledJobs.png
new file mode 100644
index 0000000..0be4731
--- /dev/null
+++ b/source/documentation/0.15.0/images/ScheduledJobs.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/TaskBreakdown.png b/source/documentation/0.15.0/images/TaskBreakdown.png
new file mode 100644
index 0000000..fab702c
--- /dev/null
+++ b/source/documentation/0.15.0/images/TaskBreakdown.png
Binary files differ
diff --git a/publish/documentation/latest/images/aurora_hierarchy.png b/source/documentation/0.15.0/images/aurora_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/aurora_hierarchy.png
copy to source/documentation/0.15.0/images/aurora_hierarchy.png
Binary files differ
diff --git a/publish/assets/img/aurora_logo.png b/source/documentation/0.15.0/images/aurora_logo.png
similarity index 100%
copy from publish/assets/img/aurora_logo.png
copy to source/documentation/0.15.0/images/aurora_logo.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/components.odg b/source/documentation/0.15.0/images/components.odg
new file mode 100644
index 0000000..a0f3b28
--- /dev/null
+++ b/source/documentation/0.15.0/images/components.odg
Binary files differ
diff --git a/source/documentation/0.15.0/images/components.png b/source/documentation/0.15.0/images/components.png
new file mode 100644
index 0000000..e8228c0
--- /dev/null
+++ b/source/documentation/0.15.0/images/components.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/debug-client-test.png b/source/documentation/0.15.0/images/debug-client-test.png
new file mode 100644
index 0000000..8b67a10
--- /dev/null
+++ b/source/documentation/0.15.0/images/debug-client-test.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/debugging-client-test.png b/source/documentation/0.15.0/images/debugging-client-test.png
new file mode 100644
index 0000000..df7b7e3
--- /dev/null
+++ b/source/documentation/0.15.0/images/debugging-client-test.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/killedtask.png b/source/documentation/0.15.0/images/killedtask.png
new file mode 100644
index 0000000..bb1bc92
--- /dev/null
+++ b/source/documentation/0.15.0/images/killedtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/lifeofatask.png b/source/documentation/0.15.0/images/lifeofatask.png
similarity index 100%
copy from publish/documentation/latest/images/lifeofatask.png
copy to source/documentation/0.15.0/images/lifeofatask.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png b/source/documentation/0.15.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
new file mode 100644
index 0000000..ea9866e
--- /dev/null
+++ b/source/documentation/0.15.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png b/source/documentation/0.15.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
new file mode 100644
index 0000000..c9539df
--- /dev/null
+++ b/source/documentation/0.15.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png b/source/documentation/0.15.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
new file mode 100644
index 0000000..99674c7
--- /dev/null
+++ b/source/documentation/0.15.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/presentations/02_28_2015_apache_aurora_thumb.png b/source/documentation/0.15.0/images/presentations/02_28_2015_apache_aurora_thumb.png
new file mode 100644
index 0000000..6e0e1fb
--- /dev/null
+++ b/source/documentation/0.15.0/images/presentations/02_28_2015_apache_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png b/source/documentation/0.15.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
new file mode 100644
index 0000000..f81a128
--- /dev/null
+++ b/source/documentation/0.15.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png b/source/documentation/0.15.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
new file mode 100644
index 0000000..6ebce80
--- /dev/null
+++ b/source/documentation/0.15.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png b/source/documentation/0.15.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
new file mode 100644
index 0000000..f8a3db1
--- /dev/null
+++ b/source/documentation/0.15.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/presentations/08_21_2014_past_present_future_thumb.png b/source/documentation/0.15.0/images/presentations/08_21_2014_past_present_future_thumb.png
new file mode 100644
index 0000000..deaebe7
--- /dev/null
+++ b/source/documentation/0.15.0/images/presentations/08_21_2014_past_present_future_thumb.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png b/source/documentation/0.15.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
new file mode 100644
index 0000000..61ea6d4
--- /dev/null
+++ b/source/documentation/0.15.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png b/source/documentation/0.15.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png
new file mode 100644
index 0000000..9f1f4af
--- /dev/null
+++ b/source/documentation/0.15.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png b/source/documentation/0.15.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
new file mode 100644
index 0000000..a8bb199
--- /dev/null
+++ b/source/documentation/0.15.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png b/source/documentation/0.15.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
new file mode 100644
index 0000000..ac5c6dc
--- /dev/null
+++ b/source/documentation/0.15.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/runningtask.png b/source/documentation/0.15.0/images/runningtask.png
new file mode 100644
index 0000000..9e7e31a
--- /dev/null
+++ b/source/documentation/0.15.0/images/runningtask.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/stderr.png b/source/documentation/0.15.0/images/stderr.png
new file mode 100644
index 0000000..6076a84
--- /dev/null
+++ b/source/documentation/0.15.0/images/stderr.png
Binary files differ
diff --git a/source/documentation/0.15.0/images/stdout.png b/source/documentation/0.15.0/images/stdout.png
new file mode 100644
index 0000000..d614097
--- /dev/null
+++ b/source/documentation/0.15.0/images/stdout.png
Binary files differ
diff --git a/publish/documentation/latest/images/storage_hierarchy.png b/source/documentation/0.15.0/images/storage_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/storage_hierarchy.png
copy to source/documentation/0.15.0/images/storage_hierarchy.png
Binary files differ
diff --git a/source/documentation/0.15.0/index.html.md b/source/documentation/0.15.0/index.html.md
new file mode 100644
index 0000000..cec01e1
--- /dev/null
+++ b/source/documentation/0.15.0/index.html.md
@@ -0,0 +1,75 @@
+## Introduction
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+We encourage you to ask questions on the [Aurora user list](http://aurora.apache.org/community/) or
+the `#aurora` IRC channel on `irc.freenode.net`.
+
+
+## Getting Started
+Information for everyone new to Apache Aurora.
+
+ * [Aurora System Overview](getting-started/overview/)
+ * [Hello World Tutorial](getting-started/tutorial/)
+ * [Local cluster with Vagrant](getting-started/vagrant/)
+
+## Features
+Description of important Aurora features.
+
+ * [Containers](features/containers/)
+ * [Cron Jobs](features/cron-jobs/)
+ * [Job Updates](features/job-updates/)
+ * [Mesos Fetcher](features/mesos-fetcher/)
+ * [Multitenancy](features/multitenancy/)
+ * [Resource Isolation](features/resource-isolation/)
+ * [Scheduling Constraints](features/constraints/)
+ * [Services](features/services/)
+ * [Service Discovery](features/service-discovery/)
+ * [SLA Metrics](features/sla-metrics/)
+ * [Webhooks](features/webhooks/)
+
+## Operators
+For those that wish to manage and fine-tune an Aurora cluster.
+
+ * [Installation](operations/installation/)
+ * [Configuration](operations/configuration/)
+ * [Monitoring](operations/monitoring/)
+ * [Security](operations/security/)
+ * [Storage](operations/storage/)
+ * [Backup](operations/backup-restore/)
+
+## Reference
+The complete reference of commands, configuration options, and scheduler internals.
+
+ * [Task lifecycle](reference/task-lifecycle/)
+ * Configuration (`.aurora` files)
+    - [Configuration Reference](reference/configuration/)
+    - [Configuration Tutorial](reference/configuration-tutorial/)
+    - [Configuration Best Practices](reference/configuration-best-practices/)
+    - [Configuration Templating](reference/configuration-templating/)
+ * Aurora Client
+    - [Client Commands](reference/client-commands/)
+    - [Client Hooks](reference/client-hooks/)
+    - [Client Cluster Configuration](reference/client-cluster-configuration/)
+ * [Scheduler Configuration](reference/scheduler-configuration/)
+
+## Additional Resources
+ * [Tools integrating with Aurora](additional-resources/tools/)
+ * [Presentation videos and slides](additional-resources/presentations/)
+
+## Developers
+All the information you need to start modifying Aurora and contributing back to the project.
+
+ * [Contributing to the project](contributing/)
+ * [Committer's Guide](development/committers-guide/)
+ * [Design Documents](development/design-documents/)
+ * Developing the Aurora components:
+     - [Client](development/client/)
+     - [Scheduler](development/scheduler/)
+     - [Scheduler UI](development/ui/)
+     - [Thermos](development/thermos/)
+     - [Thrift structures](development/thrift/)
+
+
diff --git a/source/documentation/0.15.0/operations/backup-restore.md b/source/documentation/0.15.0/operations/backup-restore.md
new file mode 100644
index 0000000..8dac40a
--- /dev/null
+++ b/source/documentation/0.15.0/operations/backup-restore.md
@@ -0,0 +1,91 @@
+# Recovering from a Scheduler Backup
+
+**Be sure to read the entire page before attempting to restore from a backup, as it may have
+unintended consequences.**
+
+# Summary
+
+The restoration procedure replaces the existing (possibly corrupted) Mesos replicated log with an
+earlier, backed up, version and requires all schedulers to be taken down temporarily while
+restoring. Once completed, the scheduler state resets to what it was when the backup was created.
+This means any jobs/tasks created or updated after the backup are unknown to the scheduler and will
+be killed shortly after the cluster restarts. All other tasks continue operating as normal.
+
+Usually, it is a bad idea to restore a backup that is not extremely recent (i.e. older than a few
+hours). This is because the scheduler will expect the cluster to look exactly as the backup does,
+so any tasks that have been rescheduled since the backup was taken will be killed.
+
+The instructions below have been verified in the [Vagrant environment](../../getting-started/vagrant/) and,
+with minor syntax/path changes, should be applicable to any Aurora cluster.
+
+# Preparation
+
+Follow these steps to prepare the cluster for restoring from a backup:
+
+* Stop all scheduler instances
+
+* Consider blocking external traffic on a port defined in `-http_port` for all schedulers to
+prevent users from interacting with the scheduler during the restoration process. This will help
+troubleshooting by reducing the scheduler log noise and prevent users from making changes that will
+be erased after the backup snapshot is restored.
+
+* Configure `aurora_admin` access to run all commands listed in
+  [Restore from backup](#restore-from-backup) section locally on the leading scheduler:
+  * Make sure the [clusters.json](../../reference/client-cluster-configuration/) file is configured
+    to access the scheduler directly: set the `scheduler_uri` setting and remove `zk`. Since the
+    leader can get re-elected during the restore steps, consider doing this on all scheduler replicas.
+  * Depending on your particular security approach, you will need to either turn off scheduler
+    authorization by removing the scheduler's `-http_authentication_mechanism` flag, or make sure
+    direct scheduler access is properly authorized. E.g.: in the case of Kerberos you will need to
+    add an `/etc/hosts` entry matching your local IP to the scheduler URL configured in the keytabs:
+
+        <local_ip> <scheduler_domain_in_keytabs>
+
+* The next steps are required to put the scheduler into a partially disabled state in which it is
+still able to accept storage recovery requests but unable to schedule or change task states. This
+may be accomplished by updating the following scheduler configuration options:
+  * Set `-mesos_master_address` to a non-existent zk address. This will prevent the scheduler from
+    registering with Mesos. E.g.: `-mesos_master_address=zk://localhost:1111/mesos/master`
+  * Set `-max_registration_delay` to a sufficiently long interval to prevent a registration timeout
+    and, as a result, scheduler suicide. E.g.: `-max_registration_delay=360mins`
+  * Make sure the `-reconciliation_initial_delay` option is set high enough (e.g.: `365days`) to
+    prevent accidental task GC. This is important, as the scheduler will attempt to reconcile the
+    cluster state and will kill all tasks when restarted with an empty Mesos replicated log.
+
+* Restart all schedulers
+
+# Cleanup and re-initialize Mesos replicated log
+
+Get rid of the corrupted files and re-initialize Mesos replicated log:
+
+* Stop schedulers
+* Delete all files under `-native_log_file_path` on all schedulers
+* Initialize Mesos replica's log file: `sudo mesos-log initialize --path=<-native_log_file_path>`
+* Start schedulers
+
+# Restore from backup
+
+At this point the scheduler is ready to rehydrate from the backup:
+
+* Identify the leading scheduler by:
+  * examining the `scheduler_lifecycle_LEADER_AWAITING_REGISTRATION` metric at the scheduler
+    `/vars` endpoint: the leader will report 1, all other replicas 0
+  * examining scheduler logs
+  * or examining the ZooKeeper registration under the path defined by `-zk_endpoints`
+    and `-serverset_path`
+
+* Locate the desired backup file, copy it to the leading scheduler's `-backup_dir` folder and stage
+the recovery by running the following command on the leader:
+`aurora_admin scheduler_stage_recovery --bypass-leader-redirect <cluster> scheduler-backup-<yyyy-MM-dd-HH-mm>`
+
+* At this point, the recovery snapshot is staged and available for manual verification/modification
+via `aurora_admin scheduler_print_recovery_tasks --bypass-leader-redirect` and
+`scheduler_delete_recovery_tasks --bypass-leader-redirect` commands.
+See `aurora_admin help <command>` for usage details.
+
+* Commit the recovery. This instructs the scheduler to overwrite the existing Mesos replicated log
+with the provided backup snapshot and initiate a mandatory failover:
+`aurora_admin scheduler_commit_recovery --bypass-leader-redirect <cluster>`
+
+# Cleanup
+
+Undo any modifications done during the [Preparation](#preparation) sequence.
diff --git a/source/documentation/0.15.0/operations/configuration.md b/source/documentation/0.15.0/operations/configuration.md
new file mode 100644
index 0000000..86a6603
--- /dev/null
+++ b/source/documentation/0.15.0/operations/configuration.md
@@ -0,0 +1,302 @@
+# Scheduler Configuration
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+Examples are available under `examples/scheduler/`. For a list of available Aurora flags and their
+documentation, see [Scheduler Configuration Reference](../../reference/scheduler-configuration/).
+
+
+## A Note on Configuration
+Like Mesos, Aurora uses command-line flags for runtime configuration. As such the Aurora
+"configuration file" is typically a `scheduler.sh` shell script of the following form:
+
+    #!/bin/bash
+    AURORA_HOME=/usr/local/aurora-scheduler
+
+    # Flags controlling the JVM.
+    JAVA_OPTS=(
+      -Xmx2g
+      -Xms2g
+      # GC tuning, etc.
+    )
+
+    # Flags controlling the scheduler.
+    AURORA_FLAGS=(
+      # Port for client RPCs and the web UI
+      -http_port=8081
+      # Log configuration, etc.
+    )
+
+    # Environment variables controlling libmesos
+    export JAVA_HOME=...
+    export GLOG_v=1
+    # Port used to communicate with the Mesos master and for the replicated log
+    export LIBPROCESS_PORT=8083
+
+    JAVA_OPTS="${JAVA_OPTS[*]}" exec "$AURORA_HOME/bin/aurora-scheduler" "${AURORA_FLAGS[@]}"
+
+That way Aurora's current flags are visible in `ps` and in the `/vars` admin endpoint.
+
+
+## Replicated Log Configuration
+
+Aurora schedulers use ZooKeeper to discover log replicas and elect a leader. Only one scheduler is
+leader at a given time - the other schedulers follow log writes and prepare to take over as leader
+but do not communicate with the Mesos master. Either 3 or 5 schedulers are recommended in a
+production deployment depending on failure tolerance and they must have persistent storage.
+
+Below is a summary of scheduler storage configuration flags that either don't have default values
+or require attention before deploying in a production environment.
+
+### `-native_log_quorum_size`
+Defines the Mesos replicated log quorum size. In a cluster with `N` schedulers, the flag
+`-native_log_quorum_size` should be set to `floor(N/2) + 1`. So in a cluster with 1 scheduler
+it should be set to `1`, in a cluster with 3 it should be set to `2`, and in a cluster of 5 it
+should be set to `3`.
+
+  Number of schedulers (N) | `-native_log_quorum_size` setting (`floor(N/2) + 1`)
+  ------------------------ | -----------------------------------------------------
+  1                        | 1
+  3                        | 2
+  5                        | 3
+  7                        | 4
+
+*Incorrectly setting this flag will cause data corruption to occur!*
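+
+The table is just the majority rule `floor(N/2) + 1`, shown here as a quick Python
+illustration:
+
+```python
+def quorum_size(num_schedulers):
+  """Majority quorum size for a replicated log with N scheduler replicas."""
+  return num_schedulers // 2 + 1
+
+assert [quorum_size(n) for n in (1, 3, 5, 7)] == [1, 2, 3, 4]
+```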
+
+### `-native_log_file_path`
+Location of the Mesos replicated log files. Consider allocating a dedicated disk (preferably SSD)
+for Mesos replicated log files to ensure optimal storage performance.
+
+### `-native_log_zk_group_path`
+ZooKeeper path used for Mesos replicated log quorum discovery.
+
+See [code](https://github.com/apache/aurora/blob/rel/0.15.0/src/main/java/org/apache/aurora/scheduler/log/mesos/MesosLogStreamModule.java) for
+other available Mesos replicated log configuration options and default values.
+
+### Changing the Quorum Size
+Special care needs to be taken when changing the size of the Aurora scheduler quorum.
+Since Aurora uses a Mesos replicated log, similar steps need to be followed as when
+[changing the Mesos quorum size](http://mesos.apache.org/documentation/latest/operational-guide).
+
+As preparation, increase `-native_log_quorum_size` on each existing scheduler and restart them.
+When updating from 3 to 5 schedulers, the quorum size would grow from 2 to 3.
+
+When starting the new schedulers, use the `-native_log_quorum_size` set to the new value. Failing to
+first increase the quorum size on running schedulers can in some cases result in corruption
+or truncating of the replicated log used by Aurora. In that case, see the documentation on
+[recovering from backup](../backup-restore/).
+
+
+## Backup Configuration
+
+Configuration options for the Aurora scheduler backup manager.
+
+### `-backup_interval`
+The interval on which the scheduler writes local storage backups.  The default is every hour.
+
+### `-backup_dir`
+Directory to write backups to.
+
+### `-max_saved_backups`
+Maximum number of backups to retain before deleting the oldest backup(s).
+
+
+## Process Logs
+
+### Log destination
+By default, Thermos will write process stdout/stderr to log files in the sandbox. Process object configuration
+allows specifying alternate log file destinations like streamed stdout/stderr or suppression of all log output.
+Default behavior can be configured for the entire cluster with the following flag (through the `-thermos_executor_flags`
+argument to the Aurora scheduler):
+
+    --runner-logger-destination=both
+
+The `both` setting will both write logs to files and stream them to the parent stdout/stderr outputs.
+
+See [Configuration Reference](../../reference/configuration/#logger) for all destination options.
+
+### Log rotation
+By default, Thermos will not rotate the stdout/stderr logs from child processes and they will grow
+without bound. An individual user may change this behavior via configuration on the Process object,
+but it may also be desirable to change the default configuration for the entire cluster.
+In order to enable rotation by default, the following flags can be applied to Thermos (through the
+`-thermos_executor_flags` argument to the Aurora scheduler):
+
+    --runner-logger-mode=rotate
+    --runner-rotate-log-size-mb=100
+    --runner-rotate-log-backups=10
+
+In the above example, each instance of the Thermos runner will rotate stderr/stdout logs once they
+reach 100 MiB in size and keep a maximum of 10 backups. If a user has provided a custom setting for
+their process, it will override these default settings.
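+
+For comparison, a per-process override is set on the `Process` object itself. A sketch of what
+that might look like, using the `Logger`/`RotatePolicy` objects documented in the
+[Configuration Reference](../../reference/configuration/#logger) (treat the exact field names as
+an assumption to verify there):
+
+```python
+# Hypothetical per-process override: rotate at 200 MiB, keep 5 backups.
+chatty = Process(
+  name = 'chatty_service',
+  cmdline = './run_service.sh',
+  logger = Logger(
+    mode = 'rotate',
+    rotate = RotatePolicy(log_size = 200*MB, backups = 5)))
+```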
+
+
+
+## Thermos Executor Wrapper
+
+If you need to do computation before starting the thermos executor (for example, setting a different
+`--announcer-hostname` parameter for every executor), then the thermos executor should be invoked
+inside a wrapper script. In such a case, the Aurora scheduler should be started with
+`-thermos_executor_path` pointing to the wrapper script and `-thermos_executor_resources`
+set to a comma-separated string of all the resources that should be copied into
+the sandbox (including the original thermos executor).
+
+For example, to wrap the executor inside a simple wrapper, the scheduler would be started like this:
+`-thermos_executor_path=/path/to/wrapper.sh -thermos_executor_resources=/usr/share/aurora/bin/thermos_executor.pex`
+
+## Custom Executor
+
+If the need arises to use a Mesos executor other than the Thermos executor, the scheduler can be
+configured to utilize a custom executor by specifying the `-custom_executor_config` flag.
+The flag must be set to the path of a valid executor configuration file.
+
+The configuration file must be valid JSON and contain, at minimum, the `name`, `command` and
+`resources` fields.
+
+### executor
+
+Property                 | Description
+-----------------------  | ---------------------------------
+name (required)          | Name of the executor.
+command (required)       | How to run the executor.
+resources (required)     | Overhead to use for each executor instance.
+
+#### command
+
+Property                 | Description
+-----------------------  | ---------------------------------
+value (required)         | The command to execute.
+arguments (optional)     | A list of arguments to pass to the command.
+uris (optional)          | List of resources to download into the task sandbox.
+
+##### uris (list)
+* Follows the [Mesos Fetcher schema](http://mesos.apache.org/documentation/latest/fetcher/)
+
+Property                 | Description
+-----------------------  | ---------------------------------
+value (required)         | Path to the resource needed in the sandbox.
+executable (optional)    | Change resource to be executable via chmod.
+extract (optional)       | Extract files from packed or compressed archives into the sandbox.
+cache (optional)         | Use caching mechanism provided by Mesos for resources.
+
+#### resources (list)
+
+Property             | Description
+-------------------  | ---------------------------------
+name (required)      | Name of the resource: cpus or mem.
+type (required)      | Type of resource. Should always be SCALAR.
+scalar (required)    | Value in float for cpus or int for mem (in MBs)
+
+### volume_mounts (list)
+
+Property                  | Description
+------------------------  | ---------------------------------
+host_path (required)      | Host path to mount inside the container.
+container_path (required) | Path inside the container where `host_path` will be mounted.
+mode (required)           | Mode in which to mount the volume, Read-Write (RW) or Read-Only (RO).
+
+
+A sample configuration is as follows:
+
+```json
+     {
+       "executor": {
+         "name": "myExecutor",
+         "command": {
+           "value": "myExecutor.sh",
+           "arguments": [
+             "localhost:2181",
+             "-verbose",
+             "-config myConfiguration.config"
+           ],
+           "uris": [
+             {
+               "value": "/dist/myExecutor.sh",
+               "executable": true,
+               "extract": false,
+               "cache": true
+             },
+             {
+               "value": "/home/user/myConfiguration.config",
+               "executable": false,
+               "extract": false,
+               "cache": false
+             }
+           ]
+         },
+         "resources": [
+           {
+             "name": "cpus",
+             "type": "SCALAR",
+             "scalar": {
+               "value": 1.00
+             }
+           },
+           {
+             "name": "mem",
+             "type": "SCALAR",
+             "scalar": {
+               "value": 512
+             }
+           }
+         ]
+       },
+       "volume_mounts": [
+         {
+           "mode": "RO",
+           "container_path": "/path/on/container",
+           "host_path": "/path/to/host/directory"
+         },
+         {
+           "mode": "RW",
+           "container_path": "/container",
+           "host_path": "/host"
+         }
+       ]
+     }
+```
+
+It should be noted that if you do not use Thermos or a Thermos-based executor, links in the
+scheduler's Web UI for tasks will not work (at least for the time being).
+Some information about launched tasks can still be accessed via the Mesos Web UI or via the Aurora client.
+Furthermore, this configuration replaces the default Thermos executor.
+Work is in progress to allow support for multiple executors to co-exist within a single scheduler.
+
+### Docker containers
+In order for Aurora to launch jobs using docker containers, a few extra configuration options
+must be set.  The [docker containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/)
+must be enabled on the Mesos agents by launching them with the `--containerizers=docker,mesos` option.
+
+By default, Aurora will configure Mesos to copy the file specified in `-thermos_executor_path`
+into the container's sandbox.  If using a wrapper script to launch the thermos executor,
+specify the path to the wrapper in that argument. In addition, the path to the executor pex itself
+must be included in the `-thermos_executor_resources` option. Doing so will ensure that both the
+wrapper script and executor are correctly copied into the sandbox. Finally, ensure the wrapper
+script does not access resources outside of the sandbox, as when the script is run from within a
+docker container those resources will not exist.
+
+A scheduler flag, `-global_container_mounts`, allows mounting paths from the host (i.e. the agent machine)
+into all containers on that host. The format is a comma-separated list of `host_path:container_path[:mode]`
+tuples. For example `-global_container_mounts=/opt/secret_keys_dir:/mnt/secret_keys_dir:ro` mounts
+`/opt/secret_keys_dir` from the agents into all launched containers. Valid modes are `ro` and `rw`.
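+
+To make the tuple format concrete, here is a small Python sketch of how one entry decomposes
+(for explanation only; this is not scheduler code):
+
+```python
+def parse_mount(entry):
+  """Split host_path:container_path[:mode] into its parts."""
+  parts = entry.split(':')
+  host_path, container_path = parts[0], parts[1]
+  mode = parts[2] if len(parts) == 3 else None  # mode is optional
+  return host_path, container_path, mode
+
+assert parse_mount('/opt/secret_keys_dir:/mnt/secret_keys_dir:ro') == (
+    '/opt/secret_keys_dir', '/mnt/secret_keys_dir', 'ro')
+```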
+
+If you would like to run a container with a read-only filesystem, it may also be necessary to
+use the scheduler flag `-thermos_home_in_sandbox` in order to set HOME to the sandbox
+before the executor runs. This will make sure that the executor/runner PEX extraction happens
+inside the sandbox instead of at the container filesystem root.
+
+If you would like to supply your own parameters to `docker run` when launching jobs in docker
+containers, you may use the following flags:
+
+    -allow_docker_parameters
+    -default_docker_parameters
+
+`-allow_docker_parameters` controls whether or not users may pass their own configuration parameters
+through the job configuration files. If set to `false` (the default), the scheduler will reject
+jobs with custom parameters. *NOTE*: this setting should be used with caution as it allows any job
+owner to specify any parameters they wish, including those that may introduce security concerns
+(`privileged=true`, for example).
+
+`-default_docker_parameters` allows a cluster operator to specify a universal set of parameters that
+should be used for every container that does not have parameters explicitly configured at the job
+level. The argument accepts a multimap format:
+
+    -default_docker_parameters="read-only=true,tmpfs=/tmp,tmpfs=/run"
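+
+As an illustration of the multimap format (repeated keys are allowed), a Python sketch of how
+such a value decomposes into individual parameters (explanation only, not scheduler code):
+
+```python
+def parse_parameters(multimap):
+  """Split 'k=v,k2=v2,...' into (key, value) pairs; keys may repeat."""
+  return [tuple(item.split('=', 1)) for item in multimap.split(',')]
+
+assert parse_parameters('read-only=true,tmpfs=/tmp,tmpfs=/run') == [
+    ('read-only', 'true'), ('tmpfs', '/tmp'), ('tmpfs', '/run')]
+```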
diff --git a/source/documentation/0.15.0/operations/installation.md b/source/documentation/0.15.0/operations/installation.md
new file mode 100644
index 0000000..56dc8c8
--- /dev/null
+++ b/source/documentation/0.15.0/operations/installation.md
@@ -0,0 +1,324 @@
+# Installing Aurora
+
+Source and binary distributions can be found on our
+[downloads](https://aurora.apache.org/downloads/) page.  Installing from binary packages is
+recommended for most.
+
+- [Installing the scheduler](#installing-the-scheduler)
+- [Installing worker components](#installing-worker-components)
+- [Installing the client](#installing-the-client)
+- [Installing Mesos](#installing-mesos)
+- [Troubleshooting](#troubleshooting)
+
+If our binary packages don't suit you, our package build toolchain makes it easy to build your
+own packages. See the [instructions](https://github.com/apache/aurora-packaging) to learn how.
+
+
+## Machine profiles
+
+Given that many of these components communicate over the network, there are numerous ways you could
+assemble them to create an Aurora cluster.  The simplest way is to think in terms of three machine
+profiles:
+
+### Coordinator
+**Components**: ZooKeeper, Aurora scheduler, Mesos master
+
+A small number of machines (typically 3 or 5) responsible for cluster orchestration.  In most cases
+it is fine to co-locate these components in anything but very large clusters (> 1000 machines).
+Beyond that point, operators will likely want to manage these services on separate machines.
+
+In practice, 5 coordinators have been shown to reliably manage clusters with tens of thousands of
+machines.
+
+### Worker
+**Components**: Aurora executor, Aurora observer, Mesos agent
+
+The bulk of the cluster, where services will actually run.
+
+### Client
+**Components**: Aurora client, Aurora admin client
+
+Any machines that users submit jobs from.
+
+
+## Installing the scheduler
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        sudo start mesos-master
+
+2. Install ZooKeeper
+
+        sudo apt-get install -y zookeeperd
+
+3. Install the Aurora scheduler
+
+        sudo add-apt-repository -y ppa:openjdk-r/ppa
+        sudo apt-get update
+        sudo apt-get install -y openjdk-8-jre-headless wget
+
+        sudo update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-scheduler_0.12.0_amd64.deb
+        sudo dpkg -i aurora-scheduler_0.12.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-master
+
+2. Install ZooKeeper
+
+        sudo rpm -Uvh https://archive.cloudera.com/cdh4/one-click-install/redhat/6/x86_64/cloudera-cdh-4-0.x86_64.rpm
+        sudo yum install -y java-1.8.0-openjdk-headless zookeeper-server
+
+        sudo service zookeeper-server init
+        sudo systemctl start zookeeper-server
+
+3. Install the Aurora scheduler
+
+        sudo yum install -y wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-scheduler-0.12.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-scheduler-0.12.0-1.el7.centos.aurora.x86_64.rpm
+
+### Finalizing
+By default, the scheduler will start in an uninitialized mode.  This is because external
+coordination is necessary to be certain operator error does not result in a quorum of schedulers
+starting up and believing their databases are empty when in fact they should be re-joining a
+cluster.
+
+Because of this, a fresh install of the scheduler will need intervention to start up.  First,
+stop the scheduler service:
+
+* Ubuntu: `sudo stop aurora-scheduler`
+* CentOS: `sudo systemctl stop aurora`
+
+Now initialize the database:
+
+    sudo -u aurora mkdir -p /var/lib/aurora/scheduler/db
+    sudo -u aurora mesos-log initialize --path=/var/lib/aurora/scheduler/db
+
+Now you can start the scheduler back up:
+
+* Ubuntu: `sudo start aurora-scheduler`
+* CentOS: `sudo systemctl start aurora`
+
+
+## Installing worker components
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo apt-get install -y python2.7 wget
+
+        # NOTE: This appears to be a missing dependency of the mesos deb package and is needed
+        # for the python mesos native bindings.
+        sudo apt-get -y install libcurl4-nss-dev
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-executor_0.12.0_amd64.deb
+        sudo dpkg -i aurora-executor_0.12.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo yum install -y python2 wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-executor-0.12.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-executor-0.12.0-1.el7.centos.aurora.x86_64.rpm
+
+### Configuration
+The executor typically does not require configuration.  If needed, command line arguments can be
+passed to the executor via a command line argument on the scheduler.
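+
+A hedged sketch of how this is typically wired up (the `-thermos_executor_flags` scheduler flag and
+the announcer flag shown here are assumptions; confirm them against your scheduler's `-help` output):
+
+    # In the scheduler start script, forward extra flags to every launched executor:
+    -thermos_executor_flags="--announcer-ensemble localhost:2181"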
+
+The observer needs to be configured to look at the correct mesos directory in order to find task
+sandboxes. You should first find the Mesos working directory by looking for the Mesos agent
+`--work_dir` flag. You should see something like:
+
+        ps -eocmd | grep "mesos-slave" | grep -v grep | tr ' ' '\n' | grep "\--work_dir"
+        --work_dir=/var/lib/mesos
+
+If the flag is not set, you can view the default value like so:
+
+        mesos-slave --help
+        Usage: mesos-slave [options]
+
+          ...
+          --work_dir=VALUE      Directory path to place framework work directories
+                                (default: /tmp/mesos)
+          ...
+
+The value you find for `--work_dir`, `/var/lib/mesos` in this example, should match the Aurora
+observer value for `--mesos-root`.  You can look for that setting in a similar way on a worker
+node by grepping for `thermos_observer` and `--mesos-root`.  If the flag is not set, you can view
+the default value like so:
+
+        thermos_observer -h
+        Options:
+          ...
+          --mesos-root=MESOS_ROOT
+                                The mesos root directory to search for Thermos
+                                executor sandboxes [default: /var/lib/mesos]
+          ...
+
+In this case the default is `/var/lib/mesos` and we have a match. If there is no match, you can
+either adjust the Mesos agent start script(s) and restart the agent(s) or else adjust the
+Aurora observer start scripts and restart the observers.  To adjust the Aurora observer:
+
+#### Ubuntu Trusty
+
+    sudo sh -c 'echo "MESOS_ROOT=/tmp/mesos" >> /etc/default/thermos'
+
+NB: In Aurora releases up through 0.12.0, you'll also need to edit /etc/init/thermos.conf like so:
+
+    diff -C 1 /etc/init/thermos.conf.orig /etc/init/thermos.conf
+    *** /etc/init/thermos.conf.orig 2016-03-22 22:34:46.286199718 +0000
+    --- /etc/init/thermos.conf  2016-03-22 17:09:49.357689038 +0000
+    ***************
+    *** 24,25 ****
+    --- 24,26 ----
+          --port=${OBSERVER_PORT:-1338} \
+    +     --mesos-root=${MESOS_ROOT:-/var/lib/mesos} \
+          --log_to_disk=NONE \
+
+#### CentOS 7
+
+Make an edit to add the `--mesos-root` flag resulting in something like:
+
+    grep -A5 OBSERVER_ARGS /etc/sysconfig/thermos
+    OBSERVER_ARGS=(
+      --port=1338
+      --mesos-root=/tmp/mesos
+      --log_to_disk=NONE
+      --log_to_stderr=google:INFO
+    )
+
+## Installing the client
+### Ubuntu Trusty
+
+    sudo apt-get install -y python2.7 wget
+
+    wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-tools_0.12.0_amd64.deb
+    sudo dpkg -i aurora-tools_0.12.0_amd64.deb
+
+### CentOS 7
+
+    sudo yum install -y python2 wget
+
+    wget -c https://apache.bintray.com/aurora/centos-7/aurora-tools-0.12.0-1.el7.centos.aurora.x86_64.rpm
+    sudo yum install -y aurora-tools-0.12.0-1.el7.centos.aurora.x86_64.rpm
+
+### Mac OS X
+
+    brew upgrade
+    brew install aurora-cli
+
+### Configuration
+Client configuration lives in a json file that describes the clusters available and how to reach
+them.  By default this file is at `/etc/aurora/clusters.json`.
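+
+A minimal sketch of such a file, assuming a single ZooKeeper host at 192.168.33.7 (the cluster name
+and address here are illustrative only):
+
+    cat /etc/aurora/clusters.json
+    [{
+      "name": "devcluster",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler",
+      "auth_mechanism": "UNAUTHENTICATED"
+    }]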
+
+Jobs may be submitted to the scheduler using the client, and are described with
+[job configurations](../../reference/configuration/) expressed in `.aurora` files.  Typically you will
+maintain a single job configuration file to describe one or more deployment environments (e.g.
+dev, test, prod) for a production job.
+
+
+## Installing Mesos
+Mesos uses a single package for the Mesos master and agent.  As a result, the package dependencies
+are identical for both.
+
+### Mesos on Ubuntu Trusty
+
+    sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF
+    DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]')
+    CODENAME=$(lsb_release -cs)
+
+    echo "deb http://repos.mesosphere.io/${DISTRO} ${CODENAME} main" | \
+      sudo tee /etc/apt/sources.list.d/mesosphere.list
+    sudo apt-get -y update
+
+    # Use `apt-cache showpkg mesos | grep [version]` to find the exact version.
+    sudo apt-get -y install mesos=0.25.0-0.2.70.ubuntu1404
+
+### Mesos on CentOS 7
+
+    sudo rpm -Uvh https://repos.mesosphere.io/el/7/noarch/RPMS/mesosphere-el-repo-7-1.noarch.rpm
+    sudo yum -y install mesos-0.25.0
+
+
+
+## Troubleshooting
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions here to help get you moving.
+
+### Replicated log not initialized
+
+#### Symptoms
+- Scheduler RPCs and web interface claim `Storage is not READY`
+- Scheduler log repeatedly prints messages like
+
+  ```
+  I1016 16:12:27.234133 26081 replica.cpp:638] Replica in EMPTY status
+  received a broadcasted recover request
+  I1016 16:12:27.234256 26084 recover.cpp:188] Received a recover response
+  from a replica in EMPTY status
+  ```
+
+#### Solution
+When you create a new cluster, you need to inform a quorum of schedulers that they are safe to
+consider their database to be empty by [initializing](#finalizing) the
+replicated log. This is done to prevent the scheduler from modifying the cluster state in the event
+of multiple simultaneous disk failures or, more likely, misconfiguration of the replicated log path.
+
+
+### Scheduler not registered
+
+#### Symptoms
+Scheduler log contains
+
+    Framework has not been registered within the tolerated delay.
+
+#### Solution
+Double-check that the scheduler is configured correctly to reach the Mesos master. If you are registering
+the master in ZooKeeper, make sure the command line argument given to the master:
+
+    --zk=zk://$ZK_HOST:2181/mesos/master
+
+is the same as the one on the scheduler:
+
+    -mesos_master_address=zk://$ZK_HOST:2181/mesos/master
+
+
+### Scheduler not running
+
+#### Symptoms
+The scheduler process commits suicide regularly. This happens under error conditions, but
+also on purpose in regular intervals.
+
+#### Solution
+Aurora is meant to be run under supervision. You have to configure a supervisor like
+[Monit](http://mmonit.com/monit/) or [supervisord](http://supervisord.org/) to run the scheduler
+and restart it whenever it fails or exits on purpose.
+
+Aurora supports an active health checking protocol on its admin HTTP interface - if a `GET /health`
+times out or returns anything other than `200 OK` the scheduler process is unhealthy and should be
+restarted.
+
+For example, monit can be configured with
+
+    if failed port 8081 send "GET /health HTTP/1.0\r\n" expect "OK\n" with timeout 2 seconds for 10 cycles then restart
+
+assuming you set `-http_port=8081`.
diff --git a/source/documentation/0.15.0/operations/monitoring.md b/source/documentation/0.15.0/operations/monitoring.md
new file mode 100644
index 0000000..2ceda62
--- /dev/null
+++ b/source/documentation/0.15.0/operations/monitoring.md
@@ -0,0 +1,181 @@
+# Monitoring your Aurora cluster
+
+Before you start running important services in your Aurora cluster, it's important to set up
+monitoring and alerting of Aurora itself.  Most of your monitoring can be against the scheduler,
+since it will give you a global view of what's going on.
+
+## Reading stats
+The scheduler exposes a *lot* of instrumentation data via its HTTP interface. You can get a quick
+peek at the first few of these in our vagrant image:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars | head'
+    async_tasks_completed 1004
+    attribute_store_fetch_all_events 15
+    attribute_store_fetch_all_events_per_sec 0.0
+    attribute_store_fetch_all_nanos_per_event 0.0
+    attribute_store_fetch_all_nanos_total 3048285
+    attribute_store_fetch_all_nanos_total_per_sec 0.0
+    attribute_store_fetch_one_events 3391
+    attribute_store_fetch_one_events_per_sec 0.0
+    attribute_store_fetch_one_nanos_per_event 0.0
+    attribute_store_fetch_one_nanos_total 454690753
+
+These values are served as `Content-Type: text/plain`, with each line containing a space-separated metric
+name and value. Values may be integers, doubles, or strings (note: strings are static, others
+may be dynamic).
+
+If your monitoring infrastructure prefers JSON, the scheduler exports that as well:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars.json | python -mjson.tool | head'
+    {
+        "async_tasks_completed": 1009,
+        "attribute_store_fetch_all_events": 15,
+        "attribute_store_fetch_all_events_per_sec": 0.0,
+        "attribute_store_fetch_all_nanos_per_event": 0.0,
+        "attribute_store_fetch_all_nanos_total": 3048285,
+        "attribute_store_fetch_all_nanos_total_per_sec": 0.0,
+        "attribute_store_fetch_one_events": 3409,
+        "attribute_store_fetch_one_events_per_sec": 0.0,
+        "attribute_store_fetch_one_nanos_per_event": 0.0,
+
+This will be the same data as above, served with `Content-Type: application/json`.
+
+## Viewing live stat samples on the scheduler
+The scheduler uses the Twitter commons stats library, which keeps an internal time-series database
+of exported variables - nearly everything in `/vars` is available for instant graphing.  This is
+useful for debugging, but is not a replacement for an external monitoring system.
+
+You can view these graphs on a scheduler at `/graphview`.  It supports some composition and
+aggregation of values, which can be invaluable when triaging a problem.  For example, if you have
+the scheduler running in vagrant, check out these links:
+[simple graph](http://192.168.33.7:8081/graphview?query=jvm_uptime_secs)
+[complex composition](http://192.168.33.7:8081/graphview?query=rate\(scheduler_log_native_append_nanos_total\)%2Frate\(scheduler_log_native_append_events\)%2F1e6)
+
+### Counters and gauges
+Among numeric stats, there are two fundamental types of stats exported: _counters_ and _gauges_.
+Counters are guaranteed to be monotonically-increasing for the lifetime of a process, while gauges
+may decrease in value.  Aurora uses counters to represent things like the number of times an event
+has occurred, and gauges to capture things like the current length of a queue.  Counters are a
+natural fit for accurate composition into [rate ratios](http://en.wikipedia.org/wiki/Rate_ratio)
+(useful for sample-resistant latency calculation), while gauges are not.
+
+# Alerting
+
+## Quickstart
+If you are looking for just bare-minimum alerting to get something in place quickly, set up alerting
+on `framework_registered` and `task_store_LOST`. These will give you a decent picture of overall
+health.
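+
+As a minimal sketch (the scheduler host/port is assumed; adapt the check to your alerting system),
+you might poll `/vars` and alert whenever the scheduler is not the registered leader:
+
+    curl -s localhost:8081/vars | grep -q '^framework_registered 1$' \
+      || echo "ALERT: scheduler is not registered with the Mesos master"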
+
+## A note on thresholds
+One of the most difficult things in monitoring is choosing alert thresholds. With many of these
+stats, there is no value we can offer as a threshold that will be guaranteed to work for you. It
+will depend on the size of your cluster, number of jobs, churn of tasks in the cluster, etc. We
+recommend you start with a strict value after viewing a small amount of collected data, and then
+adjust thresholds as you see fit. Feel free to ask us if you would like to validate that your alerts
+and thresholds make sense.
+
+## Important stats
+
+### `jvm_uptime_secs`
+Type: integer counter
+
+The number of seconds the JVM process has been running. Comes from
+[RuntimeMXBean#getUptime()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/RuntimeMXBean.html#getUptime\(\))
+
+Detecting resets (decreasing values) on this stat will tell you that the scheduler is failing to
+stay alive.
+
+Look at the scheduler logs to identify the reason the scheduler is exiting.
+
+### `system_load_avg`
+Type: double gauge
+
+The current load average of the system for the last minute. Comes from
+[OperatingSystemMXBean#getSystemLoadAverage()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/OperatingSystemMXBean.html?is-external=true#getSystemLoadAverage\(\)).
+
+A high sustained value suggests that the scheduler machine may be over-utilized.
+
+Use standard unix tools like `top` and `ps` to track down the offending process(es).
+
+### `process_cpu_cores_utilized`
+Type: double gauge
+
+The current number of CPU cores in use by the JVM process. This should not exceed the number of
+logical CPU cores on the machine. Derived from
+[OperatingSystemMXBean#getProcessCpuTime()](http://docs.oracle.com/javase/7/docs/jre/api/management/extension/com/sun/management/OperatingSystemMXBean.html)
+
+A high sustained value indicates that the scheduler is overworked. Due to current internal design
+limitations, if this value is sustained at `1`, there is a good chance the scheduler is under water.
+
+There are two main inputs that tend to drive this figure: task scheduling attempts and status
+updates from Mesos.  You may see activity in the scheduler logs to give an indication of where
+time is being spent.  Beyond that, it really takes good familiarity with the code to effectively
+triage this.  We suggest engaging with an Aurora developer.
+
+### `task_store_LOST`
+Type: integer gauge
+
+The number of tasks stored in the scheduler that are in the `LOST` state, and have been rescheduled.
+
+If this value is increasing at a high rate, it is a sign of trouble.
+
+There are many sources of `LOST` tasks in Mesos: the scheduler, master, agent, and executor can all
+trigger this.  The first step is to look in the scheduler logs for `LOST` to identify where the
+state changes are originating.
+
+### `scheduler_resource_offers`
+Type: integer counter
+
+The number of resource offers that the scheduler has received.
+
+For a healthy scheduler, this value must be increasing over time.
+
+Assuming the scheduler is up and otherwise healthy, you will want to check if the master thinks it
+is sending offers. You should also look at the master's web interface to see if it has a large
+number of outstanding offers waiting to be returned.
+
+### `framework_registered`
+Type: binary integer counter
+
+Will be `1` for the leading scheduler that is registered with the Mesos master, `0` for passive
+schedulers.
+
+A sustained period without a `1` (or where `sum() != 1`) warrants investigation.
+
+If there is no leading scheduler, look in the scheduler and master logs for why.  If there are
+multiple schedulers claiming leadership, this suggests a split brain and warrants filing a critical
+bug.
+
+### `rate(scheduler_log_native_append_nanos_total)/rate(scheduler_log_native_append_events)`
+Type: rate ratio of integer counters
+
+This composes two counters to compute a windowed figure for the latency of replicated log writes.
+
+A hike in this value suggests disk bandwidth contention.
+
+Look in scheduler logs for any reported oddness with saving to the replicated log. Also use
+standard tools like `vmstat` and `iotop` to identify whether the disk has become slow or
+over-utilized. We suggest using a dedicated disk for the replicated log to mitigate this.
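+
+As a rough illustration of composing these counters yourself, the sketch below samples the two
+stats twice and prints the average append latency over the window (the host/port and window length
+are assumptions):
+
+    HOST=localhost:8081
+    n1=$(curl -s $HOST/vars | awk '/^scheduler_log_native_append_nanos_total /{print $2}')
+    e1=$(curl -s $HOST/vars | awk '/^scheduler_log_native_append_events /{print $2}')
+    sleep 60
+    n2=$(curl -s $HOST/vars | awk '/^scheduler_log_native_append_nanos_total /{print $2}')
+    e2=$(curl -s $HOST/vars | awk '/^scheduler_log_native_append_events /{print $2}')
+    # Average latency per append over the window, in milliseconds
+    # (assumes at least one append happened during the window).
+    echo "scale=3; ($n2 - $n1) / ($e2 - $e1) / 1000000" | bc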
+
+### `timed_out_tasks`
+Type: integer counter
+
+Tracks the number of times the scheduler has given up while waiting
+(for `-transient_task_state_timeout`) to hear back about a task that is in a transient state
+(e.g. `ASSIGNED`, `KILLING`), and has moved to `LOST` before rescheduling.
+
+This value is currently known to increase occasionally when the scheduler fails over
+([AURORA-740](https://issues.apache.org/jira/browse/AURORA-740)). However, any large spike in this
+value warrants investigation.
+
+The scheduler will log when it times out a task. You should trace the task ID of the timed out
+task into the master, agent, and/or executors to determine where the message was dropped.
+
+### `http_500_responses_events`
+Type: integer counter
+
+The total number of HTTP 500 status responses sent by the scheduler. Includes API and asset serving.
+
+An increase warrants investigation.
+
+Look in scheduler logs to identify why the scheduler returned a 500; there should be a stack trace.
diff --git a/source/documentation/0.15.0/operations/security.md b/source/documentation/0.15.0/operations/security.md
new file mode 100644
index 0000000..46e0b8a
--- /dev/null
+++ b/source/documentation/0.15.0/operations/security.md
@@ -0,0 +1,340 @@
+Securing your Aurora Cluster
+============================
+
+Aurora integrates with [Apache Shiro](http://shiro.apache.org/) to provide security
+controls for its API. In addition to providing some useful features out of the box, Shiro
+also allows Aurora cluster administrators to adapt the security system to their organization’s
+existing infrastructure. The announcer in the Aurora thermos executor also supports security
+controls for talking to ZooKeeper.
+
+
+- [Enabling Security](#enabling-security)
+- [Authentication](#authentication)
+	- [HTTP Basic Authentication](#http-basic-authentication)
+		- [Server Configuration](#server-configuration)
+		- [Client Configuration](#client-configuration)
+	- [HTTP SPNEGO Authentication (Kerberos)](#http-spnego-authentication-kerberos)
+		- [Server Configuration](#server-configuration-1)
+		- [Client Configuration](#client-configuration-1)
+- [Authorization](#authorization)
+	- [Using an INI file to define security controls](#using-an-ini-file-to-define-security-controls)
+		- [Caveats](#caveats)
+- [Implementing a Custom Realm](#implementing-a-custom-realm)
+	- [Packaging a realm module](#packaging-a-realm-module)
+- [Known Issues](#known-issues)
+- [Announcer Authentication](#announcer-authentication)
+    - [ZooKeeper authentication configuration](#zookeeper-authentication-configuration)
+    - [Executor settings](#executor-settings)
+
+# Enabling Security
+
+There are two major components of security:
+[authentication and authorization](http://en.wikipedia.org/wiki/Authentication#Authorization).  A
+cluster administrator may choose the approach used for each, and may also implement custom
+mechanisms for either.  Later sections describe the options available. To enable authentication
+for the announcer, see [Announcer Authentication](#announcer-authentication).
+
+
+# Authentication
+
+The scheduler must be configured with instructions for how to process authentication
+credentials at a minimum.  There are currently two built-in authentication schemes -
+[HTTP Basic Authentication](http://en.wikipedia.org/wiki/Basic_access_authentication), and
+[SPNEGO](http://en.wikipedia.org/wiki/SPNEGO) (Kerberos).
+
+## HTTP Basic Authentication
+
+Basic Authentication is a very quick way to add *some* security.  It is supported
+by all major browsers and HTTP client libraries with minimal work.  However,
+before relying on Basic Authentication you should be aware of the [security
+considerations](http://tools.ietf.org/html/rfc2617#section-4).
+
+### Server Configuration
+
+At a minimum you need to set the following command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_realm_modules=INI_AUTHNZ
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+[users]
+sally = apple, admin
+
+[roles]
+admin = *
+```
+
+The details of the security.ini file are explained below. Note that this file contains plaintext,
+unhashed passwords.
+
+### Client Configuration
+
+To configure the client for HTTP Basic authentication, add an entry to ~/.netrc with your credentials
+
+```
+% cat ~/.netrc
+# ...
+
+machine aurora.example.com
+login sally
+password apple
+
+# ...
+```
+
+No changes are required to `clusters.json`.
+
+## HTTP SPNEGO Authentication (Kerberos)
+
+### Server Configuration
+At a minimum you need to set the following command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=NEGOTIATE
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+% cat path/to/security.ini
+[users]
+sally = _, admin
+
+[roles]
+admin = *
+```
+
+What's going on here? First, Aurora must be configured to request Kerberos credentials when presented with an
+unauthenticated request. This is achieved by setting
+
+```
+-http_authentication_mechanism=NEGOTIATE
+```
+
+Next, a Realm module must be configured to **authenticate** the current request using the Kerberos
+credentials that were requested. Aurora ships with a realm module that can do this
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN[,...]
+```
+
+The Kerberos5Realm requires a keytab file and a server principal name. The principal name will usually
+be in the form `HTTP/aurora.example.com@EXAMPLE.COM`.
+
+```
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+```
+
+The Kerberos5 realm module is authentication-only. For scheduler security to work you must also
+enable a realm module that provides an Authorizer implementation. For example, to do this using the
+IniShiroRealmModule:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+```
+
+You can then configure authorization using a security.ini file as described below
+(the password field is ignored). You must configure the realm module with the path to this file:
+
+```
+-shiro_ini_path=path/to/security.ini
+```
+
+### Client Configuration
+To use Kerberos on the client-side you must build Kerberos-enabled client binaries. Do this with
+
+```
+./pants binary src/main/python/apache/aurora/kerberos:kaurora
+./pants binary src/main/python/apache/aurora/kerberos:kaurora_admin
+```
+
+You must also configure each cluster where you've enabled Kerberos on the scheduler
+to use Kerberos authentication. Do this by setting `auth_mechanism` to `KERBEROS`
+in `clusters.json`.
+
+```
+% cat ~/.aurora/clusters.json
+{
+    "devcluser": {
+        "auth_mechanism": "KERBEROS",
+        ...
+    },
+    ...
+}
+```
+
+# Authorization
+Given a means to authenticate the entity a client claims they are, we need to define what privileges they have.
+
+## Using an INI file to define security controls
+
+The simplest security configuration for Aurora is an INI file on the scheduler.  For small
+clusters, or clusters where the users and access controls change relatively infrequently, this is
+likely the preferred approach.  However you may want to avoid this approach if access permissions
+are rapidly changing, or if your access control information already exists in another system.
+
+You can enable INI-based configuration with following scheduler command line arguments:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_ini_path=path/to/security.ini
+```
+
+*note* As the argument name reveals, this is using Shiro’s
+[IniRealm](http://shiro.apache.org/configuration.html#Configuration-INIConfiguration) behind
+the scenes.
+
+The INI file will contain two sections - users and roles.  Here’s an example for what might
+be in security.ini:
+
+```
+[users]
+sally = apple, admin
+jim = 123456, accounting
+becky = letmein, webapp
+larry = 654321, accounting
+steve = password
+
+[roles]
+admin = *
+accounting = thrift.AuroraAdmin:setQuota
+webapp = thrift.AuroraSchedulerManager:*:webapp
+```
+
+The users section defines user credentials and the role(s) they are members of.  These lines
+are of the format `<user> = <password>[, <role>...]`.  As you probably noticed, the passwords are
+in plaintext and as a result read access to this file should be restricted.
+
+In this configuration, each user has different privileges for actions in the cluster because
+of the roles they are a part of:
+
+* admin is granted all privileges
+* accounting may adjust the amount of resource quota for any role
+* webapp represents a collection of jobs that represents a service, and its members may create and modify any jobs owned by it
+
+### Caveats
+You might find documentation on the Internet suggesting there are additional sections in `shiro.ini`,
+like `[main]` and `[urls]`. These are not supported by Aurora as it uses a different mechanism to configure
+those parts of Shiro. Think of Aurora's `security.ini` as a subset with only `[users]` and `[roles]` sections.
+
+## Implementing Delegated Authorization
+
+It is possible to leverage Shiro's `runAs` feature by implementing a custom Servlet Filter that provides
+the capability and passing its fully qualified class name to the command line argument
+`-shiro_after_auth_filter`. The filter is registered in the same filter chain as the Shiro auth filters
+and is placed after the Shiro auth filters in the filter chain. This ensures that the Filter is invoked
+after the Shiro filters have had a chance to authenticate the request.
+
+# Implementing a Custom Realm
+
+Since Aurora’s security is backed by [Apache Shiro](https://shiro.apache.org), you can implement a
+custom [Realm](http://shiro.apache.org/realm.html) to define organization-specific security behavior.
+
+In addition to using Shiro's standard APIs to implement a Realm you can link against Aurora to
+access the type-safe Permissions Aurora uses. See the Javadoc for `org.apache.aurora.scheduler.spi`
+for more information.
+
+## Packaging a realm module
+Package your custom Realm(s) with a Guice module that exposes a `Set<Realm>` multibinding.
+
+```java
+package com.example;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.multibindings.Multibinder;
+import org.apache.shiro.realm.Realm;
+
+public class MyRealmModule extends AbstractModule {
+  @Override
+  public void configure() {
+    Realm myRealm = new MyRealm();
+
+    Multibinder.newSetBinder(binder(), Realm.class).addBinding().toInstance(myRealm);
+  }
+
+  static class MyRealm implements Realm {
+    // Realm implementation.
+  }
+}
+```
+
+To use your module in the scheduler, include it as a realm module based on its fully-qualified
+class name:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ,com.example.MyRealmModule
+```
+
+# Known Issues
+
+While the APIs and SPIs we ship with are stable as of 0.8.0, we are aware of several incremental
+improvements. Please follow, vote, or send patches.
+
+Relevant tickets:
+* [AURORA-343](https://issues.apache.org/jira/browse/AURORA-343): HTTPS support
+* [AURORA-1248](https://issues.apache.org/jira/browse/AURORA-1248): Client retries 4xx errors
+* [AURORA-1279](https://issues.apache.org/jira/browse/AURORA-1279): Remove kerberos-specific build targets
+* [AURORA-1293](https://issues.apache.org/jira/browse/AURORA-1291): Consider defining a JSON format in place of INI
+* [AURORA-1179](https://issues.apache.org/jira/browse/AURORA-1179): Support hashed passwords in security.ini
+* [AURORA-1295](https://issues.apache.org/jira/browse/AURORA-1295): Support security for the ReadOnlyScheduler service
+
+# Announcer Authentication
+The Thermos executor can be configured to authenticate with ZooKeeper and include
+an [ACL](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl)
+on the nodes it creates, which will specify
+the privileges of clients to perform different actions on these nodes.  This
+feature is enabled by specifying an ACL configuration file to the executor with the
+`--announcer-zookeeper-auth-config` command line argument.
+
+When this feature is _not_ enabled, nodes created by the executor will have 'world/all' permission
+(`ZOO_OPEN_ACL_UNSAFE`).  In most production environments, operators should specify an ACL and
+limit access.
+
+## ZooKeeper Authentication Configuration
+The configuration file must be formatted as JSON with the following schema:
+
+```json
+{
+  "auth": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>"
+    }
+  ],
+  "acl": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>",
+      "permissions": {
+        "read": <bool>,
+        "write": <bool>,
+        "create": <bool>,
+        "delete": <bool>,
+        "admin": <bool>
+      }
+    }
+  ]
+}
+```
+
+The `scheme`
+defines the encoding of the credential field.  Note that these fields are passed directly to
+ZooKeeper (except in the case of _digest_ scheme, where the executor will hash and encode
+the credential appropriately before passing it to ZooKeeper). In addition to `acl`, a list of
+authentication credentials must be provided in `auth` to use for the connection.
+
+All properties of the `permissions` object will default to False if not provided.
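+
+A hypothetical filled-in example using the ZooKeeper `digest` scheme (the file path and the
+`user:password` credential are illustrative only):
+
+```
+% cat /etc/aurora/zk-auth.json
+{
+  "auth": [
+    {"scheme": "digest", "credential": "aurora:secret"}
+  ],
+  "acl": [
+    {
+      "scheme": "digest",
+      "credential": "aurora:secret",
+      "permissions": {"read": true, "write": true, "create": true, "delete": true, "admin": true}
+    }
+  ]
+}
+```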
+
+## Executor settings
+To enable the executor to authenticate against ZK, `--announcer-zookeeper-auth-config` should be
+set to the configuration file.
\ No newline at end of file
diff --git a/source/documentation/0.15.0/operations/storage.md b/source/documentation/0.15.0/operations/storage.md
new file mode 100644
index 0000000..874dfe6
--- /dev/null
+++ b/source/documentation/0.15.0/operations/storage.md
@@ -0,0 +1,97 @@
+# Aurora Scheduler Storage
+
+- [Overview](#overview)
+- [Replicated Log Configuration](#replicated-log-configuration)
+- [Backup Configuration](#backup-configuration)
+- [Storage Semantics](#storage-semantics)
+  - [Reads, writes, modifications](#reads-writes-modifications)
+    - [Read lifecycle](#read-lifecycle)
+    - [Write lifecycle](#write-lifecycle)
+  - [Atomicity, consistency and isolation](#atomicity-consistency-and-isolation)
+  - [Population on restart](#population-on-restart)
+
+
+## Overview
+
+The Aurora scheduler maintains data that needs to be persisted to survive failovers and restarts.
+For example:
+
+* Task configurations and scheduled task instances
+* Job update configurations and update progress
+* Production resource quotas
+* Mesos resource offer host attributes
+
+Aurora solves its persistence needs by leveraging the Mesos implementation of a Paxos replicated
+log [[1]](https://ramcloud.stanford.edu/~ongaro/userstudy/paxos.pdf)
+[[2]](http://en.wikipedia.org/wiki/State_machine_replication) with a key-value
+[LevelDB](https://github.com/google/leveldb) storage as persistence media.
+
+Conceptually, it can be represented by the following major components:
+
+* Volatile storage: in-memory cache of all available data. Implemented via in-memory
+[H2 Database](http://www.h2database.com/html/main.html) and accessed via
+[MyBatis](http://mybatis.github.io/mybatis-3/).
+* Log manager: interface between Aurora storage and Mesos replicated log. The default schema format
+is [thrift](https://github.com/apache/thrift). Data is stored in serialized binary form.
+* Snapshot manager: all data is periodically persisted in Mesos replicated log in a single snapshot.
+This helps establish periodic recovery checkpoints and speeds up volatile storage recovery on
+restart.
+* Backup manager: as a precaution, snapshots are periodically written out into backup files.
+This solves a [disaster recovery problem](../backup-restore/)
+in case of a complete loss or corruption of Mesos log files.
+
+![Storage hierarchy](../images/storage_hierarchy.png)
+
+
+## Storage Semantics
+
+Implementation details of the Aurora storage system. Understanding those can sometimes be useful
+when investigating performance issues.
+
+### Reads, writes, modifications
+
+All services in Aurora access data via a set of predefined store interfaces (aka stores) logically
+grouped by the type of data they serve. Every interface defines a specific set of operations allowed
+on the data thus abstracting out the storage access and the actual persistence implementation. The
+latter is especially important in view of a general immutability of persisted data. With the Mesos
+replicated log as the underlying persistence solution, data can be read and written easily but not
+modified. All modifications are simulated by saving new versions of modified objects. This feature
+and general performance considerations justify the existence of the volatile in-memory store.
+
+#### Read lifecycle
+
+There are two types of reads available in Aurora: consistent and weakly-consistent. The difference
+is explained [below](#atomicity-consistency-and-isolation).
+
+All reads are served from the volatile storage, making reads generally cheap operations
+from a performance standpoint. The majority of the volatile stores are represented by the
+in-memory H2 database. This allows for rich schema definitions, queries and relationships that
+key-value storage is unable to match.
+
+#### Write lifecycle
+
+Writes are more involved operations since, in addition to updating the volatile store, data has to
+be appended to the replicated log. Data is not available for reads until fully acknowledged by both
+replicated log and volatile storage.
+
+### Atomicity, consistency and isolation
+
+Aurora uses [write-ahead logging](http://en.wikipedia.org/wiki/Write-ahead_logging) to ensure
+consistency between replicated and volatile storage. In Aurora, data is first written into the
+replicated log and only then updated in the volatile store.
+
+Aurora storage uses read-write locks to serialize data mutations and provide consistent view of the
+available data. The available `Storage` interface exposes 3 major types of operations:
+* `consistentRead` - access is locked using reader's lock and provides consistent view on read
+* `weaklyConsistentRead` - access is lock-less. Delivers best contention performance but may result
+in stale reads
+* `write` - access is fully serialized by using writer's lock. Operation success requires both
+volatile and replicated writes to succeed.
+
+The consistency of the volatile store is enforced via H2 transactional isolation.
+
+### Population on restart
+
+Any time a scheduler restarts, it restores its volatile state from the most recent position recorded
+in the replicated log by restoring the snapshot and replaying individual log entries on top to fully
+recover the state up to the last write.
diff --git a/source/documentation/0.15.0/reference/client-cluster-configuration.md b/source/documentation/0.15.0/reference/client-cluster-configuration.md
new file mode 100644
index 0000000..5e85802
--- /dev/null
+++ b/source/documentation/0.15.0/reference/client-cluster-configuration.md
@@ -0,0 +1,93 @@
+# Client Cluster Configuration
+
+A cluster configuration file is used by the Aurora client to describe the Aurora clusters with
+which it can communicate. Ultimately this allows client users to reference clusters with short names
+like us-east and eu.
+
+A cluster configuration is formatted as JSON.  The simplest cluster configuration is one that
+communicates with a single (non-leader-elected) scheduler.  For example:
+
+    [{
+      "name": "example",
+      "scheduler_uri": "http://localhost:55555",
+    }]
+
+
+A configuration for a leader-elected scheduler would contain something like:
+
+    [{
+      "name": "example",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler"
+    }]
+
+
+The following properties may be set:
+
+  **Property**             | **Type** | **Description**
+  :------------------------| :------- | :--------------
+   **name**                | String   | Cluster name (Required)
+   **slave_root**          | String   | Path to Mesos agent work dir (Required)
+   **slave_run_directory** | String   | Name of Mesos agent run dir (Required)
+   **zk**                  | String   | Hostname of ZooKeeper instance used to resolve Aurora schedulers.
+   **zk_port**             | Integer  | Port of ZooKeeper instance used to locate Aurora schedulers (Default: 2181)
+   **scheduler_zk_path**   | String   | ZooKeeper path under which scheduler instances are registered.
+   **scheduler_uri**       | String   | URI of Aurora scheduler instance.
+   **proxy_url**           | String   | Used by the client to format URLs for display.
+   **auth_mechanism**      | String   | The authentication mechanism to use when communicating with the scheduler. (Default: UNAUTHENTICATED)
+
+
+## Details
+
+### `name`
+
+The name of the Aurora cluster represented by this entry. This name will be the `cluster` portion of
+any job keys identifying jobs running within the cluster.
+
+### `slave_root`
+
+The path on the Mesos agents where executing tasks can be found. It is used in combination with the
+`slave_run_directory` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This value should match the value passed to `mesos-slave`
+as `-work_dir`.
+
+### `slave_run_directory`
+
+The name of the directory where the task run can be found. This is used in combination with the
+`slave_root` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This should almost always be set to `latest`.
+
+### `zk`
+
+The hostname of the ZooKeeper instance used to resolve the Aurora scheduler. Aurora uses ZooKeeper
+to elect a leader. The client will connect to this ZooKeeper instance to determine the current
+leader. This host should match the host passed to the scheduler as `-zk_endpoints`.
+
+### `zk_port`
+
+The port on which the ZooKeeper instance is running. If not set this will default to the standard
+ZooKeeper port of 2181. This port should match the port in the host passed to the scheduler as
+`-zk_endpoints`.
+
+### `scheduler_zk_path`
+
+The path on the ZooKeeper instance under which the Aurora serverset is registered. This value should
+match the value passed to the scheduler as `-serverset_path`.
+
+### `scheduler_uri`
+
+The URI of the scheduler. This would be used in place of the ZooKeeper related configuration above
+in circumstances where direct communication with a single scheduler is needed (e.g. testing
+environments). It is strongly advised to **never** use this property for production deploys.
+
+### `proxy_url`
+
+If `proxy_url` is set, its value is used as the base URL instead of the hostname of the leading
+scheduler. The value for `proxy_url` would be, for example, the
+URL of your VIP in a loadbalancer or a roundrobin DNS name.
+
+### `auth_mechanism`
+
+The identifier of an authentication mechanism that the client should use when communicating with the
+scheduler. Support for values other than `UNAUTHENTICATED` requires a matching scheduler-side
+[security configuration](../../operations/security/).
diff --git a/source/documentation/0.15.0/reference/client-commands.md b/source/documentation/0.15.0/reference/client-commands.md
new file mode 100644
index 0000000..36f0ecb
--- /dev/null
+++ b/source/documentation/0.15.0/reference/client-commands.md
@@ -0,0 +1,326 @@
+Aurora Client Commands
+======================
+
+- [Introduction](#introduction)
+- [Cluster Configuration](#cluster-configuration)
+- [Job Keys](#job-keys)
+- [Modifying Aurora Client Commands](#modifying-aurora-client-commands)
+- [Regular Jobs](#regular-jobs)
+    - [Creating and Running a Job](#creating-and-running-a-job)
+    - [Running a Command On a Running Job](#running-a-command-on-a-running-job)
+    - [Killing a Job](#killing-a-job)
+    - [Adding Instances](#adding-instances)
+    - [Updating a Job](#updating-a-job)
+        - [Coordinated job updates](#coordinated-job-updates)
+    - [Renaming a Job](#renaming-a-job)
+    - [Restarting Jobs](#restarting-jobs)
+- [Cron Jobs](#cron-jobs)
+- [Comparing Jobs](#comparing-jobs)
+- [Viewing/Examining Jobs](#viewingexamining-jobs)
+    - [Listing Jobs](#listing-jobs)
+    - [Inspecting a Job](#inspecting-a-job)
+    - [Versions](#versions)
+    - [Checking Your Quota](#checking-your-quota)
+    - [Finding a Job on Web UI](#finding-a-job-on-web-ui)
+    - [Getting Job Status](#getting-job-status)
+    - [Opening the Web UI](#opening-the-web-ui)
+    - [SSHing to a Specific Task Machine](#sshing-to-a-specific-task-machine)
+    - [Templating Command Arguments](#templating-command-arguments)
+
+Introduction
+------------
+
+Once you have written an `.aurora` configuration file that describes
+your Job and its parameters and functionality, you interact with Aurora
+using Aurora Client commands. This document describes all of these commands
+and how and when to use them. All Aurora Client commands start with
+`aurora`, followed by the name of the specific command and its
+arguments.
+
+*Job keys* are a very common argument to Aurora commands, as well as the
+gateway to useful information about a Job. Before using Aurora, you
+should read the next section which describes them in detail. The section
+after that briefly describes how you can modify the behavior of certain
+Aurora Client commands, linking to a detailed document about how to do
+that.
+
+This is followed by the Regular Jobs section, which describes the basic
+Client commands for creating, running, and manipulating Aurora Jobs.
+After that are sections on Comparing Jobs and Viewing/Examining Jobs. In
+other words, various commands for getting information and metadata about
+Aurora Jobs.
+
+Cluster Configuration
+---------------------
+
+The client must be able to find a configuration file that specifies available clusters. This file
+declares shorthand names for clusters, which are in turn referenced by job configuration files
+and client commands.
+
+The client will load at most two configuration files, making both of their defined clusters
+available. The first is intended to be a system-installed cluster, using the path specified in
+the environment variable `AURORA_CONFIG_ROOT`, defaulting to `/etc/aurora/clusters.json` if the
+environment variable is not set. The second is a user-installed file, located at
+`~/.aurora/clusters.json`.
+
+For more details on cluster configuration see the
+[Client Cluster Configuration](../client-cluster-configuration/) documentation.
+
+Job Keys
+--------
+
+A job key is a unique system-wide identifier for an Aurora-managed
+Job, for example `cluster1/web-team/test/experiment204`. It is a 4-tuple
+consisting of, in order, *cluster*, *role*, *environment*, and
+*jobname*, separated by slashes. Cluster is the name of an Aurora
+cluster. Role is the Unix service account under which the Job
+runs. Environment is a namespace component like `devel`, `test`,
+`prod`, or `stagingN`. Jobname is the Job's name.
+
+The combination of all four values uniquely specifies the Job. If any
+one value is different from that of another job key, the two job keys
+refer to different Jobs. For example, job key
+`cluster1/tyg/prod/workhorse` is different from
+`cluster1/tyg/prod/workcamel` is different from
+`cluster2/tyg/prod/workhorse` is different from
+`cluster2/foo/prod/workhorse` is different from
+`cluster1/tyg/test/workhorse`.
+
+Role names are user accounts existing on the agent machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
+Modifying Aurora Client Commands
+--------------------------------
+
+For certain Aurora Client commands, you can define hook methods that run
+either before or after an action that takes place during the command's
+execution, as well as based on whether the action finished successfully or failed
+during execution. Basically, a hook is code that lets you extend the
+command's actions. The hook executes on the client side, specifically on
+the machine executing Aurora commands.
+
+Hooks can be associated with these Aurora Client commands:
+
+  - `job create`
+  - `job kill`
+  - `job restart`
+
+The process for writing and activating them is complex enough
+that we explain it in a devoted document, [Hooks for Aurora Client API](../client-hooks/).
+
+Regular Jobs
+------------
+
+This section covers Aurora commands related to running, killing,
+renaming, updating, and restarting a basic Aurora Job.
+
+### Creating and Running a Job
+
+    aurora job create <job key> <configuration file>
+
+Creates and then runs a Job with the specified job key based on a `.aurora` configuration file.
+The configuration file may also contain and activate hook definitions.
+
+### Running a Command On a Running Job
+
+    aurora task run CLUSTER/ROLE/ENV/NAME[/INSTANCES] <cmd>
+
+Runs a shell command on all machines currently hosting shards of a
+single Job.
+
+`run` supports the same command line wildcards used to populate a Job's
+commands; i.e. anything in the `{{mesos.*}}` and `{{thermos.*}}`
+namespaces.
+
+### Killing a Job
+
+    aurora job killall CLUSTER/ROLE/ENV/NAME
+
+Kills all Tasks associated with the specified Job, blocking until all
+are terminated. Defaults to killing all instances in the Job.
+
+The `<configuration file>` argument for `kill` is optional. Use it only
+if it contains hook definitions and activations that affect the
+kill command.
+
+### Adding Instances
+
+    aurora job add CLUSTER/ROLE/ENV/NAME/INSTANCE <count>
+
+Adds `<count>` instances to the existing job. The configuration of the new instances is derived from
+an active job instance pointed by the `/INSTANCE` part of the job specification. This command is
+a simpler way to scale out an existing job when an instance with desired task configuration
+already exists. Use `aurora update start` to add instances with a new (updated) configuration.
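+
+For example, assuming the `devcluster/www-data/prod/hello` job used elsewhere in this document is
+running, the following would clone the configuration of instance 0 into two additional instances:
+
+    aurora job add devcluster/www-data/prod/hello/0 2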
+
+### Updating a Job
+
+You can manage job updates using the `aurora update` command.  Please see
+[the Job Update documentation](../../features/job-updates/) for more details.
+
+
+### Renaming a Job
+
+Renaming is a tricky operation as downstream clients must be informed of
+the new name. A conservative approach
+to renaming suitable for production services is:
+
+1.  Modify the Aurora configuration file to change the role,
+    environment, and/or name as appropriate to the standardized naming
+    scheme.
+2.  Check that only these naming components have changed
+    with `aurora diff`.
+
+        aurora job diff CLUSTER/ROLE/ENV/NAME <job_configuration>
+
+3.  Create the (identical) job at the new key. You may need to request a
+    temporary quota increase.
+
+        aurora job create CLUSTER/ROLE/ENV/NEW_NAME <job_configuration>
+
+4.  Migrate all clients over to the new job key. Update all links and
+    dashboards. Ensure that both job keys run identical versions of the
+    code while in this state.
+5.  After verifying that all clients have successfully moved over, kill
+    the old job.
+
+        aurora job killall CLUSTER/ROLE/ENV/NAME
+
+6.  If you received a temporary quota increase, be sure to let the
+    powers that be know you no longer need the additional capacity.
+
+### Restarting Jobs
+
+`restart` restarts all shards of the Job identified by the given job key:
+
+    aurora job restart CLUSTER/ROLE/ENV/NAME[/INSTANCES]
+
+Restarts are controlled on the client side, so aborting
+the `job restart` command halts the restart operation.
+
+**Note**: `job restart` only applies its command line arguments and neither
+uses nor is affected by `update.config`. Restarting
+does ***not*** involve a configuration change. To update the
+configuration, use `update.config`.
+
+The `--config` argument for restart is optional. Use it only
+if it contains hook definitions and activations that affect the
+`job restart` command.
+
+Cron Jobs
+---------
+
+You can manage cron jobs using the `aurora cron` command.  Please see
+[the Cron Jobs Feature](../../features/cron-jobs/) for more details.
+
+Comparing Jobs
+--------------
+
+    aurora job diff CLUSTER/ROLE/ENV/NAME <job configuration>
+
+Compares a job configuration against a running job. By default the diff
+is determined using `diff`, though you may choose an alternate
+diff program by specifying the `DIFF_VIEWER` environment variable.
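+
+For example, to use a side-by-side viewer for a single invocation (the job key and config file here
+are illustrative):
+
+    DIFF_VIEWER=vimdiff aurora job diff devcluster/www-data/prod/hello hello_world.aurora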
+
+Viewing/Examining Jobs
+----------------------
+
+Above we discussed creating, killing, and updating Jobs. Here we discuss
+how to view and examine Jobs.
+
+### Listing Jobs
+
+    aurora config list <job configuration>
+
+Lists all Jobs registered with the Aurora scheduler in the named cluster for the named role.
+
+### Inspecting a Job
+
+    aurora job inspect CLUSTER/ROLE/ENV/NAME <job configuration>
+
+`inspect` verifies that its specified job can be parsed from a
+configuration file, and displays the parsed configuration.
+
+### Checking Your Quota
+
+    aurora quota get CLUSTER/ROLE
+
+Prints the production quota allocated to the role at the given
+cluster. Only non-[dedicated](../../features/constraints/#dedicated-attribute)
+[production](../configuration/#job-objects) jobs consume quota.
+
+### Finding a Job on Web UI
+
+When you create a job, part of the output response contains a URL that goes
+to the job's scheduler UI page. For example:
+
+    vagrant@precise64:~$ aurora job create devcluster/www-data/prod/hello /vagrant/examples/jobs/hello_world.aurora
+    INFO] Creating job hello
+    INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+    INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+You can go to the scheduler UI page for this job via `http://precise64:8081/scheduler/www-data/prod/hello`.
+You can go to the overall scheduler UI page by going to the part of that URL that ends at `scheduler`: `http://precise64:8081/scheduler`.
+
+Once you click through to a role page, you see Jobs arranged
+separately by pending jobs, active jobs and finished jobs.
+Jobs are arranged by role, typically a service account for
+production jobs and user accounts for test or development jobs.
+
+### Getting Job Status
+
+    aurora job status <job_key>
+
+Returns the status of recent tasks associated with the
+`job_key` specified Job in its supplied cluster. Typically this includes
+a mix of active tasks (running or assigned) and inactive tasks
+(successful, failed, and lost.)
+
+### Opening the Web UI
+
+Use the Job's web UI scheduler URL or the `aurora status` command to find out on which
+machines individual tasks are scheduled. You can open the web UI via the
+`open` command line command if invoked from your machine:
+
+    aurora job open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+If only the cluster is specified, it goes directly to that cluster's
+scheduler main page. If the role is specified, it goes to the top-level
+role page. If the full job key is specified, it goes directly to the job
+page where you can inspect individual tasks.
+
+### SSHing to a Specific Task Machine
+
+    aurora task ssh <job_key> <shard number>
+
+You can have the Aurora client ssh directly to the machine that has been
+assigned a particular Job/shard number. This may be useful for quickly
+diagnosing issues such as performance issues or abnormal behavior on a
+particular machine.
+
+### Templating Command Arguments
+
+    aurora task run [-e] [-t THREADS] <job_key> -- <<command-line>>
+
+Given a job specification, run the supplied command on all hosts and
+return the output. You may use the standard Mustache templating rules:
+
+- `{{thermos.ports[name]}}` substitutes the specific named port of the
+  task assigned to this machine
+- `{{mesos.instance}}` substitutes the shard id of the job's task
+  assigned to this machine
+- `{{thermos.task_id}}` substitutes the task id of the job's task
+  assigned to this machine
+
+For example, the following type of pattern can be a powerful diagnostic
+tool:
+
+    aurora task run -t5 cluster1/tyg/devel/seizure -- \
+      'curl -s -m1 localhost:{{thermos.ports[http]}}/vars | grep uptime'
+
+By default, the command runs in the Task's sandbox. The `-e` option can
+run the command in the executor's sandbox. This is mostly useful for
+Aurora administrators.
+
+You can parallelize the runs by using the `-t` option.
diff --git a/source/documentation/0.15.0/reference/client-hooks.md b/source/documentation/0.15.0/reference/client-hooks.md
new file mode 100644
index 0000000..ee64193
--- /dev/null
+++ b/source/documentation/0.15.0/reference/client-hooks.md
@@ -0,0 +1,228 @@
+# Hooks for Aurora Client API
+
+You can execute hook methods around Aurora API Client methods when they are called by the Aurora Command Line commands.
+
+Explaining how hooks work is a bit tricky because of some indirection about what they apply to. Basically, a hook is code that executes when a particular Aurora Client API method runs, letting you extend the method's actions. The hook executes on the client side, specifically on the machine executing Aurora commands.
+
+The catch is that hooks are associated with Aurora Client API methods, which users don't directly call. Instead, users call Aurora Command Line commands, which call Client API methods during their execution. Since which hooks run depends on which Client API methods get called, you will need to know which Command Line commands call which API methods. Later on, there is a table showing the various associations.
+
+**Terminology Note**: From now on, "method(s)" refer to Client API methods, and "command(s)" refer to Command Line commands.
+
+- [Hook Types](#hook-types)
+- [Execution Order](#execution-order)
+- [Hookable Methods](#hookable-methods)
+- [Activating and Using Hooks](#activating-and-using-hooks)
+- [.aurora Config File Settings](#aurora-config-file-settings)
+- [Command Line](#command-line)
+- [Hooks Protocol](#hooks-protocol)
+  - [pre_ Methods](#pre_-methods)
+  - [err_ Methods](#err_-methods)
+  - [post_ Methods](#post_-methods)
+- [Generic Hooks](#generic-hooks)
+- [Hooks Process Checklist](#hooks-process-checklist)
+
+
+## Hook Types
+
+Hooks have three basic types, differing by when they run with respect to their associated method.
+
+`pre_<method_name>`: When its associated method is called, the `pre_` hook executes first, then the called method. If the `pre_` hook fails, the method never runs. Later code that expected the method to succeed may be affected by this, which can result in the Aurora client terminating.
+
+Note that a `pre_` hook can error-trap internally so it does not
+return `False`. Designers/contributors of new `pre_` hooks should
+consider whether or not to error-trap them. You can error trap at the
+highest level very generally and always pass the `pre_` hook by
+returning `True`. For example:
+
+    def pre_create(...):
+      do_something()  # if do_something fails with an exception, the create_job is not attempted!
+      return True
+
+    # However...
+    def pre_create(...):
+      try:
+        do_something()  # may cause exception
+      except Exception:  # generic error trap will catch it
+        pass  # and ignore the exception
+      return True  # create_job will run in any case!
+
+`post_<method_name>`: A `post_` hook executes after its associated method successfully finishes running. If it fails, the already executed method is unaffected. A `post_` hook's error is trapped, and any later operations are unaffected.
+
+`err_<method_name>`: Executes only when its associated method returns a status other than OK or throws an exception. If an `err_` hook fails, the already executed method is unaffected. An `err_` hook's error is trapped, and any later operations are unaffected.
+
+## Execution Order
+
+A command with `pre_`, `post_`, and `err_` hooks defined and activated for its called method executes in the following order when the method successfully executes:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and successfully finishes
+6. `post_` method hook runs
+7. Command code executes
+8. Command execution ends
+
+The following is what happens when, for the same command and hooks, the method associated with the command suffers an error and does not successfully finish executing:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and fails
+6. `err_` method hook runs
+7. Command Code executes (if `err_` method does not end the command execution)
+8. Command execution ends
+
+Note that the `post_` and `err_` hooks for the same method can never both run for a single execution of that method.
+
+## Hookable Methods
+
+You can associate `pre_`, `post_`, and `err_` hooks with the following methods. Since you do not directly interact with the methods, but rather the Aurora Command Line commands that call them, for each method we also list the command(s) that can call the method. Note that a different method or methods may be called by a command depending on how the command's other code executes. Similarly, multiple commands can call the same method. We also list the methods' argument signatures, which are used by their associated hooks. <a name="Chart"></a>
+
+  Aurora Client API Method | Client API Method Argument Signature | Aurora Command Line Command
+  -------------------------| ------------------------------------- | ---------------------------
+  ```create_job``` | ```self```, ```config``` | ```job create```, ```runtask```
+  ```restart``` | ```self```, ```job_key```, ```shards```, ```update_config```, ```health_check_interval_seconds``` | ```job restart```
+  ```kill_job``` | ```self```, ```job_key```, ```shards=None``` |  ```job kill```
+  ```start_cronjob``` | ```self```, ```job_key``` | ```cron start```
+  ```start_job_update``` | ```self```, ```config```, ```instances=None``` | ```update start```
+
+Some specific examples:
+
+* `pre_create_job` executes when a `create_job` method is called, and before the `create_job` method itself executes.
+
+* `post_cancel_update` executes after a `cancel_update` method has successfully finished running.
+
+* `err_kill_job` executes when the `kill_job` method is called, but doesn't successfully finish running.
+
+## Activating and Using Hooks
+
+By default, hooks are inactive. If you do not want to use hooks, you do not need to make any changes to your code. If you do want to use hooks, you will need to alter your `.aurora` config file to activate them both for the configuration as a whole and for individual `Job`s. And, of course, you will need to define in your config file what happens when a particular hook executes.
+
+## .aurora Config File Settings
+
+You can define a top-level `hooks` variable in any `.aurora` config file. `hooks` is a list of all objects that define hooks used by `Job`s defined in that config file. If you do not want to define any hooks for a configuration, `hooks` is optional.
+
+    hooks = [Object_with_defined_hooks1, Object_with_defined_hooks2]
+
+Be careful when assembling a config file using `include` on multiple smaller config files. If there are multiple files that assign a value to `hooks`, only the last assignment made will stick. For example, if `x.aurora` has `hooks = [a, b, c]` and `y.aurora` has `hooks = [d, e, f]` and `z.aurora` has, in this order, `include x.aurora` and `include y.aurora`, the `hooks` value will be `[d, e, f]`.
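+
+For instance, a minimal sketch of that layout, using the file and hook names from the example above, might look like:
+
+    # x.aurora
+    hooks = [a, b, c]
+
+    # y.aurora
+    hooks = [d, e, f]
+
+    # z.aurora
+    include('x.aurora')
+    include('y.aurora')
+    # hooks is now [d, e, f]; the assignment pulled in from y.aurora wins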
+
+Also, for any `Job` that you want to use hooks with, its `Job` definition in the `.aurora` config file must set an `enable_hooks` flag to `True` (it defaults to `False`). By default, hooks are disabled and you must enable them for `Job`s of your choice.
+
+To summarize, to use hooks for a particular job, you must both activate hooks for your config file as a whole, and for that job. Activating hooks only for individual jobs won't work, nor will only activating hooks for your config file as a whole. You must also specify the hooks' defining object in the `hooks` variable.
+
+Recall that `.aurora` config files are written in Pystachio. So the following turns on hooks for production jobs at cluster1 and cluster2, but leaves them off for similar jobs with a defined user role. Of course, you also need to list the objects that define the hooks in your config file's `hooks` variable.
+
+    jobs = [
+            Job(enable_hooks = True, cluster = c, env = 'prod') for c in ('cluster1', 'cluster2')
+           ]
+    jobs.extend(
+       Job(cluster = c, env = 'prod', role = getpass.getuser()) for c in ('cluster1', 'cluster2'))
+       # Hooks disabled for these jobs
+
+## Command Line
+
+All Aurora Command Line commands now accept an `.aurora` config file as an optional parameter (some, of course, accept it as a required parameter). Whenever a command has a `.aurora` file parameter, any hooks specified and activated in the `.aurora` file can be used. For example:
+
+    aurora job restart cluster1/role/env/app myapp.aurora
+
+The command activates any hooks specified and activated in `myapp.aurora`. For the `restart` command, that is the only thing the `myapp.aurora` parameter does. So, if the command was the following, since there is no `.aurora` config file to specify any hooks, no hooks on the `restart` command can run.
+
+    aurora job restart cluster1/role/env/app
+
+## Hooks Protocol
+
+Any object defined in the `.aurora` config file can define hook methods. You should define your hook methods within a class, and then use the class name as a value in the `hooks` list in your config file.
+
+Note that you can define other methods in the class that its hook methods can call; all the logic of a hook does not have to be in its definition.
+
+The following example defines a class containing a `pre_kill_job` hook definition that calls another method defined in the class.
+
+    # Defines a method pre_kill_job
+    class KillConfirmer(object):
+      def confirm(self, msg):
+        return raw_input(msg).lower() == 'yes'
+
+      def pre_kill_job(self, job_key, shards=None):
+        shards = ('shards %s' % shards) if shards is not None else 'all shards'
+        return self.confirm('Are you sure you want to kill %s (%s)? (yes/no): '
+                            % (job_key, shards))
+
+### pre_ Methods
+
+`pre_` methods have the signature:
+
+    pre_<API method name>(self, <associated method's signature>)
+
+`pre_` methods have the same signature as their associated method, with the addition of `self` as the first parameter. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `pre_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+If this method returns False, the API command call aborts.
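+
+For example, a minimal sketch of a `pre_create_job` hook that uses the `*`/`**` syntax to accept whatever arguments `create_job` was called with (the printed message is just illustrative):
+
+    class CreateAnnouncer(object):
+      def pre_create_job(self, *args, **kw):
+        # Runs before create_job; returning False would abort the create_job call.
+        print('About to create a job')
+        return True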
+
+### err_ Methods
+
+`err_` methods have the signature:
+
+    err_<API method name>(self, exc, <associated method's signature>)
+
+`err_` methods have the same signature as their associated method, with the addition of a first parameter `self` and a second parameter `exc`. `exc` is either a result with responseCode other than `ResponseCode.OK` or an `Exception`. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `err_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+`err_` method return codes are ignored.
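+
+As a sketch, an `err_` hook for `kill_job` that simply reports the failure might look like (the message text is just illustrative):
+
+    class KillFailureReporter(object):
+      def err_kill_job(self, exc, job_key, shards=None):
+        # exc is either a non-OK result or the Exception raised by kill_job;
+        # the return value of an err_ hook is ignored.
+        print('kill_job failed for %s: %s' % (job_key, exc))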
+
+### post_ Methods
+
+`post_` methods have the signature:
+
+    post_<API method name>(self, result, <associated method signature>)
+
+`post_` method parameters are `self`, then `result`, followed by the same parameter signature as their associated method. `result` is the result of the associated method call. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `post_` methods, you can use the `*` and `**` syntax to designate that all unspecified arguments are passed in a list to the `*`ed parameter and all unspecified named arguments with values are passed as name/value pairs to the `**`ed parameter.
+
+`post_` method return codes are ignored.
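+
+For example, a minimal sketch of a `post_restart` hook (the message text is just illustrative):
+
+    class RestartAnnouncer(object):
+      def post_restart(self, result, *args, **kw):
+        # result is the value returned by the successful restart call;
+        # the return value of a post_ hook is ignored.
+        print('restart finished with result: %s' % result)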
+
+## Generic Hooks
+
+There are seven Aurora API Methods which any of the three hook types can attach to. Thus, there are 21 possible hook/method combinations for a single `.aurora` config file. Say that you define `pre_` and `post_` hooks for the `restart` method. That leaves 19 undefined hook/method combinations; `err_restart` and the 3 `pre_`, `post_`, and `err_` hooks for each of the other 6 hookable methods. You can define what happens when any of these otherwise undefined 19 hooks execute via a generic hook, whose signature is:
+
+    generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw)
+
+where:
+
+* `hook_config` is a named tuple of `config` (the Pystachio `config` object) and `job_key`.
+
+* `event` is one of `pre`, `err`, or `post`, indicating which type of hook the generic hook is standing in for. For example, assume no specific hooks were defined for the `restart` API command. If `generic_hook` is defined and activated, and `restart` is called, `generic_hook` will effectively run as `pre_restart`, `post_restart`, and `err_restart`. You can use a selection statement on this value so that `generic_hook` will act differently based on whether it is standing in for a `pre_`, `post_`, or `err_` hook.
+
+* `method_name` is the Client API method name whose execution is causing this execution of the `generic_hook`.
+
+* `*args`, `**kw` are the API method arguments and keyword arguments respectively.
+* `result_or_err` is a tri-state parameter taking one of these three values:
+  1. `None` for `pre_` hooks
+  2. `result` for `post_` hooks
+  3. `exc` for `err_` hooks
+
+Example:
+
+    # Overrides the standard do-nothing generic_hook by adding a log writing operation.
+    from twitter.common import log
+
+    class Logger(object):
+      '''Adds to the log every time a hookable API method is called'''
+      def generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw):
+        log.info('%s: %s_%s of %s'
+                 % (self.__class__.__name__, event, method_name, hook_config.job_key))
+
+## Hooks Process Checklist
+
+1. In your `.aurora` config file, add a `hooks` variable. Note that you may want to define a `.aurora` file only for hook definitions and then include this file in multiple other config files that you want to use the same hooks.
+
+    hooks = []
+
+2. In the `hooks` variable, list all objects that define hooks used by `Job`s defined in this config:
+
+    hooks = [Object_hook_definer1, Object_hook_definer2]
+
+3. For each job that uses hooks in this config file, add `enable_hooks = True` to the `Job` definition. Note that this is necessary even if you only want to use the generic hook.
+
+4. Write your `pre_`, `post_`, and `err_` hook definitions as part of an object definition in your `.aurora` config file.
+
+5. If desired, write your `generic_hook` definition as part of an object definition in your `.aurora` config file. Remember, the object must be listed as a member of `hooks`.
+
+6. If your Aurora command line command does not otherwise take an `.aurora` config file argument, add the appropriate `.aurora` file as an argument in order to define and activate the configuration's hooks.
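+
+Putting the checklist together, a minimal sketch of a config file that defines and activates a single hook (the class, job, and command names are just illustrative) might look like:
+
+    class KillConfirmer(object):
+      def pre_kill_job(self, job_key, shards=None):
+        return raw_input('Really kill %s? (yes/no): ' % job_key).lower() == 'yes'
+
+    hooks = [KillConfirmer]
+
+    hello_world_job = Job(
+      name = 'hello_world',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = 'test',
+      enable_hooks = True,  # required for the hooks listed above to fire for this job
+      task = SimpleTask(name = 'hello_world', command = 'echo hello world'))
+
+    jobs = [hello_world_job]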
diff --git a/source/documentation/0.15.0/reference/configuration-best-practices.md b/source/documentation/0.15.0/reference/configuration-best-practices.md
new file mode 100644
index 0000000..9ce7fcb
--- /dev/null
+++ b/source/documentation/0.15.0/reference/configuration-best-practices.md
@@ -0,0 +1,187 @@
+Aurora Configuration Best Practices
+===================================
+
+Use As Few .aurora Files As Possible
+------------------------------------
+
+When creating your `.aurora` configuration, try to keep all versions of
+a particular job within the same `.aurora` file. For example, if you
+have separate jobs for `cluster1`, `cluster1` staging, `cluster1`
+testing, and `cluster2`, keep them as close together as possible.
+
+Constructs shared across multiple jobs owned by your team (e.g.
+team-level defaults or structural templates) can be split into separate
+`.aurora` files and included via the `include` directive.
+
+
+Avoid Boilerplate
+------------------
+
+If you see repetition or find yourself copy and pasting any parts of
+your configuration, it's likely an opportunity for templating. Take the
+example below:
+
+`redundant.aurora` contains:
+
+    download = Process(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'rm -rf Python-2.7.3 && tar xjf Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    build = Process(
+      name = 'build',
+      cmdline = 'pushd Python-2.7.3 && ./configure && make && popd',
+      max_failures = 1)
+
+    email = Process(
+      name = 'email',
+      cmdline = 'echo Success | mail feynman@tmc.com',
+      max_failures = 5,
+      min_duration = 1)
+
+    build_python = Task(
+      name = 'build_python',
+      processes = [download, unpack, build, email],
+      constraints = [Constraint(order = ['download', 'unpack', 'build', 'email'])])
+
+As you'll notice, there's a lot of repetition in the `Process`
+definitions. For example, almost every process sets a `max_failures`
+limit to 5 and a `min_duration` to 1. This is an opportunity for factoring
+into a common process template.
+
+Furthermore, the Python version is repeated everywhere. This can be
+bound via structural templating as described in the [Advanced Binding](../configuration-templating/#AdvancedBinding)
+section.
+
+`less_redundant.aurora` contains:
+
+    class Python(Struct):
+      version = Required(String)
+      base = Default(String, 'Python-{{version}}')
+      package = Default(String, '{{base}}.tar.bz2')
+
+    ReliableProcess = Process(
+      max_failures = 5,
+      min_duration = 1)
+
+    download = ReliableProcess(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/{{python.version}}/{{python.package}}')
+
+    unpack = ReliableProcess(
+      name = 'unpack',
+      cmdline = 'rm -rf {{python.base}} && tar xjf {{python.package}}')
+
+    build = ReliableProcess(
+      name = 'build',
+      cmdline = 'pushd {{python.base}} && ./configure && make && popd',
+      max_failures = 1)
+
+    email = ReliableProcess(
+      name = 'email',
+      cmdline = 'echo Success | mail {{role}}@foocorp.com')
+
+    build_python = SequentialTask(
+      name = 'build_python',
+      processes = [download, unpack, build, email]).bind(python = Python(version = "2.7.3"))
+
+
+Thermos Uses bash, But Thermos Is Not bash
+-------------------------------------------
+
+#### Bad
+
+Many tiny Processes make for configurations that are harder to manage.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'rcp user@my_machine:my_application .'
+    )
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+#### Good
+
+Each `cmdline` runs in a bash subshell, so you have the full power of
+bash. Chaining commands with `&&` or `||` is almost always the right
+thing to do.
+
+Also for Tasks that are simply a list of processes that run one after
+another, consider using the `SequentialTask` helper which applies a
+linear ordering constraint for you.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'rcp user@my_machine:my_application . && unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+
+Rarely Use Functions In Your Configurations
+-------------------------------------------
+
+90% of the time you define a function in a `.aurora` file, you're
+probably Doing It Wrong(TM).
+
+#### Bad
+
+    def get_my_task(name, user, cpu, ram, disk):
+      return Task(
+        name = name,
+        user = user,
+        processes = [STAGE_PROCESS, RUN_PROCESS],
+        constraints = order(STAGE_PROCESS, RUN_PROCESS),
+        resources = Resources(cpu = cpu, ram = ram, disk = disk)
+      )
+
+    task_one = get_my_task('task_one', 'feynman', 1.0, 32*MB, 1*GB)
+    task_two = get_my_task('task_two', 'feynman', 2.0, 64*MB, 1*GB)
+
+#### Good
+
+This approach is more idiomatic. Forced keyword arguments prevent accidents,
+e.g. constructing a task with `32*MB` when you mean 32 MB of RAM and not
+disk. Less proliferation of task-construction techniques makes for
+configurations that are easier to read, quicker to understand, and more
+composable.
+
+    TASK_TEMPLATE = SequentialTask(
+      user = 'wickman',
+      processes = [STAGE_PROCESS, RUN_PROCESS],
+    )
+
+    task_one = TASK_TEMPLATE(
+      name = 'task_one',
+      resources = Resources(cpu = 1.0, ram = 32*MB, disk = 1*GB)
+    )
+
+    task_two = TASK_TEMPLATE(
+      name = 'task_two',
+      resources = Resources(cpu = 2.0, ram = 64*MB, disk = 1*GB)
+    )
diff --git a/source/documentation/0.15.0/reference/configuration-templating.md b/source/documentation/0.15.0/reference/configuration-templating.md
new file mode 100644
index 0000000..16d5c9d
--- /dev/null
+++ b/source/documentation/0.15.0/reference/configuration-templating.md
@@ -0,0 +1,306 @@
+Aurora Configuration Templating
+===============================
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a templating library called
+*Pystachio*, a powerful tool for configuration specification and reuse.
+
+[Aurora Configuration Reference](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the *Aurora+Thermos Configuration
+Reference* without `import` statements - the Aurora config loader
+injects them automatically. Other than that, the `.aurora` format
+works like any other Python script.
+
+
+Templating 1: Binding in Pystachio
+----------------------------------
+
+Pystachio uses the visually distinctive {{}} to indicate template
+variables. These are often called "mustache variables" after the
+similarly appearing variables in the Mustache templating system and
+because the curly braces resemble mustaches.
+
+If you are familiar with the Mustache system, templates in Pystachio
+have significant differences. They have no nesting, joining, or
+inheritance semantics. On the other hand, templates are evaluated
+iteratively, so this affords some level of indirection.
+
+Let's start with the simplest template: text with one
+variable, in this case `name`:
+
+    Hello {{name}}
+
+If we evaluate this as is, we'd get back:
+
+    Hello
+
+If a template variable doesn't have a value, when evaluated it's
+replaced with nothing. If we add a binding to give it a value:
+
+    { "name" : "Tom" }
+
+We'd get back:
+
+    Hello Tom
+
+Every Pystachio object has an associated `.bind` method that can bind
+values to {{}} variables. Bindings are not immediately evaluated.
+Instead, they are evaluated only when the interpolated value of the
+object is necessary, e.g. for performing equality or serializing a
+message over the wire.
+
+Objects with and without mustache templated variables behave
+differently:
+
+    >>> Float(1.5)
+    Float(1.5)
+
+    >>> Float('{{x}}.5')
+    Float({{x}}.5)
+
+    >>> Float('{{x}}.5').bind(x = 1)
+    Float(1.5)
+
+    >>> Float('{{x}}.5').bind(x = 1) == Float(1.5)
+    True
+
+    >>> contextual_object = String('{{metavar{{number}}}}').bind(
+    ... metavar1 = "first", metavar2 = "second")
+
+    >>> contextual_object
+    String({{metavar{{number}}}})
+
+    >>> contextual_object.bind(number = 1)
+    String(first)
+
+    >>> contextual_object.bind(number = 2)
+    String(second)
+
+You usually bind simple key to value pairs, but you can also bind three
+other objects: lists, dictionaries, and structurals. These will be
+described in detail later.
+
+
+### Structurals in Pystachio / Aurora
+
+Most Aurora/Thermos users don't ever (knowingly) interact with `String`,
+`Float`, or `Integer` Pystachio objects directly. Instead they interact
+with derived structural (`Struct`) objects that are collections of
+fundamental and structural objects. The structural object components are
+called *attributes*. Aurora's most used structural objects are `Job`,
+`Task`, and `Process`:
+
+    class Process(Struct):
+      cmdline = Required(String)
+      name = Required(String)
+      max_failures = Default(Integer, 1)
+      daemon = Default(Boolean, False)
+      ephemeral = Default(Boolean, False)
+      min_duration = Default(Integer, 5)
+      final = Default(Boolean, False)
+
+Construct default objects by following the object's type with (). If you
+want an attribute to have a value different from its default, include
+the attribute name and value inside the parentheses.
+
+    >>> Process()
+    Process(daemon=False, max_failures=1, ephemeral=False,
+      min_duration=5, final=False)
+
+Attribute values can be template variables, which then receive specific
+values when creating the object.
+
+    >>> Process(cmdline = 'echo {{message}}')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo {{message}}, final=False)
+
+    >>> Process(cmdline = 'echo {{message}}').bind(message = 'hello world')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+A powerful binding property is that all of an object's children inherit its
+bindings:
+
+    >>> List(Process)([
+    ... Process(name = '{{prefix}}_one'),
+    ... Process(name = '{{prefix}}_two')
+    ... ]).bind(prefix = 'hello')
+    ProcessList(
+      Process(daemon=False, name=hello_one, max_failures=1, ephemeral=False, min_duration=5, final=False),
+      Process(daemon=False, name=hello_two, max_failures=1, ephemeral=False, min_duration=5, final=False)
+      )
+
+Remember that an Aurora Job contains Tasks which contain Processes. A
+Job level binding is inherited by its Tasks and all their Processes.
+Similarly a Task level binding is available to that Task and its
+Processes but is *not* visible at the Job level (inheritance is a
+one-way street.)
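+
+For example, a Job level binding is visible to the `cmdline` of a Process deep inside it (a minimal sketch; the names and values are just illustrative):
+
+    greeting = Process(name = 'greeting', cmdline = 'echo {{message}}')
+
+    greeting_job = Job(
+      name = 'greeting',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = 'devel',
+      task = SequentialTask(
+        processes = [greeting],
+        resources = Resources(cpu = 0.1, ram = 16*MB, disk = 16*MB))
+    ).bind(message = 'hello world')  # inherited by the Task and its Processes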
+
+#### Mustaches Within Structurals
+
+When you define a `Struct` schema, one powerful, but confusing, feature
+is that all of that structure's attributes are Mustache variables within
+the enclosing scope *once they have been populated*.
+
+For example, when `Process` is defined above, all its attributes such as
+{{`name`}}, {{`cmdline`}}, {{`max_failures`}} etc., are immediately
+defined as Mustache variables, implicitly bound into the `Process`, and
+are inherited by all child objects once they are defined.
+
+Thus, you can do the following:
+
+    >>> Process(name = "installer", cmdline = "echo {{name}} is running")
+    Process(daemon=False, name=installer, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo installer is running, final=False)
+
+WARNING: This binding only takes place in one direction. For example,
+the following does NOT work and does not set the `Process` `name`
+attribute's value.
+
+    >>> Process().bind(name = "installer")
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5, final=False)
+
+The following is also not possible and results in an infinite loop that
+attempts to resolve `Process.name`.
+
+    >>> Process(name = '{{name}}').bind(name = 'installer')
+
+Do not confuse Structural attributes with bound Mustache variables.
+Attributes are implicitly converted to Mustache variables but not vice
+versa.
+
+Templating 2: Structurals Are Factories
+---------------------------------------
+
+#### A Second Way of Templating
+
+A second templating method is both as powerful as the aforementioned and
+often confused with it. This method is due to automatic conversion of
+Struct attributes to Mustache variables as described above.
+
+Suppose you create a Process object:
+
+    >>> p = Process(name = "process_one", cmdline = "echo hello world")
+
+    >>> p
+    Process(daemon=False, name=process_one, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+This `Process` object, "`p`", can be used wherever a `Process` object is
+needed. It can also be reused by changing the value(s) of its
+attribute(s). Here we change its `name` attribute from `process_one` to
+`process_two`.
+
+    >>> p(name = "process_two")
+    Process(daemon=False, name=process_two, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+Template creation is a common use for this technique:
+
+    >>> Daemon = Process(daemon = True)
+    >>> logrotate = Daemon(name = 'logrotate', cmdline = './logrotate conf/logrotate.conf')
+    >>> mysql = Daemon(name = 'mysql', cmdline = 'bin/mysqld --safe-mode')
+
+### Advanced Binding
+
+As described above, `.bind()` binds simple strings or numbers to
+Mustache variables. In addition to Structural types formed by combining
+atomic types, Pystachio has two container types: `List` and `Map`, which
+can also be bound via `.bind()`.
+
+#### Bind Syntax
+
+The `bind()` function can take Python dictionaries or `kwargs`
+interchangeably (when "`kwargs`" is in a function definition, `kwargs`
+receives a Python dictionary containing all keyword arguments after the
+formal parameter list).
+
+    >>> String('{{foo}}').bind(foo = 'bar') == String('{{foo}}').bind({'foo': 'bar'})
+    True
+
+Bindings done "closer" to the object in question take precedence:
+
+    >>> p = Process(name = '{{context}}_process')
+    >>> t = Task().bind(context = 'global')
+    >>> t(processes = [p, p.bind(context = 'local')])
+    Task(processes=ProcessList(
+      Process(daemon=False, name=global_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5),
+      Process(daemon=False, name=local_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5)
+    ))
+
+#### Binding Complex Objects
+
+##### Lists
+
+    >>> fibonacci = List(Integer)([1, 1, 2, 3, 5, 8, 13])
+    >>> String('{{fib[4]}}').bind(fib = fibonacci)
+    String(5)
+
+##### Maps
+
+    >>> first_names = Map(String, String)({'Kent': 'Clark', 'Wayne': 'Bruce', 'Prince': 'Diana'})
+    >>> String('{{first[Kent]}}').bind(first = first_names)
+    String(Clark)
+
+##### Structurals
+
+    >>> String('{{p.cmdline}}').bind(p = Process(cmdline = "echo hello world"))
+    String(echo hello world)
+
+### Structural Binding
+
+Use structural templates when binding more than two or three individual
+values at the Job or Task level. For fewer values, standard
+key-to-string binding is sufficient.
+
+Structural binding is a very powerful pattern and is most useful in
+Aurora/Thermos for doing Structural configuration. For example, you can
+define a job profile. The following profile uses `HDFS`, the Hadoop
+Distributed File System, to designate a file's location. `HDFS` does
+not come with Aurora, so you'll need to either install it separately
+or change the way the dataset is designated.
+
+    class Profile(Struct):
+      version = Required(String)
+      environment = Required(String)
+      dataset = Default(String, 'hdfs://home/aurora/data/{{environment}}')
+
+    PRODUCTION = Profile(version = 'live', environment = 'prod')
+    DEVEL = Profile(version = 'latest',
+                    environment = 'devel',
+                    dataset = 'hdfs://home/aurora/data/test')
+    TEST = Profile(version = 'latest', environment = 'test')
+
+    JOB_TEMPLATE = Job(
+      name = 'application',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = '{{profile.environment}}',
+      task = SequentialTask(
+        name = 'task',
+        resources = Resources(cpu = 2, ram = 4*GB, disk = 8*GB),
+        processes = [
+          Process(name = 'main',
+                  cmdline = 'java -jar application.jar -hdfsPath {{profile.dataset}}')
+        ]
+      )
+    )
+
+    jobs = [
+      JOB_TEMPLATE(instances = 100).bind(profile = PRODUCTION),
+      JOB_TEMPLATE.bind(profile = DEVEL),
+      JOB_TEMPLATE.bind(profile = TEST),
+    ]
+
+In this case, a custom structural "Profile" is created to self-document
+the configuration to some degree. This also allows some schema
+"type-checking" and default self-substitution, e.g. in
+`Profile.dataset` above.
+
+So rather than a `.bind()` with a half-dozen substituted variables, you
+can bind a single object that has sensible defaults stored in a single
+place.
diff --git a/source/documentation/0.15.0/reference/configuration-tutorial.md b/source/documentation/0.15.0/reference/configuration-tutorial.md
new file mode 100644
index 0000000..dcfe505
--- /dev/null
+++ b/source/documentation/0.15.0/reference/configuration-tutorial.md
@@ -0,0 +1,511 @@
+Aurora Configuration Tutorial
+=============================
+
+This document describes how to write Aurora configuration files, including feature descriptions
+and best practices. When writing a configuration file, make use of
+`aurora job inspect`. It takes the same job key and configuration file
+arguments as `aurora job create` or `aurora update start`. It first ensures the
+configuration parses, then outputs it in human-readable form.
+
+You should read this after going through the general [Aurora Tutorial](../../getting-started/tutorial/).
+
+- [The Basics](#the-basics)
+	- [Use Bottom-To-Top Object Ordering](#use-bottom-to-top-object-ordering)
+- [An Example Configuration File](#an-example-configuration-file)
+- [Defining Process Objects](#defining-process-objects)
+- [Getting Your Code Into The Sandbox](#getting-your-code-into-the-sandbox)
+- [Defining Task Objects](#defining-task-objects)
+	- [SequentialTask: Running Processes in Parallel or Sequentially](#sequentialtask-running-processes-in-parallel-or-sequentially)
+	- [SimpleTask](#simpletask)
+	- [Combining tasks](#combining-tasks)
+- [Defining Job Objects](#defining-job-objects)
+- [The jobs List](#the-jobs-list)
+- [Basic Examples](#basic-examples)
+
+
+The Basics
+----------
+
+To run a job on Aurora, you must specify a configuration file that tells
+Aurora what it needs to know to schedule the job, what Mesos needs to
+run the tasks the job is made up of, and what Thermos needs to run the
+processes that make up the tasks. This file must have
+a `.aurora` suffix.
+
+A configuration file defines a collection of objects, along with parameter
+values for their attributes. An Aurora configuration file contains the
+following three types of objects:
+
+- Job
+- Task
+- Process
+
+A configuration also specifies a list of `Job` objects assigned
+to the variable `jobs`.
+
+- jobs (list of defined Jobs to run)
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a type-checked dictionary
+templating library called *Pystachio*, a powerful tool for
+configuration specification and reuse. Pystachio objects are tailored
+via {{}} surrounded templates.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the [*Aurora Configuration
+Reference*](../configuration/), without `import` statements - the
+Aurora config loader injects them automatically. Other than that, an `.aurora`
+file works like any other Python script.
+
+[*Aurora Configuration Reference*](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+### Use Bottom-To-Top Object Ordering
+
+A well-structured configuration starts with structural templates (if
+any). Structural templates encapsulate in their attributes all the
+differences between Jobs in the configuration that are not directly
+manipulated at the `Job` level, but typically at the `Process` or `Task`
+level. For example, if certain processes are invoked with slightly
+different settings or input.
+
+After structural templates, define, in order, `Process`es, `Task`s, and
+`Job`s.
+
+Structural template names should be *UpperCamelCased* and their
+instantiations are typically *UPPER\_SNAKE\_CASED*. `Process`, `Task`,
+and `Job` names are typically *lower\_snake\_cased*. Indentation is typically 2
+spaces.
+
+An Example Configuration File
+-----------------------------
+
+The following is a typical configuration file. Don't worry if there are
+parts you don't understand yet, but you may want to refer back to this
+as you read about its individual parts. Note that names surrounded by
+curly braces {{}} are template variables, which the system replaces with
+bound values for the variables.
+
+    # --- templates here ---
+	class Profile(Struct):
+	  package_version = Default(String, 'live')
+	  java_binary = Default(String, '/usr/lib/jvm/java-1.7.0-openjdk/bin/java')
+	  extra_jvm_options = Default(String, '')
+	  parent_environment = Default(String, 'prod')
+	  parent_serverset = Default(String,
+                                 '/foocorp/service/bird/{{parent_environment}}/bird')
+
+	# --- processes here ---
+	main = Process(
+	  name = 'application',
+	  cmdline = '{{profile.java_binary}} -server -Xmx1792m '
+	            '{{profile.extra_jvm_options}} '
+	            '-jar application.jar '
+	            '-upstreamService {{profile.parent_serverset}}'
+	)
+
+	# --- tasks ---
+	base_task = SequentialTask(
+	  name = 'application',
+	  processes = [
+	    Process(
+	      name = 'fetch',
+	      cmdline = 'curl -O https://packages.foocorp.com/{{profile.package_version}}/application.jar'),
+	  ]
+	)
+
+        # not always necessary but often useful to have separate task
+        # resource classes
+        staging_task = base_task(resources =
+                         Resources(cpu = 1.0,
+                                   ram = 2048*MB,
+                                   disk = 1*GB))
+	production_task = base_task(resources =
+                            Resources(cpu = 4.0,
+                                      ram = 2560*MB,
+                                      disk = 10*GB))
+
+	# --- job template ---
+	job_template = Job(
+	  name = 'application',
+	  role = 'myteam',
+	  contact = 'myteam-team@foocorp.com',
+	  instances = 20,
+	  service = True,
+	  task = production_task
+	)
+
+	# -- profile instantiations (if any) ---
+	PRODUCTION = Profile()
+	STAGING = Profile(
+	  extra_jvm_options = '-Xloggc:gc.log',
+	  parent_environment = 'staging'
+	)
+
+	# -- job instantiations --
+	jobs = [
+          job_template(cluster = 'cluster1', environment = 'prod')
+	               .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster2', environment = 'prod')
+	                .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster1',
+                        environment = 'staging',
+			service = False,
+			task = staging_task,
+			instances = 2)
+			.bind(profile = STAGING),
+	]
+
+## Defining Process Objects
+
+Processes are handled by the Thermos system. A process is a single
+executable step run as a part of an Aurora task, which consists of a
+bash-executable statement.
+
+The key (and required) `Process` attributes are:
+
+-   `name`: Any string which is a valid Unix filename (no slashes,
+    NULLs, or leading periods). The `name` value must be unique relative
+    to other Processes in a `Task`.
+-   `cmdline`: A command line run in a bash subshell, so you can use
+    bash scripts. Nothing is supplied for command-line arguments,
+    so `$*` is unspecified.
+
+Many tiny processes make managing configurations more difficult. For
+example, the following is a bad way to define processes.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip'
+    )
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+Since `cmdline` runs in a bash subshell, you can chain commands
+with `&&` or `||`.
+
+When defining a `Task` that is just a list of Processes run in a
+particular order, use `SequentialTask`, as described in the [*Defining*
+`Task` *Objects*](#defining-task-objects) section. The following simplifies and combines the
+above multiple `Process` definitions into just two.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip && '
+                'unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+`Process` also has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#process-objects).
+
+
+## Getting Your Code Into The Sandbox
+
+When using Aurora, you need to get your executable code into its "sandbox", specifically
+the Task sandbox where the code executes for the Processes that make up that Task.
+
+Each Task has a sandbox created when the Task starts and garbage
+collected when it finishes. All of a Task's processes run in its
+sandbox, so processes can share state by using a shared current
+working directory.
+
+Typically, you save this code somewhere. You then need to define a Process
+in your `.aurora` configuration file that fetches the code from that somewhere
+to where the agent can see it. For a public cloud, that can be anywhere public on
+the Internet, such as S3. For a private cloud, you need to put it
+on an accessible HDFS cluster or similar internal storage.
+
+The template for this Process is:
+
+    <name> = Process(
+      name = '<name>'
+      cmdline = '<command to copy and extract code archive into current working directory>'
+    )
+
+Note: Be sure the extracted code archive has an executable.
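+
+For example, a hypothetical fetch Process (the package URL is just illustrative) might look like:
+
+    fetch_app = Process(
+      name = 'fetch_app',
+      cmdline = 'curl -O https://packages.example.com/myteam/my_app.tar.gz && '
+                'tar xf my_app.tar.gz')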
+
+## Defining Task Objects
+
+Tasks are handled by Mesos. A task is a collection of processes that
+runs in a shared sandbox. It's the fundamental unit Aurora uses to
+schedule the datacenter; essentially what Aurora does is find places
+in the cluster to run tasks.
+
+The key (and required) parts of a Task are:
+
+-   `name`: A string giving the Task's name. By default, if a Task is
+    not given a name, it inherits the first name in its Process list.
+
+-   `processes`: An unordered list of Process objects bound to the Task.
+    The value of the optional `constraints` attribute affects the
+    contents as a whole. Currently, the only constraint, `order`, determines if
+    the processes run in parallel or sequentially.
+
+-   `resources`: A `Resource` object defining the Task's resource
+    footprint. A `Resource` object has three attributes:
+    -   `cpu`: A Float, the fractional number of cores the Task
+        requires.
+    -   `ram`: An Integer, RAM bytes the Task requires.
+    -   `disk`: An Integer, disk bytes the Task requires.
+
+A basic Task definition looks like:
+
+    Task(
+        name="hello_world",
+        processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+        resources=Resources(cpu = 1.0,
+                            ram = 1*GB,
+                            disk = 1*GB))
+
+A Task has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#task-object)
+
+
+### SequentialTask: Running Processes in Parallel or Sequentially
+
+By default, a Task with several Processes runs them in parallel. There
+are two ways to run Processes sequentially:
+
+-   Include an `order` constraint in the Task definition's `constraints`
+    attribute whose arguments specify the processes' run order:
+
+        Task( ... processes=[process1, process2, process3],
+              constraints = order(process1, process2, process3), ...)
+
+-   Use `SequentialTask` instead of `Task`; it automatically runs
+    processes in the order specified in the `processes` attribute. No
+    `constraint` parameter is needed:
+
+        SequentialTask( ... processes=[process1, process2, process3] ...)
+
+### SimpleTask
+
+For quickly creating simple tasks, use the `SimpleTask` helper. It
+creates a basic task from a provided name and command line using a
+default set of resources. For example, in a `.aurora` configuration
+file:
+
+    SimpleTask(name="hello_world", command="echo hello world")
+
+is equivalent to
+
+    Task(name="hello_world",
+         processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+         resources=Resources(cpu = 1.0,
+                             ram = 1*GB,
+                             disk = 1*GB))
+
+The simplest idiomatic Job configuration thus becomes:
+
+    import os
+    hello_world_job = Job(
+      task=SimpleTask(name="hello_world", command="echo hello world"),
+      role=os.getenv('USER'),
+      cluster="cluster1")
+
+When written to `hello_world.aurora`, you invoke it with a simple
+`aurora job create cluster1/$USER/test/hello_world hello_world.aurora`.
+
+### Combining tasks
+
+`Tasks.concat` (synonym: `concat_tasks`) and
+`Tasks.combine` (synonym: `combine_tasks`) merge multiple Task definitions
+into a single Task. It may be easier to define complex Jobs
+as smaller constituent Tasks. But since a Job only includes a single
+Task, the subtasks must be combined before using them in a Job.
+Smaller Tasks can also be reused between Jobs, instead of having to
+repeat their definition for multiple Jobs.
+
+With both methods, the merged Task takes the first Task's name. The
+difference between the two is the result Task's process ordering.
+
+-   `Tasks.combine` runs its subtasks' processes in no particular order.
+    The new Task's resource consumption is the sum of all its subtasks'
+    consumption.
+
+-   `Tasks.concat` runs its subtasks in the order supplied, with each
+    subtask's processes run serially between tasks. It is analogous to
+    the `order` constraint helper, except at the Task level instead of
+    the Process level. The new Task's resource consumption is the
+    maximum value specified by any subtask for each Resource attribute
+    (cpu, ram and disk).
+
+For example, given the following:
+
+    setup_task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper],
+      # It is important to note that {{Tasks.concat}} has
+      # no effect on the ordering of the processes within a task;
+      # hence the necessity of the {{order}} statement below
+      # (otherwise, the order in which {{download_interpreter}}
+      # and {{update_zookeeper}} run will be non-deterministic)
+      constraints=order(download_interpreter, update_zookeeper),
+      ...
+    )
+
+    run_task = SequentialTask(
+      ...
+      processes=[download_application, start_application],
+      ...
+    )
+
+    combined_task = Tasks.concat(setup_task, run_task)
+
+The `Tasks.concat` command merges the two Tasks into a single Task and
+ensures all processes in `setup_task` run before the processes
+in `run_task`. Conceptually, the task is reduced to:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper,
+                        download_application, start_application),
+      ...
+    )
+
+In the case of `Tasks.combine`, the two schedules run in parallel:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper) +
+                        order(download_application, start_application),
+      ...
+    )
+
+In the latter case, each of the two sequences may operate in parallel.
+Of course, this may not be the intended behavior (for example, if
+the `start_application` Process implicitly relies
+upon `download_interpreter`). Make sure you understand the difference
+between using one or the other.
+
+## Defining Job Objects
+
+A job is a group of identical tasks that Aurora can run in a Mesos cluster.
+
+A `Job` object is defined by the values of several attributes, some
+required and some optional. The required attributes are:
+
+-   `task`: Task object to bind to this job. Note that a Job can
+    only take a single Task.
+
+-   `role`: Job's role account; in other words, the user account to run
+    the job as on a Mesos cluster machine. A common value is
+    `os.getenv('USER')`; using a Python command to get the user who
+    submits the job request. The other common value is the service
+    account that runs the job, e.g. `www-data`.
+
+-   `environment`: Job's environment, typical values
+    are `devel`, `test`, or `prod`.
+
+-   `cluster`: Aurora cluster to schedule the job in, defined in
+    `/etc/aurora/clusters.json` or `~/.clusters.json`. You can specify
+    jobs where the only difference is the `cluster`, then at run time
+    only run the Job whose job key includes your desired cluster's name.
+
+You usually see a `name` parameter. By default, `name` inherits its
+value from the Job's associated Task object, but you can override this
+default. For these four parameters, a Job definition might look like:
+
+    foo_job = Job( name = 'foo', cluster = 'cluster1',
+              role = os.getenv('USER'), environment = 'prod',
+              task = foo_task)
+
+In addition to the required attributes, there are several optional
+attributes. Details can be found in the [Aurora Configuration Reference](../configuration/#job-objects).
+
+
+## The jobs List
+
+At the end of your `.aurora` file, you need to specify a list of the
+file's defined Jobs. For example, the following exports the jobs `job1`,
+`job2`, and `job3`.
+
+    jobs = [job1, job2, job3]
+
+This allows the aurora client to invoke commands on those jobs, such as
+starting, updating, or killing them.
+
+
+
+Basic Examples
+==============
+
+These are provided to give a basic understanding of simple Aurora jobs.
+
+### hello_world.aurora
+
+Put the following in a file named `hello_world.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    import os
+    hello_world_process = Process(name = 'hello_world', cmdline = 'echo hello world')
+
+    hello_world_task = Task(
+      resources = Resources(cpu = 0.1, ram = 16 * MB, disk = 16 * MB),
+      processes = [hello_world_process])
+
+    hello_world_job = Job(
+      cluster = 'cluster1',
+      role = os.getenv('USER'),
+      task = hello_world_task)
+
+    jobs = [hello_world_job]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world hello_world.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world
+
+### Environment Tailoring
+
+Put the following in a file named `hello_world_productionized.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    include('hello_world.aurora')
+
+    production_resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 2 * GB)
+    staging_resources = Resources(cpu = 0.1, ram = 32 * MB, disk = 512 * MB)
+    hello_world_template = hello_world_job(
+        name = "hello_world-{{cluster}}",
+        task = hello_world_task(resources = production_resources))
+
+    jobs = [
+      # production jobs
+      hello_world_template(cluster = 'cluster1', instances = 25),
+      hello_world_template(cluster = 'cluster2', instances = 15),
+
+      # staging jobs
+      hello_world_template(
+        cluster = 'local',
+        instances = 1,
+        task = hello_world_task(resources = staging_resources)),
+    ]
+
+Then issue the following commands to create and kill the job, using your own values for the job key
+
+    aurora job create cluster1/$USER/test/hello_world-cluster1 hello_world_productionized.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world-cluster1
diff --git a/source/documentation/0.15.0/reference/configuration.md b/source/documentation/0.15.0/reference/configuration.md
new file mode 100644
index 0000000..0592593
--- /dev/null
+++ b/source/documentation/0.15.0/reference/configuration.md
@@ -0,0 +1,607 @@
+Aurora Configuration Reference
+==============================
+
+Don't know where to start? The Aurora configuration schema is very
+powerful, and configurations can become quite complex for advanced use
+cases.
+
+For examples of simple configurations to get something up and running
+quickly, check out the [Tutorial](../../getting-started/tutorial/). When you feel comfortable with the basics, move
+on to the [Configuration Tutorial](../configuration-tutorial/) for more in-depth coverage of
+configuration design.
+
+- [Process Schema](#process-schema)
+    - [Process Objects](#process-objects)
+- [Task Schema](#task-schema)
+    - [Task Object](#task-object)
+    - [Constraint Object](#constraint-object)
+    - [Resource Object](#resource-object)
+- [Job Schema](#job-schema)
+    - [Job Objects](#job-objects)
+    - [UpdateConfig Objects](#updateconfig-objects)
+    - [HealthCheckConfig Objects](#healthcheckconfig-objects)
+    - [Announcer Objects](#announcer-objects)
+    - [Container Objects](#container)
+    - [LifecycleConfig Objects](#lifecycleconfig-objects)
+- [Specifying Scheduling Constraints](#specifying-scheduling-constraints)
+- [Template Namespaces](#template-namespaces)
+    - [mesos Namespace](#mesos-namespace)
+    - [thermos Namespace](#thermos-namespace)
+
+
+Process Schema
+==============
+
+Process objects consist of required `name` and `cmdline` attributes. You can customize Process
+behavior with its optional attributes. Remember, Processes are handled by Thermos.
+
+### Process Objects
+
+  **Attribute Name**  | **Type**    | **Description**
+  ------------------- | :---------: | ---------------------------------
+   **name**           | String      | Process name (Required)
+   **cmdline**        | String      | Command line (Required)
+   **max_failures**   | Integer     | Maximum process failures (Default: 1)
+   **daemon**         | Boolean     | When True, this is a daemon process. (Default: False)
+   **ephemeral**      | Boolean     | When True, this is an ephemeral process. (Default: False)
+   **min_duration**   | Integer     | Minimum duration between process restarts in seconds. (Default: 15)
+   **final**          | Boolean     | When True, this process is a finalizing one that should run last. (Default: False)
+   **logger**         | Logger      | Struct defining the log behavior for the process. (Default: Empty)
+
+#### name
+
+The name is any valid UNIX filename string (specifically no
+slashes, NULLs or leading periods). Within a Task object, each Process name
+must be unique.
+
+#### cmdline
+
+The command line run by the process. The command line is invoked in a bash
+subshell, so can involve fully-blown bash scripts. However, nothing is
+supplied for command-line arguments so `$*` is unspecified.
+
+#### max_failures
+
+The maximum number of failures (non-zero exit statuses) this process can
+have before being marked permanently failed and not retried. If a
+process permanently fails, Thermos looks at the failure limit of the task
+containing the process (usually 1) to determine if the task has
+failed as well.
+
+Setting `max_failures` to 0 makes the process retry
+indefinitely until it achieves a successful (zero) exit status.
+It retries at most once every `min_duration` seconds to prevent
+an effective denial of service attack on the coordinating Thermos scheduler.
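+
+As a sketch, a process that retries a flaky download until it eventually succeeds, waiting at least 30 seconds between attempts (the URL is just illustrative):
+
+    fetch = Process(
+      name = 'fetch',
+      cmdline = 'curl -O https://packages.example.com/app.zip',
+      max_failures = 0,   # retry until a zero exit status is achieved
+      min_duration = 30)  # wait at least 30 seconds between attempts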
+
+#### daemon
+
+By default, Thermos processes are non-daemon. If `daemon` is set to True, a
+successful (zero) exit status does not prevent future process runs.
+Instead, the process reinvokes after `min_duration` seconds.
+However, the maximum failure limit still applies. A combination of
+`daemon=True` and `max_failures=0` causes a process to retry
+indefinitely regardless of exit status. This should be avoided
+for very short-lived processes because of the accumulation of
+checkpointed state for each process run. When running in Mesos
+specifically, `max_failures` is capped at 100.
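+
+For example, a sketch of a long-lived daemon process that is restarted after any exit, successful or not (the command is just illustrative):
+
+    mysql = Process(
+      name = 'mysql',
+      cmdline = 'bin/mysqld --safe-mode',
+      daemon = True,      # reinvoke after successful exits as well
+      max_failures = 0,   # never mark permanently failed
+      min_duration = 5)   # wait at least 5 seconds between runs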
+
+#### ephemeral
+
+By default, Thermos processes are non-ephemeral. If `ephemeral` is set to
+True, the process' status is not used to determine if its containing task
+has completed. For example, consider a task with a non-ephemeral
+webserver process and an ephemeral logsaver process
+that periodically checkpoints its log files to a centralized data store.
+The task is considered finished once the webserver process has
+completed, regardless of the logsaver's current status.
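+
+A sketch of the webserver/logsaver example described above (the commands are just illustrative):
+
+    webserver = Process(
+      name = 'webserver',
+      cmdline = 'java -jar webserver.jar')
+
+    logsaver = Process(
+      name = 'logsaver',
+      cmdline = 'while true; do hadoop fs -put -f webserver.log /logs/; sleep 300; done',
+      ephemeral = True)  # its status is not used to decide whether the task has completed
+
+    task = Task(
+      name = 'webserver',
+      processes = [webserver, logsaver],
+      resources = Resources(cpu = 1.0, ram = 1*GB, disk = 1*GB))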
+
+#### min_duration
+
+Processes may succeed or fail multiple times during a single task's
+duration. Each of these is called a *process run*. `min_duration` is
+the minimum number of seconds the scheduler waits before running the
+same process.
+
+#### final
+
+Processes can be grouped into two classes: ordinary processes and
+finalizing processes. By default, Thermos processes are ordinary. They
+run as long as the task is considered healthy (i.e., no failure
+limits have been reached.) But once all regular Thermos processes
+finish or the task reaches a certain failure threshold, it
+moves into a "finalization" stage and runs all finalizing
+processes. These are typically processes necessary for cleaning up the
+task, such as log checkpointers, or perhaps e-mail notifications that
+the task completed.
+
+Finalizing processes may not depend upon ordinary processes or
+vice-versa; however, finalizing processes may depend upon other
+finalizing processes and otherwise run as a typical process
+schedule.
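+
+For example, a sketch of a finalizing process that archives logs once the ordinary processes are done (the command is just illustrative):
+
+    checkpoint_logs = Process(
+      name = 'checkpoint_logs',
+      cmdline = 'tar czf logs.tar.gz *.log && hadoop fs -put -f logs.tar.gz /archive/',
+      final = True)  # runs during the finalization stage, after ordinary processes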
+
+#### logger
+
+The default behavior of Thermos is to store stderr/stdout logs in files which grow unbounded.
+In the event that you have large log volume, you may want to configure Thermos to automatically rotate logs
+after they grow to a certain size, which can prevent your job from using more than its allocated
+disk space.
+
+A Logger union consists of a destination enum, a mode enum, and a rotation policy.
+Use `destination` to set where the process logs should be sent. The default
+option is `file`. It is also possible to specify `console` to send logs
+to stdout/stderr, `none` to suppress any log output, or `both` to send logs to files and
+console output. When using `none` or `console`, rotation attributes are ignored.
+Rotation policies only apply to loggers whose mode is `rotate`. The acceptable values
+for the LoggerMode enum are `standard` and `rotate`. The rotation policy applies to both
+stderr and stdout.
+
+By default, all processes use the `standard` LoggerMode.
+
+  **Attribute Name**  | **Type**          | **Description**
+  ------------------- | :---------------: | ---------------------------------
+   **destination**    | LoggerDestination | Destination of logs. (Default: `file`)
+   **mode**           | LoggerMode        | Mode of the logger. (Default: `standard`)
+   **rotate**         | RotatePolicy      | An optional rotation policy.
+
+A RotatePolicy describes log rotation behavior for when `mode` is set to `rotate`. It is ignored
+otherwise.
+
+  **Attribute Name**  | **Type**     | **Description**
+  ------------------- | :----------: | ---------------------------------
+   **log_size**       | Integer      | Maximum size (in bytes) of an individual log file. (Default: 100 MiB)
+   **backups**        | Integer      | The maximum number of backups to retain. (Default: 5)
+
+An example process configuration is as follows:
+
+        process = Process(
+          name='process',
+          logger=Logger(
+            destination=LoggerDestination('both'),
+            mode=LoggerMode('rotate'),
+            rotate=RotatePolicy(log_size=5*MB, backups=5)
+          )
+        )
+
+Task Schema
+===========
+
+Tasks fundamentally consist of a `name` and a list of Process objects stored as the
+value of the `processes` attribute. Processes can be further constrained with
+`constraints`. By default, `name`'s value inherits from the first Process in the
+`processes` list, so for simple `Task` objects with one Process, `name`
+can be omitted. In Mesos, `resources` is also required.
+
+### Task Object
+
+   **param**               | **type**                         | **description**
+   ---------               | :---------:                      | ---------------
+   ```name```              | String                           | Task name (Required) (Default: ```processes[0].name```)
+   ```processes```         | List of ```Process``` objects    | List of ```Process``` objects bound to this task. (Required)
+   ```constraints```       | List of ```Constraint``` objects | List of ```Constraint``` objects constraining processes.
+   ```resources```         | ```Resource``` object            | Resource footprint. (Required)
+   ```max_failures```      | Integer                          | Maximum process failures before being considered failed (Default: 1)
+   ```max_concurrency```   | Integer                          | Maximum number of concurrent processes (Default: 0, unlimited concurrency.)
+   ```finalization_wait``` | Integer                          | Amount of time allocated for finalizing processes, in seconds. (Default: 30)
+
+#### name
+`name` is a string denoting the name of this task. It defaults to the name of the first Process in
+the list of Processes associated with the `processes` attribute.
+
+#### processes
+
+`processes` is an unordered list of `Process` objects. To constrain the order
+in which they run, use `constraints`.
+
+##### constraints
+
+A list of `Constraint` objects. Currently it supports only one type,
+the `order` constraint. `order` is a list of process names
+that should run in the order given. For example,
+
+        process = Process(cmdline = "echo hello {{name}}")
+        task = Task(name = "echoes",
+                    processes = [process(name = "jim"), process(name = "bob")],
+                    constraints = [Constraint(order = ["jim", "bob"])])
+
+Constraints can be supplied ad-hoc and in duplicate. Not all
+Processes need be constrained; however, Tasks with cycles are
+rejected by the Thermos scheduler.
+
+Use the `order` function as shorthand to generate `Constraint` lists.
+The following:
+
+        order(process1, process2)
+
+is shorthand for
+
+        [Constraint(order = [process1.name(), process2.name()])]
+
+The `order` function accepts Process name strings `('foo', 'bar')` or the processes
+themselves, e.g. `foo=Process(name='foo', ...)`, `bar=Process(name='bar', ...)`,
+`constraints=order(foo, bar)`.
+
+#### resources
+
+Takes a `Resource` object, which specifies the amounts of CPU, memory, and disk space resources
+to allocate to the Task.
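+
+A minimal sketch, assuming the `Resources` constructor and the `MB` size helper used in other
+Aurora example configs:
+
+    resources = Resources(cpu = 1.0, ram = 128*MB, disk = 256*MB)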
+
+#### max_failures
+
+`max_failures` is the number of failed processes needed for the `Task` to be
+marked as failed.
+
+For example, assume a Task has two Processes and a `max_failures` value of `2`:
+
+        template = Process(max_failures=10)
+        task = Task(
+          name = "fail",
+          processes = [
+             template(name = "failing", cmdline = "exit 1"),
+             template(name = "succeeding", cmdline = "exit 0")
+          ],
+          max_failures=2)
+
+The `failing` Process could fail 10 times before being marked as permanently
+failed, and the `succeeding` Process could succeed on the first run. The Task
+would still succeed even though it only allows for two failed Processes: there
+would be 10 failed process runs but only 1 failed Process, and both Processes
+would have to fail for the Task to fail.
+
+#### max_concurrency
+
+For Tasks with a number of expensive but otherwise independent
+processes, you may want to limit the amount of concurrency
+the Thermos scheduler provides rather than artificially constraining
+it via `order` constraints. For example, a test framework may
+generate a task with 100 test-run processes that needs to run on
+a machine with only 4 cores. You can limit the amount of parallelism to
+4 by setting `max_concurrency=4` in your task configuration.
+
+For example, the following task spawns 180 Processes ("mappers")
+to compute individual elements of a 180 degree sine table, all dependent
+upon one final Process ("reducer") to tabulate the results:
+
+    def make_mapper(id):
+      return Process(
+        name = "mapper%03d" % id,
+        cmdline = "echo 'scale=50;s(%d\*4\*a(1)/180)' | bc -l >
+                   temp.sine_table.%03d" % (id, id))
+
+    def make_reducer():
+      return Process(name = "reducer", cmdline = "cat temp.\* | nl \> sine\_table.txt
+                     && rm -f temp.\*")
+
+    processes = map(make_mapper, range(180))
+
+    task = Task(
+      name = "mapreduce",
+      processes = processes + [make_reducer()],
+      constraints = [Constraint(order = [mapper.name(), 'reducer']) for mapper
+                     in processes],
+      max_concurrency = 8)
+
+#### finalization_wait
+
+Process execution is organized into three active stages: `ACTIVE`,
+`CLEANING`, and `FINALIZING`. The `ACTIVE` stage is when ordinary processes run.
+This stage lasts as long as Processes are running and the Task is healthy.
+The moment either all Processes have finished successfully or the Task has reached a
+maximum Process failure limit, it goes into the `CLEANING` stage and sends
+SIGTERMs to all currently running Processes and their process trees.
+Once all Processes have terminated, the Task goes into the `FINALIZING` stage
+and schedules all Processes with the `final` attribute set to `True`.
+
+This whole process from the end of `ACTIVE` stage to the end of `FINALIZING`
+must happen within `finalization_wait` seconds. If it does not
+finish during that time, all remaining Processes are sent SIGKILLs
+(or if they depend upon uncompleted Processes, are
+never invoked.)
+
+When running on Aurora, the `finalization_wait` is capped at 60 seconds.
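+
+A minimal sketch of a Task with a finalizing cleanup step, using the `final` Process attribute
+described earlier in this reference (process names and commands are illustrative only):
+
+    server  = Process(name = "server", cmdline = "./run_server.sh")
+    cleanup = Process(name = "cleanup", cmdline = "rm -rf /tmp/scratch", final = True)
+
+    task = Task(
+      name = "server",
+      processes = [server, cleanup],
+      finalization_wait = 60)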
+
+### Constraint Object
+
+Constraint objects currently support only a single ordering constraint, `order`,
+which specifies that its processes run sequentially in the order given. By
+default, all processes run in parallel when bound to a `Task` without
+ordering constraints.
+
+   param | type           | description
+   ----- | :----:         | -----------
+   order | List of String | List of processes by name (String) that should be run serially.
+
+### Resource Object
+
+Specifies the amount of CPU, RAM, and disk resources the task needs. See the
+[Resource Isolation document](../../features/resource-isolation/) for suggested values and to understand how
+resources are allocated.
+
+  param      | type    | description
+  -----      | :----:  | -----------
+  ```cpu```  | Float   | Fractional number of cores required by the task.
+  ```ram```  | Integer | Bytes of RAM required by the task.
+  ```disk``` | Integer | Bytes of disk required by the task.
+  ```gpu```  | Integer | Number of GPU cores required by the task.
+
+
+Job Schema
+==========
+
+### Job Objects
+
+*Note: Specifying a ```Container``` object as the value of the ```container``` property is
+  deprecated in favor of setting its value directly to the appropriate ```Docker``` or ```Mesos```
+  container type*
+
+   name | type | description
+   ------ | :-------: | -------
+  ```task``` | Task | The Task object to bind to this job. Required.
+  ```name``` | String | Job name. (Default: inherited from the task attribute's name)
+  ```role``` | String | Job role account. Required.
+  ```cluster``` | String | Cluster in which this job is scheduled. Required.
+  ```environment``` | String | Job environment, default ```devel```. Must be one of ```prod```, ```devel```, ```test``` or ```staging<number>```.
+  ```contact``` | String | Best email address to reach the owner of the job. For production jobs, this is usually a team mailing list.
+  ```instances```| Integer | Number of instances (sometimes referred to as replicas or shards) of the task to create. (Default: 1)
+  ```cron_schedule``` | String | Cron schedule in cron format. May only be used with non-service jobs. See [Cron Jobs](../../features/cron-jobs/) for more information. Default: None (not a cron job.)
+  ```cron_collision_policy``` | String | Policy to use when a cron job is triggered while a previous run is still active. KILL_EXISTING kills the previous run and schedules the new run; CANCEL_NEW lets the previous run continue and cancels the new run. (Default: KILL_EXISTING)
+  ```update_config``` | ```UpdateConfig``` object | Parameters for controlling the rate and policy of rolling updates.
+  ```constraints``` | dict | Scheduling constraints for the tasks. See the section on the [constraint specification language](#specifying-scheduling-constraints)
+  ```service``` | Boolean | If True, restart tasks regardless of success or failure. (Default: False)
+  ```max_task_failures``` | Integer | Maximum number of failures after which the task is considered to have failed (Default: 1) Set to -1 to allow for infinite failures
+  ```priority``` | Integer | Preemption priority to give the task (Default 0). Tasks with higher priorities may preempt tasks at lower priorities.
+  ```production``` | Boolean |  Whether or not this is a production task that may [preempt](../../features/multitenancy/#preemption) other tasks (Default: False). Production job role must have the appropriate [quota](../../features/multitenancy/#preemption).
+  ```health_check_config``` | ```HealthCheckConfig``` object | Parameters for controlling a task's health checks. HTTP health check is only used if a health port was assigned with a command line wildcard.
+  ```container``` | Choice of ```Container```, ```Docker``` or ```Mesos``` object | An optional container to run all processes inside of.
+  ```lifecycle``` | ```LifecycleConfig``` object | An optional task lifecycle configuration that dictates commands to be executed on startup/teardown.  HTTP lifecycle is enabled by default if the "health" port is requested.  See [LifecycleConfig Objects](#lifecycleconfig-objects) for more information.
+  ```tier``` | String | Task tier type. The default scheduler tier configuration allows for 3 tiers: `revocable`, `preemptible`, and `preferred`. The `revocable` tier requires the task to run with Mesos revocable resources. Setting the task's tier to `preemptible` allows for the possibility of that task being preempted by other tasks when cluster is running low on resources. The `preferred` tier prevents the task from using revocable resources and from being preempted. Since it is possible that a cluster is configured with a custom tier configuration, users should consult their cluster administrator to be informed of the tiers supported by the cluster. Attempts to schedule jobs with an unsupported tier will be rejected by the scheduler.
+  ```announce``` | ```Announcer``` object | Optionally enable Zookeeper ServerSet announcements. See [Announcer Objects] for more information.
+  ```enable_hooks``` | Boolean | Whether to enable [Client Hooks](../client-hooks/) for this job. (Default: False)
+
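+A minimal sketch of a `Job` using a handful of the attributes above, assuming a `task` object
+defined as in the Task Schema section (role, cluster, and contact values are placeholders):
+
+    jobs = [Job(
+      task = task,
+      name = "hello_world",
+      role = "www-data",
+      cluster = "example",
+      environment = "devel",
+      instances = 2,
+      service = True,
+      contact = "team@example.com")]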
+
+### UpdateConfig Objects
+
+Parameters for controlling the rate and policy of rolling updates.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+| ```watch_secs```             | Integer  | Minimum number of seconds a shard must remain in ```RUNNING``` state before considered a success (Default: 45)
+| ```max_per_shard_failures``` | Integer  | Maximum number of restarts per shard during update. Increments total failure count when this limit is exceeded. (Default: 0)
+| ```max_total_failures```     | Integer  | Maximum number of shard failures to be tolerated in total during an update. Cannot be greater than or equal to the total number of tasks in a job. (Default: 0)
+| ```rollback_on_failure```    | boolean  | When False, prevents auto rollback of a failed update (Default: True)
+| ```wait_for_batch_completion```| boolean | When True, all threads from a given batch will be blocked from picking up new instances until the entire batch is updated. This essentially simulates the legacy sequential updater algorithm. (Default: False)
+| ```pulse_interval_secs```    | Integer  |  Indicates a [coordinated update](../../features/job-updates/#coordinated-job-updates). If no pulses are received within the provided interval the update will be blocked. Beta-updater only. Will fail on submission when used with client updater. (Default: None)
+
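+A minimal sketch of how these parameters might be combined on a job (values are illustrative
+only; the object would be passed as the job's `update_config` attribute):
+
+    update_config = UpdateConfig(
+      batch_size = 5,
+      watch_secs = 60,
+      max_per_shard_failures = 2,
+      max_total_failures = 10)
+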
+### HealthCheckConfig Objects
+
+Parameters for controlling a task's health checks via HTTP or a shell command.
+
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```health_checker```           | HealthCheckerConfig | Configure what kind of health check to use.
+| ```initial_interval_secs```    | Integer   | Initial delay for performing a health check. (Default: 15)
+| ```interval_secs```            | Integer   | Interval on which to check the task's health. (Default: 10)
+| ```max_consecutive_failures``` | Integer   | Maximum number of consecutive failures that will be tolerated before considering a task unhealthy (Default: 0)
+| ```timeout_secs```             | Integer   | Health check timeout. (Default: 1)
+
+### HealthCheckerConfig Objects
+| param                          | type                | description
+| -------                        | :-------:           | --------
+| ```http```                     | HttpHealthChecker  | Configure health check to use HTTP. (Default)
+| ```shell```                    | ShellHealthChecker | Configure health check via a shell command.
+
+### HttpHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```endpoint```                 | String    | HTTP endpoint to check (Default: /health)
+| ```expected_response```        | String    | If not empty, fail the HTTP health check if the response differs. Case insensitive. (Default: ok)
+| ```expected_response_code```   | Integer   | If not zero, fail the HTTP health check if the response code differs. (Default: 0)
+
+### ShellHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```shell_command```            | String    | An alternative to HTTP health checking. Specifies a shell command that will be executed. Any non-zero exit status will be interpreted as a health check failure.
+
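+A minimal sketch combining these objects to use a shell-based health check (the shell command
+is a placeholder):
+
+    health_check_config = HealthCheckConfig(
+      health_checker = HealthCheckerConfig(
+        shell = ShellHealthChecker(shell_command = "pgrep -f my_service")),
+      interval_secs = 30,
+      max_consecutive_failures = 3)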
+
+### Announcer Objects
+
+If the `announce` field in the Job configuration is set, each task will be
+registered in the ServerSet `/aurora/role/environment/jobname` in the
+zookeeper ensemble configured by the executor (which can optionally be overridden by specifying
+the `zk_path` parameter).  If no Announcer object is specified,
+no announcement will take place.  For more information about ServerSets, see the [Service Discovery](../../features/service-discovery/)
+documentation.
+
+By default, the hostname in the registered endpoints will be the `--hostname` parameter
+that is passed to the mesos agent. To override the hostname value, the executor can be started
+with `--announcer-hostname=<overridden_value>`. If you decide to use `--announcer-hostname` and
+the overridden value needs to change for every executor, the executor has to be started inside a wrapper; see [Executor Wrapper](../../operations/configuration/#thermos-executor-wrapper).
+
+For example, if you want the hostname in the endpoint to be an IP address instead of the hostname,
+the `--hostname` parameter to the mesos agent can be set to the machine IP or the executor can
+be started with `--announcer-hostname=<host_ip>` while wrapping the executor inside a script.
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```primary_port```             | String    | Which named port to register as the primary endpoint in the ServerSet (Default: `http`)
+| ```portmap```                  | dict      | A mapping of additional endpoints to be announced in the ServerSet (Default: `{ 'aurora': '{{primary_port}}' }`)
+| ```zk_path```                  | String    | Zookeeper serverset path override (executor must be started with the `--announcer-allow-custom-serverset-path` parameter)
+
+#### Port aliasing with the Announcer `portmap`
+
+The primary endpoint registered in the ServerSet is the one allocated to the port
+specified by the `primary_port` in the `Announcer` object, by default
+the `http` port.  This port can be referenced from anywhere within a configuration
+as `{{thermos.ports[http]}}`.
+
+Without the port map, each named port would be allocated a unique port number.
+The `portmap` allows two different named ports to be aliased together.  The default
+`portmap` aliases the `aurora` port (i.e. `{{thermos.ports[aurora]}}`) to
+the `http` port.  Even though the two ports can be referenced independently,
+only one port is allocated by Mesos.  Any port referenced in a `Process` object
+but which is not in the portmap will be allocated dynamically by Mesos and announced as well.
+
+It is possible to use the portmap to alias names to static port numbers, e.g.
+`{'http': 80, 'https': 443, 'aurora': 'http'}`.  In this case, referencing
+`{{thermos.ports[aurora]}}` would look up `{{thermos.ports[http]}}` then
+find a static port 80.  No port would be requested of or allocated by Mesos.
+
+Static ports should be used cautiously as Aurora does nothing to prevent two
+tasks with the same static port allocations from being co-scheduled.
+External constraints such as agent attributes should be used to enforce such
+guarantees should they be needed.
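+
+A minimal sketch of an `Announcer` with an aliased port map, based on the attributes above
+(this simply restates the default aliasing of `aurora` to `http`):
+
+    announce = Announcer(
+      primary_port = "http",
+      portmap = {"aurora": "http"})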
+
+### Container Objects
+
+*Note: Both Docker and Mesos unified-container support are currently EXPERIMENTAL.*
+*Note: In order to correctly execute processes inside a job, the Docker container must have python 2.7 installed.*
+
+*Note: For a private docker registry, Mesos mandates that the docker credential file be named `.dockercfg`, even though docker may create a credential file with a different name on various platforms. Also, the `.dockercfg` file needs to be copied into the sandbox using the `-thermos_executor_resources` flag, specified while starting Aurora.*
+
+Describes the container the job's processes will run inside. If not using Docker or the Mesos
+unified-container, the container can be omitted from your job config.
+
+  param          | type           | description
+  -----          | :----:         | -----------
+  ```docker```   | Docker         | A docker container to use.
+  ```mesos```    | Mesos          | A mesos container to use.
+
+### Docker Object
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```image```      | String          | The name of the docker image to execute.  If the image does not exist locally it will be pulled with ```docker pull```.
+  ```parameters``` | List(Parameter) | Additional parameters to pass to the docker containerizer.
+
+### Docker Parameter Object
+
+Docker CLI parameters. This needs to be enabled by the scheduler `allow_docker_parameters` option.
+See [Docker Command Line Reference](https://docs.docker.com/reference/commandline/run/) for valid parameters.
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```name```       | String          | The name of the docker parameter. E.g. volume
+  ```value```      | String          | The value of the parameter. E.g. /usr/local/bin:/usr/bin:rw
+
+### Mesos Object
+
+  param            | type                           | description
+  -----            | :----:                         | -----------
+  ```image```      | Choice(AppcImage, DockerImage) | An optional filesystem image to use within this container.
+
+### AppcImage
+
+*Note: In order to correctly execute processes inside a job, the filesystem image must include python 2.7.*
+
+Describes an AppC filesystem image.
+
+  param          | type   | description
+  -----          | :----: | -----------
+  ```name```     | String | The name of the appc image.
+  ```image_id``` | String | The [image id](https://github.com/appc/spec/blob/master/spec/aci.md#image-id) of the appc image.
+
+### DockerImage
+
+*Note: In order to correctly execute processes inside a job, the filesystem image must include python 2.7.*
+
+Describes a Docker filesystem image.
+
+  param      | type   | description
+  -----      | :----: | -----------
+  ```name``` | String | The name of the docker image.
+  ```tag```  | String | The tag that identifies the docker image.
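+
+A minimal sketch of attaching a Docker filesystem image through the Mesos containerizer, using
+the objects above (image name and tag are placeholders); the resulting value would be set as the
+job's `container` attribute:
+
+    container = Mesos(image = DockerImage(name = "library/python", tag = "2.7"))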
+
+### LifecycleConfig Objects
+
+*Note: The only lifecycle configuration supported is the HTTP lifecycle via the HttpLifecycleConfig.*
+
+  param          | type                | description
+  -----          | :----:              | -----------
+  ```http```     | HttpLifecycleConfig | Configure the lifecycle manager to send lifecycle commands to the task via HTTP.
+
+### HttpLifecycleConfig Objects
+
+  param          | type            | description
+  -----          | :----:          | -----------
+  ```port```     | String          | The named port to send POST commands (Default: health)
+  ```graceful_shutdown_endpoint``` | String | Endpoint to hit to indicate that a task should gracefully shutdown. (Default: /quitquitquit)
+  ```shutdown_endpoint``` | String | Endpoint to hit to give a task its final warning before being killed. (Default: /abortabortabort)
+
+#### graceful_shutdown_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint to request that the task gracefully shut itself down.  This is a
+courtesy call before the `shutdown_endpoint` is invoked a fixed amount of
+time later.
+
+#### shutdown_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint as a final warning before the task is shut down.  If the task
+does not shut down on its own after this, it will be forcefully killed.
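+
+A minimal sketch of a lifecycle configuration that sends lifecycle commands to a differently
+named port (the port name is illustrative; the object would be passed as the job's `lifecycle`
+attribute):
+
+    lifecycle = LifecycleConfig(
+      http = HttpLifecycleConfig(port = "admin"))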
+
+
+Specifying Scheduling Constraints
+=================================
+
+In the `Job` object there is a map `constraints` from String to String
+allowing the user to tailor the schedulability of tasks within the job.
+
+The constraint map's key is the attribute name on which we
+constrain Tasks within our Job. The value is how we constrain them.
+There are two types of constraints: *limit constraints* and *value
+constraints*.
+
+| constraint    | description
+| ------------- | --------------
+| Limit         | A string that specifies a limit for a constraint. Starts with <code>'limit:</code> followed by an Integer and closing single quote, such as ```'limit:1'```.
+| Value         | A string that specifies a value for a constraint. To include a list of values, separate the values using commas. To negate the values of a constraint, start with a ```!```.
+
+Further details can be found in the [Scheduling Constraints](../../features/constraints/) feature
+description.
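+
+A minimal sketch of a constraint map on a `Job`, combining one value constraint and one limit
+constraint (the attribute names are cluster specific and purely illustrative):
+
+    constraints = {
+      "os": "centos7",
+      "rack": "limit:1",
+    }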
+
+
+Template Namespaces
+===================
+
+Currently, a few Pystachio namespaces have special semantics. Using them
+in your configuration allows you to tailor application behavior
+through environment introspection or to interact in special ways with the
+Aurora client or Aurora-provided services.
+
+### mesos Namespace
+
+The `mesos` namespace contains variables which relate to the `mesos` agent
+which launched the task. The `instance` variable can be used
+to distinguish between Task replicas.
+
+| variable name     | type       | description
+| --------------- | :--------: | -------------
+| ```instance```    | Integer    | The instance number of the created task. A job with 5 replicas has instance numbers 0, 1, 2, 3, and 4.
+| ```hostname``` | String | The instance hostname that the task was launched on.
+
+Please note, there is no uniqueness guarantee for `instance` in the presence of
+network partitions. If that is required, it should be baked in at the application
+level using a distributed coordination service such as Zookeeper.
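+
+For example, a sketch of a Process that records which instance and host it is running on
+(the output file name is illustrative):
+
+    process = Process(
+      name = "announce_instance",
+      cmdline = "echo 'instance {{mesos.instance}} on {{mesos.hostname}}' > instance.txt")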
+
+### thermos Namespace
+
+The `thermos` namespace contains variables that work directly on the
+Thermos platform in addition to Aurora. This namespace is fully
+compatible with Tasks invoked via the `thermos` CLI.
+
+| variable      | type                     | description                        |
+| :----------:  | ---------                | ------------                       |
+| ```ports```   | map of string to Integer | A map of names to port numbers     |
+| ```task_id``` | string                   | The task ID assigned to this task. |
+
+The `thermos.ports` namespace is automatically populated by Aurora when
+invoking tasks on Mesos. When running the `thermos` command directly,
+these ports must be explicitly mapped with the `-P` option.
+
+For example, if '{{`thermos.ports[http]`}}' is specified in a `Process`
+configuration, it is automatically extracted and auto-populated by
+Aurora, but must be specified with, for example, `thermos -P http:12345`
+to map `http` to port 12345 when running via the CLI.
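+
+For example, a sketch of a Process that binds a server to the named `http` port (the server
+command is illustrative):
+
+    process = Process(
+      name = "webserver",
+      cmdline = "./run_server --port={{thermos.ports[http]}}")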
diff --git a/source/documentation/0.15.0/reference/scheduler-configuration.md b/source/documentation/0.15.0/reference/scheduler-configuration.md
new file mode 100644
index 0000000..92ff474
--- /dev/null
+++ b/source/documentation/0.15.0/reference/scheduler-configuration.md
@@ -0,0 +1,234 @@
+# Scheduler Configuration Reference
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+A list of the available options can be seen by running `aurora-scheduler -help`.
+
+Please refer to the [Operator Configuration Guide](../../operations/configuration/) for details on how
+to properly set the most important options.
+
+```
+$ aurora-scheduler -help
+-------------------------------------------------------------------------
+-h or -help to print this help message
+
+Required flags:
+-backup_dir [not null]
+	Directory to store backups under. Will be created if it does not exist.
+-cluster_name [not null]
+	Name to identify the cluster being served.
+-db_max_active_connection_count [must be > 0]
+	Max number of connections to use with database via MyBatis
+-db_max_idle_connection_count [must be > 0]
+	Max number of idle connections to the database via MyBatis
+-framework_authentication_file
+	Properties file which contains framework credentials to authenticate with Mesosmaster. Must contain the properties 'aurora_authentication_principal' and 'aurora_authentication_secret'.
+-mesos_master_address [not null]
+	Address for the mesos master, can be a socket address or zookeeper path.
+-mesos_role
+	The Mesos role this framework will register as. The default is to leave this empty, and the framework will register without any role and only receive unreserved resources in offers.
+-serverset_path [not null, must be non-empty]
+	ZooKeeper ServerSet path to register at.
+-shiro_after_auth_filter
+	Fully qualified class name of the servlet filter to be applied after the shiro auth filters are applied.
+-thermos_executor_path
+	Path to the thermos executor entry point.
+-tier_config [file must be readable]
+	Configuration file defining supported task tiers, task traits and behaviors.
+-zk_endpoints [must have at least 1 item]
+	Endpoint specification for the ZooKeeper servers.
+
+Optional flags:
+-allow_docker_parameters (default false)
+	Allow to pass docker container parameters in the job.
+-allowed_container_types (default [MESOS])
+	Container types that are allowed to be used by jobs.
+-async_slot_stat_update_interval (default (1, mins))
+	Interval on which to try to update open slot stats.
+-async_task_stat_update_interval (default (1, hrs))
+	Interval on which to try to update resource consumption stats.
+-async_worker_threads (default 8)
+	The number of worker threads to process async task operations with.
+-backup_interval (default (1, hrs))
+	Minimum interval on which to write a storage backup.
+-cron_scheduler_num_threads (default 100)
+	Number of threads to use for the cron scheduler thread pool.
+-cron_start_initial_backoff (default (1, secs))
+	Initial backoff delay while waiting for a previous cron run to be killed.
+-cron_start_max_backoff (default (1, mins))
+	Max backoff delay while waiting for a previous cron run to be killed.
+-cron_timezone (default GMT)
+	TimeZone to use for cron predictions.
+-custom_executor_config [file must exist, file must be readable]
+	Path to custom executor settings configuration file.
+-db_lock_timeout (default (1, mins))
+	H2 table lock timeout
+-db_row_gc_interval (default (2, hrs))
+	Interval on which to scan the database for unused row references.
+-default_docker_parameters (default {})
+	Default docker parameters for any job that does not explicitly declare parameters.
+-dlog_max_entry_size (default (512, KB))
+	Specifies the maximum entry size to append to the log. Larger entries will be split across entry Frames.
+-dlog_shutdown_grace_period (default (2, secs))
+	Specifies the maximum time to wait for scheduled checkpoint and snapshot actions to complete before forcibly shutting down.
+-dlog_snapshot_interval (default (1, hrs))
+	Specifies the frequency at which snapshots of local storage are taken and written to the log.
+-enable_cors_for
+	List of domains for which CORS support should be enabled.
+-enable_h2_console (default false)
+	Enable H2 DB management console.
+-enable_preemptor (default true)
+	Enable the preemptor and preemption
+-executor_user (default root)
+	User to start the executor. Defaults to "root". Set this to an unprivileged user if the mesos master was started with "--no-root_submissions". If set to anything other than "root", the executor will ignore the "role" setting for jobs since it can't use setuid() anymore. This means that all your jobs will run under the specified user and the user has to exist on the mesos slaves.
+-first_schedule_delay (default (1, ms))
+	Initial amount of time to wait before first attempting to schedule a PENDING task.
+-flapping_task_threshold (default (5, mins))
+	A task that repeatedly runs for less than this time is considered to be flapping.
+-framework_announce_principal (default false)
+	When 'framework_authentication_file' flag is set, the FrameworkInfo registered with the mesos master will also contain the principal. This is necessary if you intend to use mesos authorization via mesos ACLs. The default will change in a future release. Changing this value is backwards incompatible. For details, see MESOS-703.
+-framework_failover_timeout (default (21, days))
+	Time after which a framework is considered deleted.  SHOULD BE VERY HIGH.
+-framework_name (default TwitterScheduler)
+	Name used to register the Aurora framework with Mesos. Changing this value can be backwards incompatible. For details, see MESOS-703.
+-global_container_mounts (default [])
+	A comma separated list of mount points (in host:container form) to mount into all (non-mesos) containers.
+-history_max_per_job_threshold (default 100)
+	Maximum number of terminated tasks to retain in a job history.
+-history_min_retention_threshold (default (1, hrs))
+	Minimum guaranteed time for task history retention before any pruning is attempted.
+-history_prune_threshold (default (2, days))
+	Time after which the scheduler will prune terminated task history.
+-hostname
+	The hostname to advertise in ZooKeeper instead of the locally-resolved hostname.
+-http_authentication_mechanism (default NONE)
+	HTTP Authentication mechanism to use.
+-http_port (default 0)
+	The port to start an HTTP server on.  Default value will choose a random port.
+-initial_flapping_task_delay (default (30, secs))
+	Initial amount of time to wait before attempting to schedule a flapping task.
+-initial_schedule_penalty (default (1, secs))
+	Initial amount of time to wait before attempting to schedule a task that has failed to schedule.
+-initial_task_kill_retry_interval (default (5, secs))
+	When killing a task, retry after this delay if mesos has not responded, backing off up to transient_task_state_timeout
+-job_update_history_per_job_threshold (default 10)
+	Maximum number of completed job updates to retain in a job update history.
+-job_update_history_pruning_interval (default (15, mins))
+	Job update history pruning interval.
+-job_update_history_pruning_threshold (default (30, days))
+	Time after which the scheduler will prune completed job update history.
+-kerberos_debug (default false)
+	Produce additional Kerberos debugging output.
+-kerberos_server_keytab
+	Path to the server keytab.
+-kerberos_server_principal
+	Kerberos server principal to use, usually of the form HTTP/aurora.example.com@EXAMPLE.COM
+-max_flapping_task_delay (default (5, mins))
+	Maximum delay between attempts to schedule a flapping task.
+-max_leading_duration (default (1, days))
+	After leading for this duration, the scheduler should commit suicide.
+-max_registration_delay (default (1, mins))
+	Max allowable delay to allow the driver to register before aborting
+-max_reschedule_task_delay_on_startup (default (30, secs))
+	Upper bound of random delay for pending task rescheduling on scheduler startup.
+-max_saved_backups (default 48)
+	Maximum number of backups to retain before deleting the oldest backups.
+-max_schedule_attempts_per_sec (default 40.0)
+	Maximum number of scheduling attempts to make per second.
+-max_schedule_penalty (default (1, mins))
+	Maximum delay between attempts to schedule a PENDING tasks.
+-max_status_update_batch_size (default 1000) [must be > 0]
+	The maximum number of status updates that can be processed in a batch.
+-max_tasks_per_job (default 4000) [must be > 0]
+	Maximum number of allowed tasks in a single job.
+-max_update_instance_failures (default 20000) [must be > 0]
+	Upper limit on the number of failures allowed during a job update. This helps cap potentially unbounded entries into storage.
+-min_offer_hold_time (default (5, mins))
+	Minimum amount of time to hold a resource offer before declining.
+-native_log_election_retries (default 20)
+	The maximum number of attempts to obtain a new log writer.
+-native_log_election_timeout (default (15, secs))
+	The timeout for a single attempt to obtain a new log writer.
+-native_log_file_path
+	Path to a file to store the native log data in.  If the parent directory does not exist it will be created.
+-native_log_quorum_size (default 1)
+	The size of the quorum required for all log mutations.
+-native_log_read_timeout (default (5, secs))
+	The timeout for doing log reads.
+-native_log_write_timeout (default (3, secs))
+	The timeout for doing log appends and truncations.
+-native_log_zk_group_path
+	A zookeeper node for use by the native log to track the master coordinator.
+-offer_filter_duration (default (5, secs))
+	Duration after which we expect Mesos to re-offer unused resources. A short duration improves scheduling performance in smaller clusters, but might lead to resource starvation for other frameworks if you run many frameworks in your cluster.
+-offer_hold_jitter_window (default (1, mins))
+	Maximum amount of random jitter to add to the offer hold time window.
+-offer_reservation_duration (default (3, mins))
+	Time to reserve a slave's offers while trying to satisfy a task preempting another.
+-populate_discovery_info (default false)
+	If true, Aurora populates DiscoveryInfo field of Mesos TaskInfo.
+-preemption_delay (default (3, mins))
+	Time interval after which a pending task becomes eligible to preempt other tasks
+-preemption_slot_hold_time (default (5, mins))
+	Time to hold a preemption slot found before it is discarded.
+-preemption_slot_search_interval (default (1, mins))
+	Time interval between pending task preemption slot searches.
+-receive_revocable_resources (default false)
+	Allows receiving revocable resource offers from Mesos.
+-reconciliation_explicit_interval (default (60, mins))
+	Interval on which scheduler will ask Mesos for status updates of all non-terminal tasks known to scheduler.
+-reconciliation_implicit_interval (default (60, mins))
+	Interval on which scheduler will ask Mesos for status updates of all non-terminal tasks known to Mesos.
+-reconciliation_initial_delay (default (1, mins))
+	Initial amount of time to delay task reconciliation after scheduler start up.
+-reconciliation_schedule_spread (default (30, mins))
+	Difference between explicit and implicit reconciliation intervals intended to create a non-overlapping task reconciliation schedule.
+-require_docker_use_executor (default true)
+	If false, Docker tasks may run without an executor (EXPERIMENTAL)
+-shiro_ini_path
+	Path to shiro.ini for authentication and authorization configuration.
+-shiro_realm_modules (default [org.apache.aurora.scheduler.app.MoreModules$1@13c9d689])
+	Guice modules for configuring Shiro Realms.
+-sla_non_prod_metrics (default [])
+	Metric categories collected for non production tasks.
+-sla_prod_metrics (default [JOB_UPTIMES, PLATFORM_UPTIME, MEDIANS])
+	Metric categories collected for production tasks.
+-sla_stat_refresh_interval (default (1, mins))
+	The SLA stat refresh interval.
+-slow_query_log_threshold (default (25, ms))
+	Log all queries that take at least this long to execute.
+-stat_retention_period (default (1, hrs))
+	Time for a stat to be retained in memory before expiring.
+-stat_sampling_interval (default (1, secs))
+	Statistic value sampling interval.
+-thermos_executor_cpu (default 0.25)
+	The number of CPU cores to allocate for each instance of the executor.
+-thermos_executor_flags
+	Extra arguments to be passed to the thermos executor
+-thermos_executor_ram (default (128, MB))
+	The amount of RAM to allocate for each instance of the executor.
+-thermos_executor_resources (default [])
+	A comma separated list of additional resources to copy into the sandbox. Note: if thermos_executor_path is not the thermos_executor.pex file itself, this must include it.
+-thermos_home_in_sandbox (default false)
+	If true, changes HOME to the sandbox before running the executor. This primarily has the effect of causing the executor and runner to extract themselves into the sandbox.
+-transient_task_state_timeout (default (5, mins))
+	The amount of time after which to treat a task stuck in a transient state as LOST.
+-use_beta_db_task_store (default false)
+	Whether to use the experimental database-backed task store.
+-viz_job_url_prefix (default )
+	URL prefix for job container stats.
+-webhook_config [file must be readable]
+    File to configure a HTTP webhook to receive task state change events.
+-zk_chroot_path
+	chroot path to use for the ZooKeeper connections
+-zk_digest_credentials
+	user:password to use when authenticating with ZooKeeper.
+-zk_in_proc (default false)
+	Launches an embedded zookeeper server for local testing causing -zk_endpoints to be ignored if specified.
+-zk_session_timeout (default (4, secs))
+	The ZooKeeper session timeout.
+-zk_use_curator (default false)
+	Uses Apache Curator as the zookeeper client; otherwise a copy of Twitter commons/zookeeper (the legacy library) is used.
+-------------------------------------------------------------------------
+```
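+
+As a sketch only (paths and addresses are placeholders; see the Operator Configuration Guide for
+recommended values), some of the flags above might be combined as:
+
+```
+$ aurora-scheduler \
+    -cluster_name=example \
+    -backup_dir=/var/lib/aurora/backups \
+    -mesos_master_address=zk://zk1.example.com:2181/mesos/master \
+    -serverset_path=/aurora/scheduler \
+    -zk_endpoints=zk1.example.com:2181 \
+    -thermos_executor_path=/usr/share/aurora/bin/thermos_executor.pex \
+    -native_log_zk_group_path=/aurora/replicated-log \
+    -native_log_file_path=/var/lib/aurora/scheduler/db
+```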
diff --git a/source/documentation/0.15.0/reference/scheduler-endpoints.md b/source/documentation/0.15.0/reference/scheduler-endpoints.md
new file mode 100644
index 0000000..d302e90
--- /dev/null
+++ b/source/documentation/0.15.0/reference/scheduler-endpoints.md
@@ -0,0 +1,19 @@
+# HTTP endpoints
+
+There are a number of HTTP endpoints that the Aurora scheduler exposes. These allow various
+operational tasks to be performed on the scheduler. Below is the list of all such endpoints
+and a brief explanation of what they do.
+
+## Leader health
+The /leaderhealth endpoint enables performing health checks on the scheduler instances in order
+to forward requests to the leading scheduler. This is typically used by a load balancer such as
+HAProxy or AWS ELB.
+
+When an HTTP GET request is issued on this endpoint, it responds as follows:
+
+- If the instance that received the GET request is the leading scheduler, an HTTP status code of
+  200 (OK) is returned.
+- If the instance that received the GET request is not the leading scheduler but a leader does
+  exist, an HTTP status code of 503 (SERVICE_UNAVAILABLE) is returned.
+- If no leader currently exists or the leader is unknown, an HTTP status code of 502
+  (BAD_GATEWAY) is returned.
\ No newline at end of file
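+
+For example (the host name is illustrative and the port depends on the scheduler's `-http_port`
+setting), a load balancer probe can be simulated with:
+
+    curl -s -o /dev/null -w "%{http_code}\n" http://scheduler.example.com:8081/leaderhealth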
diff --git a/source/documentation/0.15.0/reference/task-lifecycle.md b/source/documentation/0.15.0/reference/task-lifecycle.md
new file mode 100644
index 0000000..4dcb481
--- /dev/null
+++ b/source/documentation/0.15.0/reference/task-lifecycle.md
@@ -0,0 +1,146 @@
+# Task Lifecycle
+
+When Aurora reads a configuration file and finds a `Job` definition, it:
+
+1.  Evaluates the `Job` definition.
+2.  Splits the `Job` into its constituent `Task`s.
+3.  Sends those `Task`s to the scheduler.
+4.  The scheduler puts the `Task`s into `PENDING` state, starting each
+    `Task`'s life cycle.
+
+
+![Life of a task](../images/lifeofatask.png)
+
+Please note, a couple of task states described below are missing from
+this state diagram.
+
+
+## PENDING to RUNNING states
+
+When a `Task` is in the `PENDING` state, the scheduler constantly
+searches for machines satisfying that `Task`'s resource request
+requirements (RAM, disk space, CPU time) while maintaining configuration
+constraints such as "a `Task` must run on machines  dedicated  to a
+particular role" or attribute limit constraints such as "at most 2
+`Task`s from the same `Job` may run on each rack". When the scheduler
+finds a suitable match, it assigns the `Task` to a machine and puts the
+`Task` into the `ASSIGNED` state.
+
+From the `ASSIGNED` state, the scheduler sends an RPC to the agent
+machine containing `Task` configuration, which the agent uses to spawn
+an executor responsible for the `Task`'s lifecycle. When the scheduler
+receives an acknowledgment that the machine has accepted the `Task`,
+the `Task` goes into `STARTING` state.
+
+`STARTING` state initializes a `Task` sandbox. When the sandbox is fully
+initialized, Thermos begins to invoke `Process`es. Also, the agent
+machine sends an update to the scheduler that the `Task` is
+in `RUNNING` state.
+
+
+
+## RUNNING to terminal states
+
+There are various ways that an active `Task` can transition into a terminal
+state. By definition, it can never leave this state. However, depending on
+nature of the termination and the originating `Job` definition
+(e.g. `service`, `max_task_failures`), a replacement `Task` might be
+scheduled.
+
+### Natural Termination: FINISHED, FAILED
+
+A `RUNNING` `Task` can terminate without direct user interaction. For
+example, it may be a finite computation that finishes, even something as
+simple as `echo hello world.`, or it could be an exceptional condition in
+a long-lived service. If the `Task` is successful (its underlying
+processes have succeeded with exit status `0` or finished without
+reaching failure limits) it moves into `FINISHED` state. If it finished
+after reaching a set of failure limits, it goes into `FAILED` state.
+
+A terminated `Task` that is subject to rescheduling will be temporarily
+`THROTTLED` if it is considered to be flapping. A task is flapping if its
+previous invocation was terminated after less than 5 minutes (scheduler
+default). The time penalty a task has to remain in the `THROTTLED` state
+before it is eligible for rescheduling increases with each consecutive
+failure.
+
+### Forceful Termination: KILLING, RESTARTING
+
+You can terminate a `Task` by issuing an `aurora job kill` command, which
+moves it into `KILLING` state. The scheduler then sends the agent a
+request to terminate the `Task`. If the scheduler receives a successful
+response, it moves the Task into `KILLED` state and never restarts it.
+
+If a `Task` is forced into the `RESTARTING` state via the `aurora job restart`
+command, the scheduler kills the underlying task but in parallel schedules
+an identical replacement for it.
+
+In any case, the responsible executor on the agent follows an escalation
+sequence when killing a running task:
+
+  1. If a `HttpLifecycleConfig` is not present, skip to (4).
+  2. Send a POST to the `graceful_shutdown_endpoint` and wait 5 seconds.
+  3. Send a POST to the `shutdown_endpoint` and wait 5 seconds.
+  4. Send SIGTERM (`kill`) and wait at most `finalization_wait` seconds.
+  5. Send SIGKILL (`kill -9`).
+
+If the executor notices that all `Process`es in a `Task` have aborted
+during this sequence, it will not proceed with subsequent steps.
+Note that graceful shutdown is best-effort, and due to the many
+inevitable realities of distributed systems, it may not be performed.
+
+### Unexpected Termination: LOST
+
+If a `Task` stays in a transient task state for too long (such as `ASSIGNED`
+or `STARTING`), the scheduler forces it into `LOST` state, creating a new
+`Task` in its place that's sent into `PENDING` state.
+
+In addition, if the Mesos core tells the scheduler that an agent has
+become unhealthy (or outright disappeared), the `Task`s assigned to that
+agent go into `LOST` state and new `Task`s are created in their place.
+From `PENDING` state, there is no guarantee a `Task` will be reassigned
+to the same machine unless job constraints explicitly force it there.
+
+### Giving Priority to Production Tasks: PREEMPTING
+
+Sometimes a Task needs to be interrupted, such as when a non-production
+Task's resources are needed by a higher priority production Task. This
+type of interruption is called a *pre-emption*. When this happens in
+Aurora, the non-production Task is killed and moved into
+the `PREEMPTING` state when both of the following are true:
+
+- The task being killed is a non-production task.
+- The other task is a `PENDING` production task that hasn't been
+  scheduled due to a lack of resources.
+
+The scheduler UI shows the non-production task was preempted in favor of
+the production task. At some point, tasks in `PREEMPTING` move to `KILLED`.
+
+Note that non-production tasks consuming many resources are likely to be
+preempted in favor of production tasks.
+
+### Making Room for Maintenance: DRAINING
+
+Cluster operators can put an agent into maintenance mode. This will transition
+all `Task`s running on this agent into `DRAINING` and eventually to `KILLED`.
+Drained `Task`s will be restarted on other agents for which no maintenance
+has been announced yet.
+
+
+
+## State Reconciliation
+
+Due to the many inevitable realities of distributed systems, there might
+be a mismatch of perceived and actual cluster state (e.g. a machine returns
+from a `netsplit` but the scheduler has already marked all its `Task`s as
+`LOST` and rescheduled them).
+
+Aurora regularly runs a state reconciliation process in order to detect
+and correct such issues (e.g. by killing the errant `RUNNING` tasks).
+By default, the proper detection of all failure scenarios and inconsistencies
+may take up to an hour.
+
+To emphasize this point: there is no uniqueness guarantee for a single
+instance of a job in the presence of network partitions. If the `Task`
+requires that, it should be baked in at the application level using a
+distributed coordination service such as Zookeeper.
diff --git a/source/documentation/0.16.0/additional-resources/presentations.md b/source/documentation/0.16.0/additional-resources/presentations.md
new file mode 100644
index 0000000..a59d37d
--- /dev/null
+++ b/source/documentation/0.16.0/additional-resources/presentations.md
@@ -0,0 +1,80 @@
+# Apache Aurora Presentations
+Video and slides from presentations and panel discussions about Apache Aurora.
+
+_(Listed in date descending order)_
+
+<table>
+
+	<tr>
+		<td><img src="/documentation/0.16.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png" alt="Mesos and Aurora on a Small Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=q5iIqhaCJ_o">Mesos &amp; Aurora on a Small Scale (Video)</a></strong>
+		<p>Presented by Florian Pfeiffer</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.16.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png" alt="SLA Aware Maintenance for Operators Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=tZ0-SISvCis">SLA Aware Maintenance for Operators (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.16.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png" alt="Shipping Code with Aurora Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=y1hi7K1lPkk">Shipping Code with Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.16.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png" alt="Twitter Production Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=nNrh-gdu9m4">Twitter’s Production Scale: Mesos and Aurora Operations (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.16.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png" alt="From Monolith to Microservices with Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=yXkOgnyK4Hw">From Monolith to Microservices w/ Aurora (Video)</a></strong>
+		<p>Presented by Thanos Baskous, Tony Dong, Dobromir Montauk</p>
+		<p>April 30, 2015 at <a href="http://www.meetup.com/Bay-Area-Apache-Aurora-Users-Group/events/221219480/">Bay Area Apache Aurora Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.16.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png" alt="Aurora + Mesos in Practice at Twitter Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=1XYJGX_qZVU">Aurora + Mesos in Practice at Twitter (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 07, 2015 at <a href="http://www.bigeng.io/aurora-mesos-in-practice-at-twitter">Bigcommerce TechTalk</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.16.0/images/presentations/02_28_2015_apache_aurora_thumb.png" alt="Apache Auroraの始めかた Slideshow Thumbnail" /></td>
+		<td><strong><a href="http://www.slideshare.net/zembutsu/apache-aurora-introduction-and-tutorial-osc15tk">Apache Auroraの始めかた (Slides)</a></strong>
+		<p>Presented by Masahito Zembutsu</p>
+		<p>February 28, 2015 at <a href="http://www.ospn.jp/osc2015-spring/">Open Source Conference 2015 Tokyo Spring</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.16.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png" alt="Apache Aurora Adopters Panel Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=2Jsj0zFdRlg">Apache Aurora Adopters Panel (Video)</a></strong>
+		<p>Panelists Ben Staffin, Josh Adams, Bill Farner, Berk Demir</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.16.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png" alt="Operating Apache Aurora and Mesos at Twitter Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=E4lxX6epM_U">Operating Apache Aurora and Mesos at Twitter (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.16.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png" alt="Apache Aurora and Mesos at TellApart" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=ZZXtXLvTXAE">Apache Aurora and Mesos at TellApart (Video)</a></strong>
+		<p>Presented by Steve Niemitz</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.16.0/images/presentations/08_21_2014_past_present_future_thumb.png" alt="Past, Present, and Future of the Aurora Scheduler Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=Dsc5CPhKs4o">Past, Present, and Future of the Aurora Scheduler (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 21, 2014 at <a href="http://events.linuxfoundation.org/events/archive/2014/mesoscon">#MesosCon 2014</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.16.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png" alt="Introduction to Apache Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=asd_h6VzaJc">Introduction to Apache Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 25, 2014 at <a href="https://www.eventbrite.com/e/aurora-and-mesosframeworksmeetup-tickets-10850994617">Aurora and Mesos Frameworks Meetup</a></p></td>
+	</tr>
+</table>
diff --git a/source/documentation/0.16.0/additional-resources/tools.md b/source/documentation/0.16.0/additional-resources/tools.md
new file mode 100644
index 0000000..7f3613d
--- /dev/null
+++ b/source/documentation/0.16.0/additional-resources/tools.md
@@ -0,0 +1,24 @@
+# Tools
+
+Various tools integrate with Aurora. Is there a tool missing? Let us know, or submit a patch to add it!
+
+* Load-balancing technology used to direct traffic to services running on Aurora:
+  - [synapse](https://github.com/airbnb/synapse) based on HAProxy
+  - [aurproxy](https://github.com/tellapart/aurproxy) based on nginx
+  - [jobhopper](https://github.com/benley/aurora-jobhopper) performs HTTP redirects for easy developer and administrator access
+
+* RPC libraries that integrate with Aurora's [service discovery mechanism](../../features/service-discovery/):
+  - [linkerd](https://linkerd.io/) RPC proxy
+  - [finagle](https://twitter.github.io/finagle) (Scala)
+  - [scales](https://github.com/steveniemitz/scales) (Python)
+
+* Monitoring:
+  - [collectd-aurora](https://github.com/zircote/collectd-aurora) for cluster monitoring using collectd
+  - [Prometheus Aurora exporter](https://github.com/tommyulfsparre/aurora_exporter) for cluster monitoring using Prometheus
+  - [Prometheus service discovery integration](http://prometheus.io/docs/operating/configuration/#zookeeper-serverset-sd-configurations-serverset_sd_config) for discovering and monitoring services running on Aurora
+
+* Packaging and deployment:
+  - [aurora-packaging](https://github.com/apache/aurora-packaging), the source of the official Aurora packages
+
+* Thrift Clients:
+  - [gorealis](https://github.com/rdelval/gorealis) for communicating with the scheduler using Go
diff --git a/source/documentation/0.16.0/contributing.md b/source/documentation/0.16.0/contributing.md
new file mode 100644
index 0000000..45e786e
--- /dev/null
+++ b/source/documentation/0.16.0/contributing.md
@@ -0,0 +1,93 @@
+## Get the Source Code
+
+First things first, you'll need the source! The Aurora source is available from Apache git:
+
+    git clone https://gitbox.apache.org/repos/asf/aurora
+
+Read the Style Guides
+---------------------
+Aurora's codebase is primarily Java and Python and conforms to the Twitter Commons styleguides for
+both languages.
+
+- [Java Style Guide](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/styleguide.md)
+- [Python Style Guide](https://github.com/twitter/commons/blob/master/src/python/twitter/common/styleguide.md)
+
+## Find Something to Do
+
+There are issues in [Jira](https://issues.apache.org/jira/browse/AURORA) with the
+["newbie" label](https://issues.apache.org/jira/issues/?jql=project%20%3D%20AURORA%20AND%20labels%20%3D%20newbie%20and%20resolution%3Dunresolved)
+that are good starting places for new Aurora contributors; pick one of these and dive in! To assign
+a task to yourself, first ask for your JIRA id to be whitelisted by either asking in IRC/Slack or by
+emailing dev@apache.aurora.org. Once your JIRA account has been whitelisted you can assign tickets
+to yourself. The next step is to prepare your patch and finally post it for review.
+
+## Getting your ReviewBoard Account
+
+Go to https://reviews.apache.org and create an account.
+
+## Setting up your ReviewBoard Environment
+
+Run `./rbt status`. The first time this runs it will bootstrap and you will be asked to login.
+Subsequent runs will cache your login credentials.
+
+## Submitting a Patch for Review
+
+Post a review with `rbt`, fill out the fields in your browser and hit Publish.
+
+    ./rbt post -o
+
+If you're unsure about who to add as a reviewer, you can default to adding Bill Farner (wfarner) and
+Joshua Cohen (jcohen). They will take care of finding an appropriate reviewer for the patch.
+
+Once you've done this, you probably want to mark the associated Jira issue as Reviewable.
+
+## Updating an Existing Review
+
+Incorporate review feedback, make some more commits, update your existing review, fill out the
+fields in your browser and hit Publish.
+
+    ./rbt post -o -r <RB_ID>
+
+## Getting Your Review Merged
+
+If you're not an Aurora committer, one of the committers will merge your change in as described
+below. Generally, the last reviewer to give the review a 'Ship It!' will be responsible.
+
+### Merging Your Own Review (Committers)
+
+Once you have shipits from the right committers, merge your changes in a single commit and mark
+the review as submitted. The typical workflow is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>  # Verify the automatically-generated commit message looks sane,
+                            # editing if necessary.
+    git show master         # Verify everything looks sane
+    git push origin master
+    ./rbt close <RB_ID>
+
+Note that even if you're developing using feature branches you will not use `git merge` - each
+commit will be an atomic change accompanied by a ReviewBoard entry.
+
+### Merging Someone Else's Review
+
+Sometimes you'll need to merge someone else's RB. The typical workflow for this is
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>
+    git show master  # Verify everything looks sane, author is correct
+    git push origin master
+
+Note for committers: while we generally use the commit message generated by `./rbt patch` some
+changes are often required:
+
+1. Ensure the commit message does not exceed 100 characters per line.
+2. Remove the "Testing Done" section. It's generally redundant (can be seen by checking the linked
+  review) or entirely irrelevant to the commit itself.
+
+## Cleaning Up
+
+Your patch has landed, congratulations! The last thing you'll want to do before moving on to your
+next fix is to clean up your Jira and ReviewBoard entries. The former should be marked as
+"Resolved" while the latter should be marked as "Submitted".
diff --git a/source/documentation/0.16.0/development/client.md b/source/documentation/0.16.0/development/client.md
new file mode 100644
index 0000000..079c471
--- /dev/null
+++ b/source/documentation/0.16.0/development/client.md
@@ -0,0 +1,81 @@
+Developing the Aurora Client
+============================
+
+The client is written in Python, and uses the
+[Pants](http://pantsbuild.github.io/python-readme.html) build tool.
+
+
+Building and Testing
+--------------------
+
+Building and testing the client code are both done using Pants. The relevant targets to know about
+are:
+
+   * Build a client executable: `./pants binary src/main/python/apache/aurora/client:aurora`
+   * Test client code: `./pants test src/test/python/apache/aurora/client/cli:cli`
+
+If you want to build a source distribution of the client, you need to run `./build-support/release/make-python-sdists`.
+
+
+Running/Debugging
+------------------
+
+For manually testing client changes against a cluster, we use [Vagrant](https://www.vagrantup.com/).
+To start a virtual cluster, you need to install Vagrant, and then run `vagrant up` from the root of
+the aurora workspace. This will create a vagrant host named "devcluster", with a Mesos master, a set
+of Mesos agents, and an Aurora scheduler.
+
+If you have a change you would like to test in your local cluster, you'll rebuild the client:
+
+    vagrant ssh -c 'aurorabuild client'
+
+Once this completes, the `aurora` command will reflect your changes.
+
+
+Running/Debugging in PyCharm
+-----------------------------
+
+It's possible to use PyCharm to run and debug both the client and client tests in an IDE. In order
+to do this, first run:
+
+    build-support/python/make-pycharm-virtualenv
+
+This script will configure a virtualenv with all of our Python requirements. Once the script
+completes it will emit instructions for configuring PyCharm:
+
+    Your PyCharm environment is now set up.  You can open the project root
+    directory with PyCharm.
+
+    Once the project is loaded:
+      - open project settings
+      - click 'Project Interpreter'
+      - click the cog in the upper-right corner
+      - click 'Add Local'
+      - select 'build-support/python/pycharm.venv/bin/python'
+      - click 'OK'
+
+### Running/Debugging Tests
+After following these instructions, you should now be able to run/debug tests directly from the IDE
+by right-clicking on a test (or test class) and choosing to run or debug:
+
+[![Debug Client Test](../images/debug-client-test.png)](../images/debug-client-test.png)
+
+If you've set a breakpoint, you can see the run will now stop and let you debug:
+
+[![Debugging Client Test](../images/debugging-client-test.png)](../images/debugging-client-test.png)
+
+### Running/Debugging the Client
+Actually running and debugging the client is unfortunately a bit more complex. You'll need to create
+a Run configuration:
+
+* Go to Run → Edit Configurations
+* Click the + icon to add a new configuration.
+* Choose python and name the configuration 'client'.
+* Set the script path to `/your/path/to/aurora/src/main/python/apache/aurora/client/cli/client.py`
+* Set the script parameters to the command you want to run (e.g. `job status <job key>`)
+* Expand the Environment section and click the ellipsis to add a new environment variable
+* Click the + at the bottom to add a new variable named AURORA_CONFIG_ROOT whose value is the
+  path where your cluster configuration can be found. For example, to talk to the scheduler
+  running in the vagrant image, it would be set to `/your/path/to/aurora/examples/vagrant` (this
+  is the directory where our example clusters.json is found).
+* You should now be able to run and debug this configuration!
diff --git a/source/documentation/0.16.0/development/committers-guide.md b/source/documentation/0.16.0/development/committers-guide.md
new file mode 100644
index 0000000..5c377bc
--- /dev/null
+++ b/source/documentation/0.16.0/development/committers-guide.md
@@ -0,0 +1,102 @@
+Committer's Guide
+=================
+
+Information for official Apache Aurora committers.
+
+Setting up your email account
+-----------------------------
+Once your Apache ID has been set up you can configure your account and add ssh keys and setup an
+email forwarding address at
+
+    http://id.apache.org
+
+Additional instructions for setting up your new committer email can be found at
+
+    http://www.apache.org/dev/user-email.html
+
+The recommended setup is to configure all services (mailing lists, JIRA, ReviewBoard) to send
+emails to your @apache.org email address.
+
+
+Creating a gpg key for releases
+-------------------------------
+In order to create a release candidate you will need a gpg key published to an external key server
+and that key will need to be added to our KEYS file as well.
+
+1. Create a key:
+
+               gpg --gen-key
+
+2. Add your gpg key to the Apache Aurora KEYS file:
+
+               git clone https://gitbox.apache.org/repos/asf/aurora.git
+               (gpg --list-sigs <KEY ID> && gpg --armor --export <KEY ID>) >> KEYS
+               git add KEYS && git commit -m "Adding gpg key for <APACHE ID>"
+               ./rbt post -o -g
+
+3. Publish the key to an external key server:
+
+               gpg --keyserver pgp.mit.edu --send-keys <KEY ID>
+
+4. Upload the updated KEYS file to the Apache Aurora svn dist locations listed below:
+
+               https://dist.apache.org/repos/dist/dev/aurora/KEYS
+               https://dist.apache.org/repos/dist/release/aurora/KEYS
+
+5. Add your key to git config for use with the release scripts:
+
+               git config --global user.signingkey <KEY ID>
+
+
+Creating a release
+------------------
+The following will guide you through the steps to create a release candidate, vote, and finally an
+official Apache Aurora release. Before starting, your gpg key should be in the KEYS file and you
+must have access to commit to the dist.a.o repositories.
+
+1. Ensure that all issues resolved for this release candidate are tagged with the correct Fix
+Version in JIRA; the changelog script will use this to generate the CHANGELOG in step #3.
+To assign the fix version:
+
+    * Look up the [previous release date](https://issues.apache.org/jira/browse/aurora/?selectedTab=com.atlassian.jira.jira-projects-plugin:versions-panel).
+    * Query all issues resolved after that release date: `project = AURORA AND status in (resolved, Closed) and fixVersion is empty and resolutiondate >= "YYYY/MM/DD"`
+    * In the upper right corner of the query result, select Tools > Bulk Edit.
+    * Select all issues > edit issue > set 'Change Fix Version/s' to the release version.
+    * Make sure to uncheck 'Send mail for this update' at the bottom.
+
+2. Prepare RELEASE-NOTES.md for the release. This just boils down to removing the "(Not yet
+released)" suffix from the impending release.
+
+3. Create a release candidate. This will automatically update the CHANGELOG and commit it, create a
+branch and update the current version within the trunk. To create a minor version update and publish
+it, run:
+
+               ./build-support/release/release-candidate -l m -p
+
+4. Update, if necessary, the draft email created from the `release-candidate` script in step #3 and
+send the [VOTE] email to the dev@ mailing list. You can verify the release signature and checksums
+by running:
+
+               ./build-support/release/verify-release-candidate
+
+5. Wait for the vote to complete. If the vote fails, close the vote by replying to the initial [VOTE]
+email sent in step #4, editing the subject to [RESULT][VOTE] ... and noting the failure reason
+(example [here](http://markmail.org/message/d4d6xtvj7vgwi76f)). You'll also need to manually revert
+the commits generated by the release candidate script that incremented the snapshot version and
+updated the changelog. Once that is done, address any issues and go back to step #1 and run
+again; this time use the -r flag to increment the release candidate version. This will
+automatically clean up the release candidate rc0 branch and source distribution.
+
+               ./build-support/release/release-candidate -l m -r 1 -p
+
+6. Once the vote has successfully passed, create the release:
+
+**IMPORTANT: make sure to use the correct release at this final step (e.g.: `-r 1` if rc1 candidate
+has been voted for). Once the release tag is pushed it will be very hard to undo, due to a remote
+git pre-receive hook that explicitly forbids release tag manipulations.**
+
+               ./build-support/release/release
+
+7. Update the draft email created from the `release` script in step #6 to include the Apache IDs of
+all binding votes and send the [RESULT][VOTE] email to the dev@ mailing list.
+
diff --git a/source/documentation/0.16.0/development/db-migration.md b/source/documentation/0.16.0/development/db-migration.md
new file mode 100644
index 0000000..8a64465
--- /dev/null
+++ b/source/documentation/0.16.0/development/db-migration.md
@@ -0,0 +1,34 @@
+DB Migrations
+=============
+
+Changes to the DB schema should be made in the form of migrations. This ensures that all changes
+are applied correctly after a DB dump from a previous version is restored.
+
+DB migrations are managed through a system built on top of
+[MyBatis Migrations](http://www.mybatis.org/migrations/). The migrations are run automatically when
+a snapshot is restored, no manual interaction is required by cluster operators.
+
+Upgrades
+--------
+When adding or altering tables or changing data, in addition to making the change in
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql), a new
+migration class should be created under the org.apache.aurora.scheduler.storage.db.migration
+package. The class should implement the [MigrationScript](https://github.com/mybatis/migrations/blob/master/src/main/java/org/apache/ibatis/migration/MigrationScript.java)
+interface (see [V001_TestMigration](https://github.com/apache/aurora/blob/rel/0.16.0/src/test/java/org/apache/aurora/scheduler/storage/db/testmigration/V001_TestMigration.java)
+as an example). The upgrade and downgrade scripts are defined in this class. When restoring a
+snapshot the list of migrations on the classpath is compared to the list of applied changes in the
+DB. Any changes that have not yet been applied are executed and their downgrade script is stored
+alongside the changelog entry in the database to facilitate downgrades in the event of a rollback.
+
+Downgrades
+----------
+If, while running migrations, a rollback is detected, i.e. a change exists in the DB changelog that
+does not exist on the classpath, the downgrade script associated with each affected change is
+applied.
+
+Baselines
+---------
+After enough time has passed (at least 1 official release), it should be safe to baseline migrations
+if desired. This can be accomplished by ensuring the changes from migrations have been applied to
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql) and then
+removing the corresponding migration classes and adding a migration to remove the changelog entries.
\ No newline at end of file
diff --git a/source/documentation/0.16.0/development/design-documents.md b/source/documentation/0.16.0/development/design-documents.md
new file mode 100644
index 0000000..b2c600a
--- /dev/null
+++ b/source/documentation/0.16.0/development/design-documents.md
@@ -0,0 +1,22 @@
+Design Documents
+================
+
+Since its inception as an Apache project, larger feature additions to the
+Aurora code base are discussed in the form of design documents. Design documents
+are living documents until a consensus has been reached to implement a feature
+in the proposed form.
+
+Current and past documents:
+
+* [Command Hooks for the Aurora Client](../design/command-hooks/)
+* [Dynamic Reservations](https://docs.google.com/document/d/19gV8Po6DIHO14tOC7Qouk8RnboY8UCfRTninwn_5-7c/edit)
+* [GPU Resources in Aurora](https://docs.google.com/document/d/1J9SIswRMpVKQpnlvJAMAJtKfPP7ZARFknuyXl-2aZ-M/edit)
+* [Health Checks for Updates](https://docs.google.com/document/d/1ZdgW8S4xMhvKW7iQUX99xZm10NXSxEWR0a-21FP5d94/edit)
+* [JobUpdateDiff thrift API](https://docs.google.com/document/d/1Fc_YhhV7fc4D9Xv6gJzpfooxbK4YWZcvzw6Bd3qVTL8/edit)
+* [REST API RFC](https://docs.google.com/document/d/11_lAsYIRlD5ETRzF2eSd3oa8LXAHYFD8rSetspYXaf4/edit)
+* [Revocable Mesos offers in Aurora](https://docs.google.com/document/d/1r1WCHgmPJp5wbrqSZLsgtxPNj3sULfHrSFmxp2GyPTo/edit)
+* [Supporting the Mesos Universal Containerizer](https://docs.google.com/document/d/111T09NBF2zjjl7HE95xglsDpRdKoZqhCRM5hHmOfTLA/edit?usp=sharing)
+* [Tier Management In Apache Aurora](https://docs.google.com/document/d/1erszT-HsWf1zCIfhbqHlsotHxWUvDyI2xUwNQQQxLgs/edit?usp=sharing)
+* [Ubiquitous Jobs](https://docs.google.com/document/d/12hr6GnUZU3mc7xsWRzMi3nQILGB-3vyUxvbG-6YmvdE/edit)
+
+Design documents can be found in the Aurora issue tracker via the query [`project = AURORA AND text ~ "docs.google.com" ORDER BY created`](https://issues.apache.org/jira/browse/AURORA-1528?jql=project%20%3D%20AURORA%20AND%20text%20~%20%22docs.google.com%22%20ORDER%20BY%20created).
diff --git a/source/documentation/0.16.0/development/design/command-hooks.md b/source/documentation/0.16.0/development/design/command-hooks.md
new file mode 100644
index 0000000..3f3f70f
--- /dev/null
+++ b/source/documentation/0.16.0/development/design/command-hooks.md
@@ -0,0 +1,102 @@
+# Command Hooks for the Aurora Client
+
+## Introduction/Motivation
+
+We've got hooks in the client that surround API calls. These are
+pretty awkward, because they don't correlate with user actions. For
+example, suppose we wanted a policy that said users weren't allowed to
+kill all instances of a production job at once.
+
+Right now, all that we could hook would be the "killJob" api call. But
+kill (at least in newer versions of the client) normally runs in
+batches. If a user called killall, what we would see on the API level
+is a series of "killJob" calls, each of which specified a batch of
+instances. We wouldn't be able to distinguish between really killing
+all instances of a job (which is forbidden under this policy), and
+carefully killing in batches (which is permitted). In each case, the
+hook would just see a series of API calls, and couldn't find out what
+the actual command being executed was!
+
+For most policy enforcement, what we really want to be able to do is
+look at and vet the commands that a user is performing, not the API
+calls that the client uses to implement those commands.
+
+So I propose that we add a new kind of hooks, which surround noun/verb
+commands. A hook will register itself to handle a collection of (noun,
+verb) pairs. Whenever any of those noun/verb commands are invoked, the
+hook's methods will be called around the execution of the verb. A
+pre-hook will have the ability to reject a command, preventing the
+verb from being executed.
+
+## Registering Hooks
+
+These hooks will be registered via configuration plugins. A configuration plugin
+can register hooks using an API. Hooks registered this way are, effectively,
+hardwired into the client executable.
+
+The order of execution of hooks is unspecified: they may be called in
+any order. There is no way to guarantee that one hook will execute
+before some other hook.
+
+
+### Global Hooks
+
+Commands registered by the python call are called _global_ hooks,
+because they will run for all configurations, whether or not they
+specify any hooks in the configuration file.
+
+In the implementation, hooks are registered in the module
+`apache.aurora.client.cli.command_hooks`, using the class
+`GlobalCommandHookRegistry`. A global hook can be registered by calling
+`GlobalCommandHookRegistry.register_command_hook` in a configuration plugin.
+
+### The API
+
+    class CommandHook(object):
+      @property
+      def name(self):
+        """Returns a name for the hook."""
+
+      def get_nouns(self):
+        """Return the nouns that have verbs that should invoke this hook."""
+
+      def get_verbs(self, noun):
+        """Return the verbs for a particular noun that should invoke this hook."""
+
+      @abstractmethod
+      def pre_command(self, noun, verb, context, commandline):
+        """Execute a hook before invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
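+As an illustration, the knobs controlling this behavior live on the `UpdateConfig` object in the
+job configuration. The following is a minimal sketch in the style of the other examples in these
+docs; the numeric values are arbitrary examples, not recommendations:
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      update_config = UpdateConfig(
+        batch_size = 3,               # update three instances per batch
+        watch_secs = 45,              # an instance must stay RUNNING this long to count as healthy
+        max_per_shard_failures = 1,   # failures tolerated per instance before it is marked failed
+        max_total_failures = 0        # any permanently failed instance aborts the update
+      ),
+      ...
+    )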
+        * commandline: the original argv collection used to invoke the client.
+        Returns: True if the command should be allowed to proceed; False if the command
+        should be rejected.
+        """
+
+      def post_command(self, noun, verb, context, commandline, result):
+        """Execute a hook after invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        * result: the result code returned by the verb.
+        Returns: nothing
+        """
+
+    class GlobalCommandHookRegistry(object):
+      @classmethod
+      def register_command_hook(cls, hook):
+        pass
+
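+As an illustration of how a concrete hook might look under this proposal (the module path and
+registration call are the ones described above; the policy itself is purely hypothetical):
+
+    from apache.aurora.client.cli.command_hooks import CommandHook, GlobalCommandHookRegistry
+
+    class BlockProdKillall(CommandHook):
+      """Hypothetical hook that rejects `job killall` against production job keys."""
+
+      @property
+      def name(self):
+        return 'block_prod_killall'
+
+      def get_nouns(self):
+        return ['job']
+
+      def get_verbs(self, noun):
+        return ['killall'] if noun == 'job' else []
+
+      def pre_command(self, noun, verb, context, commandline):
+        # Allow the command only if no argument references a /prod/ job key.
+        return not any('/prod/' in arg for arg in commandline)
+
+      def post_command(self, noun, verb, context, commandline, result):
+        pass
+
+    # Registered from a configuration plugin:
+    GlobalCommandHookRegistry.register_command_hook(BlockProdKillall())
+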
+### Skipping Hooks
+
+To skip a hook, a user uses a command-line option, `--skip-hooks`. The option can either
+specify specific hooks to skip, or "all":
+
+* `aurora --skip-hooks=all job create east/bozo/devel/myjob` will create a job
+  without running any hooks.
+* `aurora --skip-hooks=test,iq create east/bozo/devel/myjob` will create a job,
+  and will skip only the hooks named "test" and "iq".
diff --git a/source/documentation/0.16.0/development/scheduler.md b/source/documentation/0.16.0/development/scheduler.md
new file mode 100644
index 0000000..fc0836a
--- /dev/null
+++ b/source/documentation/0.16.0/development/scheduler.md
@@ -0,0 +1,118 @@
+Developing the Aurora Scheduler
+===============================
+
+The Aurora scheduler is written in Java code and built with [Gradle](http://gradle.org).
+
+
+Prerequisite
+============
+
+When using Apache Aurora checked out from the source repository or the binary
+distribution, the Gradle wrapper and JavaScript dependencies are provided.
+However, you need to manually install them when using the source release
+downloads:
+
+1. Install Gradle following the instructions on the [Gradle web site](http://gradle.org)
+2. From the root directory of the Apache Aurora project generate the Gradle
+wrapper by running:
+
+    gradle wrapper
+
+
+Getting Started
+===============
+
+You will need Java 8 installed and on your `PATH` or unzipped somewhere with `JAVA_HOME` set. Then
+
+    ./gradlew tasks
+
+will bootstrap the build system and show available tasks. This can take a while the first time you
+run it but subsequent runs will be much faster due to cached artifacts.
+
+Running the Tests
+-----------------
+Aurora has a comprehensive unit test suite. To run the tests use
+
+    ./gradlew build
+
+Gradle will only re-run tests when their dependencies have changed. To force a re-run of all
+tests use
+
+    ./gradlew clean build
+
+Running the build with code quality checks
+------------------------------------------
+To speed up development iteration, the plain gradle commands will not run static analysis tools.
+However, you should run these before posting a review diff, and **always** run this before pushing a
+commit to origin/master.
+
+    ./gradlew build -Pq
+
+Running integration tests
+-------------------------
+To run the same tests that are run in the Apache Aurora continuous integration
+environment:
+
+    ./build-support/jenkins/build.sh
+
+In addition, there is an end-to-end test that runs a suite of aurora commands
+using a virtual cluster:
+
+    ./src/test/sh/org/apache/aurora/e2e/test_end_to_end.sh
+
+Creating a bundle for deployment
+--------------------------------
+Gradle can create a zip file containing Aurora, all of its dependencies, and a launch script with
+
+    ./gradlew distZip
+
+or a tar file containing the same files with
+
+    ./gradlew distTar
+
+The output file will be written to `dist/distributions/aurora-scheduler.zip` or
+`dist/distributions/aurora-scheduler.tar`.
+
+
+
+Developing Aurora Java code
+===========================
+
+Setting up an IDE
+-----------------
+Gradle can generate project files for your IDE. To generate an IntelliJ IDEA project run
+
+    ./gradlew idea
+
+and import the generated `aurora.ipr` file.
+
+Adding or Upgrading a Dependency
+--------------------------------
+New dependencies can be added from Maven central by adding a `compile` dependency to `build.gradle`.
+For example, to add a dependency on `com.example`'s `example-lib` 1.0 add this block:
+
+    compile 'com.example:example-lib:1.0'
+
+NOTE: Anyone thinking about adding a new dependency should first familiarize themselves with the
+Apache Foundation's third-party licensing
+[policy](http://www.apache.org/legal/resolved.html#category-x).
+
+
+
+Developing the Aurora Build System
+==================================
+
+Bootstrapping Gradle
+--------------------
+The following files were autogenerated by `gradle wrapper` using gradle's
+[Wrapper](http://www.gradle.org/docs/current/dsl/org.gradle.api.tasks.wrapper.Wrapper.html) plugin and
+should not be modified directly:
+
+    ./gradlew
+    ./gradlew.bat
+    ./gradle/wrapper/gradle-wrapper.jar
+    ./gradle/wrapper/gradle-wrapper.properties
+
+To upgrade Gradle unpack the new version somewhere, run `/path/to/new/gradle wrapper` in the
+repository root and commit the changed files.
+
diff --git a/source/documentation/0.16.0/development/thermos.md b/source/documentation/0.16.0/development/thermos.md
new file mode 100644
index 0000000..0efe130
--- /dev/null
+++ b/source/documentation/0.16.0/development/thermos.md
@@ -0,0 +1,126 @@
+The Python components of Aurora are built using [Pants](https://pantsbuild.github.io).
+
+
+Python Build Conventions
+========================
+The Python code is laid out according to the following conventions:
+
+1. 1 `BUILD` per 3rd level directory. For a list of current top-level packages run:
+
+        % find src/main/python -maxdepth 3 -mindepth 3 -type d |\
+        while read dname; do echo $dname |\
+            sed 's@src/main/python/\(.*\)/\(.*\)/\(.*\).*@\1.\2.\3@'; done
+
+2.  Each `BUILD` file exports 1
+    [`python_library`](https://pantsbuild.github.io/build_dictionary.html#bdict_python_library)
+    that provides a
+    [`setup_py`](https://pantsbuild.github.io/build_dictionary.html#setup_py)
+    containing each
+    [`python_binary`](https://pantsbuild.github.io/build_dictionary.html#python_binary)
+    in the `BUILD` file, named the same as the directory it's in so that it can be referenced
+    without a ':' character. The `sources` field in the `python_library` will almost always be
+    `rglobs('*.py')`.
+
+3.  Other BUILD files may only depend on this single public `python_library`
+    target. Any other target is considered a private implementation detail and
+    should be prefixed with an `_`.
+
+4.  `python_binary` targets are always named the same as the exported console script.
+
+5.  `python_binary` targets must have identical `dependencies` to the `python_library` exported
+    by the package and must use `entry_point`.
+
+    This means a PEX file generated by pants will contain exactly the same files that will be
+    available on the `PYTHONPATH` in the case of `pip install` of the corresponding library
+    target. This will help our migration off of Pants in the future.
+
+Annotated example - apache.thermos.runner
+-----------------------------------------
+
+    % find src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner/__init__.py
+    src/main/python/apache/thermos/runner/thermos_runner.py
+    src/main/python/apache/thermos/runner/BUILD
+    % cat src/main/python/apache/thermos/runner/BUILD
+    # License boilerplate omitted
+    import os
+
+
+    # Private target so that a setup_py can exist without a circular dependency. Only targets within
+    # this file should depend on this.
+    python_library(
+      name = '_runner',
+      # The target covers every python file under this directory and subdirectories.
+      sources = rglobs('*.py'),
+      dependencies = [
+        '3rdparty/python:twitter.common.app',
+        '3rdparty/python:twitter.common.log',
+        # Source dependencies are always referenced without a ':'.
+        'src/main/python/apache/thermos/common',
+        'src/main/python/apache/thermos/config',
+        'src/main/python/apache/thermos/core',
+      ],
+    )
+
+    # Binary target for thermos_runner.pex. Nothing should depend on this - it's only used as an
+    # argument to ./pants binary.
+    python_binary(
+      name = 'thermos_runner',
+      # Use entry_point, not source so the files used here are the same ones tests see.
+      entry_point = 'apache.thermos.bin.thermos_runner',
+      dependencies = [
+        # Notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+    )
+
+    # The public library that everyone importing the runner symbols uses.
+    # The test targets and any other dependent source code should depend on this.
+    python_library(
+      name = 'runner',
+      dependencies = [
+        # Again, notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+      # We always provide a setup_py. This will cause any dependee libraries to automatically
+      # reference this library in their requirements.txt rather than copy the source files into their
+      # sdist.
+      provides = setup_py(
+        # Conventionally named and versioned.
+        name = 'apache.thermos.runner',
+        version = open(os.path.join(get_buildroot(), '.auroraversion')).read().strip().upper(),
+      ).with_binaries({
+        # Every binary in this file should also be repeated here.
+        # Always use the dict-form of .with_binaries so that commands with dashes in their names are
+        # supported.
+        # The console script name is always the same as the PEX with .pex stripped.
+        'thermos_runner': ':thermos_runner',
+      }),
+    )
+
+
+
+Thermos Test resources
+======================
+
+The Aurora source repository and distributions contain several
+[binary files](../../src/test/resources/org/apache/thermos/root/checkpoints) to
+qualify the backwards-compatibility of thermos with checkpoint data. Since
+thermos persists state to disk (to be read by the thermos observer), it is important that we have
+tests that prevent regressions affecting the ability to parse previously-written data.
+
+The files included represent persisted checkpoints that exercise different
+features of thermos. The existing files should not be modified unless
+we are accepting backwards incompatibility, such as with a major release.
+
+It is not practical to write source code to generate these files on the fly,
+as source would be vulnerable to drift (e.g. due to refactoring) in ways
+that would undermine the goal of ensuring backwards compatibility.
+
+The most common reason to add a new checkpoint file would be to provide
+coverage for new thermos features that alter the data format. This is
+accomplished by writing and running a
+[job configuration](../../reference/configuration/) that exercises the feature, and
+copying the checkpoint file from the sandbox directory; by default this is
+`/var/run/thermos/checkpoints/<aurora task id>`.
diff --git a/source/documentation/0.16.0/development/thrift.md b/source/documentation/0.16.0/development/thrift.md
new file mode 100644
index 0000000..cd0c9a0
--- /dev/null
+++ b/source/documentation/0.16.0/development/thrift.md
@@ -0,0 +1,54 @@
+Thrift
+======
+
+Aurora uses [Apache Thrift](https://thrift.apache.org/) for representing structured data in
+the client/server RPC protocol as well as for internal data storage. While Thrift is capable of
+correctly handling additions and renames of the existing members, field removals must be done
+carefully to ensure backwards compatibility and provide a predictable deprecation cycle. This
+document describes general guidelines for making Thrift schema changes to the existing fields in
+[api.thrift](https://github.com/apache/aurora/blob/rel/0.16.0/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+It is highly recommended to go through the
+[Thrift: The Missing Guide](http://diwakergupta.github.io/thrift-missing-guide/) first to refresh on
+basic Thrift schema concepts.
+
+Checklist
+---------
+Every existing Thrift schema modification is unique in its requirements and must be analyzed
+carefully to identify its scope and expected consequences. The following checklist may help in that
+analysis:
+* Is this a new field/struct? If yes, go ahead
+* Is this a pure field/struct rename without any type/structure change? If yes, go ahead and rename
+* Anything else, read further to make sure your change is properly planned
+
+Deprecation cycle
+-----------------
+Any time a breaking change (e.g.: field replacement or removal) is required, the following cycle
+must be followed:
+
+### vCurrent
+The change is applied in a way that does not prevent a scheduler/client at this version from
+communicating with a scheduler/client at vCurrent-1.
+* Do not remove or rename the old field
+* Add a new field as an eventual replacement of the old one and implement a dual read/write
+anywhere the old field is used. If a thrift struct is mapped in the DB store make sure both columns
+are marked as `NOT NULL`
+* Check [storage.thrift](https://github.com/apache/aurora/blob/rel/0.16.0/api/src/main/thrift/org/apache/aurora/gen/storage.thrift) to see if
+the affected struct is stored in Aurora scheduler storage. If so, it's almost certainly also
+necessary to perform a [DB migration](../db-migration/).
+* Add a deprecation jira ticket into the vCurrent+1 release candidate
+* Add a TODO for the deprecated field mentioning the jira ticket
+
+### vCurrent+1
+Finalize the change by removing the deprecated fields from the Thrift schema.
+* Drop any dual read/write routines added in the previous version
+* Remove thrift backfilling in scheduler
+* Remove the deprecated Thrift field
+
+Testing
+-------
+It's always advisable to test your changes in the local vagrant environment to build more
+confidence that your change is backwards compatible. It's easy to simulate different
+client/scheduler versions by playing with `aurorabuild` command. See [this document](../../getting-started/vagrant/)
+for more.
+
diff --git a/source/documentation/0.16.0/development/ui.md b/source/documentation/0.16.0/development/ui.md
new file mode 100644
index 0000000..f41614e
--- /dev/null
+++ b/source/documentation/0.16.0/development/ui.md
@@ -0,0 +1,46 @@
+Developing the Aurora Scheduler UI
+==================================
+
+Installing bower (optional)
+----------------------------
+Third party JS libraries used in Aurora (located at 3rdparty/javascript/bower_components) are
+managed by bower, a JS dependency manager. Bower is only required if you plan to add, remove or
+update JS libraries. Bower can be installed using the following command:
+
+    npm install -g bower
+
+Bower depends on node.js and npm. The easiest way to install node on a mac is via brew:
+
+    brew install node
+
+For more node.js installation options refer to https://github.com/joyent/node/wiki/Installation.
+
+More info on installing and using bower can be found at: http://bower.io/. Once installed, you can
+use the following commands to view and modify the bower repo at
+3rdparty/javascript/bower_components
+
+    bower list
+    bower install <library name>
+    bower remove <library name>
+    bower update <library name>
+    bower help
+
+
+Faster Iteration in Vagrant
+---------------------------
+The scheduler serves UI assets from the classpath. For production deployments this means the assets
+are served from within a jar. However, for faster development iteration, the vagrant image is
+configured to add the `scheduler` subtree of `/vagrant/dist/resources/main` to the head of
+`CLASSPATH`. This path is configured as a shared filesystem to the path on the host system where
+your Aurora repository lives. This means that any updates under `dist/resources/main/scheduler` in
+your checkout will be reflected immediately in the UI served from within the vagrant image.
+
+The one caveat to this is that this path is under `dist` not `src`. This is because the assets must
+be processed by gradle before they can be served. So, unfortunately, you cannot just save your local
+changes and see them reflected in the UI; you must first run `./gradlew processResources`. This is
+less than ideal, but better than having to restart the scheduler after every change. Additionally,
+gradle makes this process somewhat easier with the use of the `--continuous` flag. If you run:
+`./gradlew processResources --continuous` gradle will monitor the filesystem for changes and run the
+task automatically as necessary. This doesn't quite provide hot-reload capabilities, but it does
+allow for <5s from save to changes being visible in the UI with no further action required on the
+part of the developer.
diff --git a/source/documentation/0.16.0/features/constraints.md b/source/documentation/0.16.0/features/constraints.md
new file mode 100644
index 0000000..4ade848
--- /dev/null
+++ b/source/documentation/0.16.0/features/constraints.md
@@ -0,0 +1,126 @@
+Scheduling Constraints
+======================
+
+By default, Aurora will pick any random agent with sufficient resources
+in order to schedule a task. This scheduling choice can be further
+restricted with the help of constraints.
+
+
+Mesos Attributes
+----------------
+
+Data centers are often organized with hierarchical failure domains.  Common failure domains
+include hosts, racks, rows, and PDUs.  If you have this information available, it is wise to tag
+the Mesos agent with them as
+[attributes](https://mesos.apache.org/documentation/attributes-resources/).
+
+The Mesos agent `--attributes` command line argument can be used to mark agents with
+static key/value pairs, so-called attributes (not to be confused with `--resources`, which are
+dynamic and accounted).
+
+For example, consider the host `cluster1-aaa-03-sr2` and its following attributes (given in
+key:value format): `host:cluster1-aaa-03-sr2` and `rack:aaa`.
+
+Aurora makes these attributes available for matching with scheduling constraints.
+
+
+Limit Constraints
+-----------------
+
+Limit constraints allow you to control machine diversity. The below
+constraint ensures that no more than two instances of your job may run on a single host.
+Think of this as a "group by" limit.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'host': 'limit:2',
+      }
+      ...
+    )
+
+
+Likewise, you can use constraints to control rack diversity, e.g. at
+most one task per rack:
+
+    constraints = {
+      'rack': 'limit:1',
+    }
+
+Use these constraints sparingly as they can dramatically reduce Tasks' schedulability.
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+
+Value Constraints
+-----------------
+
+Value constraints can be used to express that a certain attribute with a certain value
+should be present on a Mesos agent. For example, the following job would only be
+scheduled on nodes that claim to have an `SSD` as their disk.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'disk': 'SSD',
+      }
+      ...
+    )
+
+
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+Running stateful services
+-------------------------
+
+Aurora is best suited to run stateless applications, but it also accommodates stateful services
+like databases, or services that otherwise need to always run on the same machines.
+
+### Dedicated attribute
+
+Most Mesos attributes are arbitrary and available for custom use.  There is one exception,
+though: the `dedicated` attribute.  Aurora treats this attribute specially: it will only schedule
+jobs with a matching constraint on these machines, and these machines will only run such jobs.
+
+
+#### Syntax
+The dedicated attribute has semantic meaning. The format is `$role(/.*)?`. When a job is created,
+the scheduler requires that the `$role` component matches the `role` field in the job
+configuration, and will reject the job creation otherwise.  The remainder of the attribute is
+free-form. We've developed the idiom of formatting this attribute as `$role/$job`, but do not
+enforce this. For example: a job `devcluster/www-data/prod/hello` with a dedicated constraint set as
+`www-data/web.multi` will have its tasks scheduled only on Mesos agents configured with:
+`--attributes=dedicated:www-data/web.multi`.
+
+A wildcard (`*`) may be used for the role portion of the dedicated attribute, which will allow any
+owner to elect for a job to run on the host(s). For example: tasks from both
+`devcluster/www-data/prod/hello` and `devcluster/vagrant/test/hello` with a dedicated constraint
+formatted as `*/web.multi` will be scheduled only on Mesos agents configured with
+`--attributes=dedicated:*/web.multi`. This may be useful when assembling a virtual cluster of
+machines sharing the same set of traits or requirements.
+
+##### Example
+Consider the following agent command line:
+
+    mesos-slave --attributes="dedicated:db_team/redis" ...
+
+And this job configuration:
+
+    Service(
+      name = 'redis',
+      role = 'db_team',
+      constraints = {
+        'dedicated': 'db_team/redis'
+      }
+      ...
+    )
+
+The job configuration is indicating that it should only be scheduled on agents with the attribute
+`dedicated:db_team/redis`.  Additionally, Aurora will prevent any tasks that do _not_ have that
+constraint from running on those agents.
+
diff --git a/source/documentation/0.16.0/features/containers.md b/source/documentation/0.16.0/features/containers.md
new file mode 100644
index 0000000..96c4162
--- /dev/null
+++ b/source/documentation/0.16.0/features/containers.md
@@ -0,0 +1,106 @@
+Containers
+==========
+
+Aurora supports several containerizers, notably the Mesos containerizer and the Docker
+containerizer. The Mesos containerizer uses native OS features directly to provide isolation between
+containers, while the Docker containerizer delegates container management to the Docker engine.
+
+Support for launching container images via both containerizers has to be
+[enabled by a cluster operator](../../operations/configuration/#containers).
+
+Mesos Containerizer
+-------------------
+
+The Mesos containerizer is the native Mesos containerizer solution. It allows tasks to be
+run with an array of [pluggable isolators](../resource-isolation/) and can launch tasks using
+[Docker](https://github.com/docker/docker/blob/master/image/spec/v1.md) images,
+[AppC](https://github.com/appc/spec/blob/master/SPEC.md) images, or directly on the agent host
+filesystem.
+
+The following example (available in our [Vagrant environment](../../getting-started/vagrant/))
+launches a hello world example within a `debian/jessie` Docker image:
+
+    $ cat /vagrant/examples/jobs/hello_docker_image.aurora
+    hello_loop = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = Task(
+      processes = [hello_loop],
+      resources = Resources(cpu=1, ram=1*MB, disk=8*MB)
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker_image',
+        task = task,
+        container = Mesos(image=DockerImage(name='debian', tag='jessie'))
+      )
+    ]
+
+Docker and AppC images are designated using an appropriate `image` property of the `Mesos`
+configuration object. If either `container` or `image` is left unspecified, the host filesystem
+will be used. Further details of how to specify images can be found in the
+[Reference Documentation](../../reference/configuration/#mesos-object).
+
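+For instance, assuming an AppC image has been made available to the agents, the `container` line in
+the example above could reference it instead. The image name and id below are purely illustrative;
+see the Reference Documentation linked above for the exact fields:
+
+    container = Mesos(image = AppcImage(name = 'www-data/webservice', image_id = 'sha512-0a1b2c3d'))
+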
+By default, Aurora launches processes as the Linux user named after the role in use (e.g. `www-data`
+in the example above). This user has to exist on the host filesystem. If it does not exist within
+the container image, it will be created automatically. Otherwise, this user and its primary group
+have to exist in the image with matching uid/gid.
+
+For more information on the Mesos containerizer filesystem, namespace, and isolator features, visit
+[Mesos Containerizer](http://mesos.apache.org/documentation/latest/mesos-containerizer/) and
+[Mesos Container Images](http://mesos.apache.org/documentation/latest/container-image/).
+
+
+Docker Containerizer
+--------------------
+
+The Docker containerizer launches container images using the Docker engine. It may often provide
+more advanced features than the native Mesos containerizer, but has to be installed separately
+from Mesos on each agent host.
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/hello_docker_engine.aurora
+    hello_loop = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = Task(
+      processes = [hello_loop],
+      resources = Resources(cpu=1, ram=1*MB, disk=8*MB)
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker',
+        task = task,
+        container = Docker(image = 'python:2.7')
+      )
+    ]
+
+Details of how to use Docker via the Docker engine can be found in the
+[Reference Documentation](../../reference/configuration/#docker-object). Please note that in order to
+correctly execute processes inside a job, the Docker container must have Python 2.7 and potentially
+further Mesos dependencies installed. This limitation does not hold for Docker containers used via
+the Mesos containerizer.
+
+For more information on launching Docker containers through the Docker containerizer, visit
+[Docker Containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/).
diff --git a/source/documentation/0.16.0/features/cron-jobs.md b/source/documentation/0.16.0/features/cron-jobs.md
new file mode 100644
index 0000000..bcc94dc
--- /dev/null
+++ b/source/documentation/0.16.0/features/cron-jobs.md
@@ -0,0 +1,124 @@
+# Cron Jobs
+
+Aurora supports execution of scheduled jobs on a Mesos cluster using cron-style syntax.
+
+- [Overview](#overview)
+- [Collision Policies](#collision-policies)
+- [Failure recovery](#failure-recovery)
+- [Interacting with cron jobs via the Aurora CLI](#interacting-with-cron-jobs-via-the-aurora-cli)
+	- [cron schedule](#cron-schedule)
+	- [cron deschedule](#cron-deschedule)
+	- [cron start](#cron-start)
+	- [job killall, job restart, job kill](#job-killall-job-restart-job-kill)
+- [Technical Note About Syntax](#technical-note-about-syntax)
+- [Caveats](#caveats)
+	- [Failovers](#failovers)
+	- [Collision policy is best-effort](#collision-policy-is-best-effort)
+	- [Timezone Configuration](#timezone-configuration)
+
+## Overview
+
+A job is identified as a cron job by the presence of a
+`cron_schedule` attribute containing a cron-style schedule in the
+[`Job`](../../reference/configuration/#job-objects) object. Examples of cron schedules
+include "every 5 minutes" (`*/5 * * * *`), "Fridays at 17:00" (`0 17 * * FRI`), and
+"the 1st and 15th day of the month at 03:00" (`0 3 1,15 * *`).
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/cron_hello_world.aurora
+    # A cron job that runs every 5 minutes.
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]
+
+## Collision Policies
+
+The `cron_collision_policy` field specifies the scheduler's behavior when a new cron job is
+triggered while an older run hasn't finished. The scheduler has two policies available:
+
+* `KILL_EXISTING`: The default policy - on a collision the old instances are killed and instances with the current
+configuration are started.
+* `CANCEL_NEW`: On a collision the new run is cancelled.
+
+Note that the use of `CANCEL_NEW` is likely a code smell - interrupted cron jobs should be able
+to recover their progress on a subsequent invocation; otherwise they risk having their work queue
+grow faster than they can process it.
+
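+Building on the earlier example, a cron job that prefers to skip a run rather than interrupt one
+already in progress would set the policy explicitly (a sketch reusing the names from the example
+above):
+
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        cron_collision_policy = 'CANCEL_NEW',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]
+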
+## Failure recovery
+
+Unlike services, which Aurora will always re-execute regardless of exit status, instances of
+cron jobs retry according to the `max_task_failures` attribute of the
+[Task](../../reference/configuration/#task-object) object. To get "run-until-success" semantics,
+set `max_task_failures` to `-1`.
+
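+As a sketch, the relevant knob sits on the `Task` object (the names and values here are
+illustrative only):
+
+    task = Task(
+      name = 'cron_backfill',
+      processes = [Process(name = 'backfill', cmdline = 'echo "catching up"')],
+      resources = Resources(cpu = 0.1, ram = 16*MB, disk = 16*MB),
+      max_task_failures = -1,   # retry until the task eventually succeeds
+    )
+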
+## Interacting with cron jobs via the Aurora CLI
+
+Most interaction with cron jobs takes place using the `cron` subcommand. See `aurora cron -h`
+for up-to-date usage instructions.
+
+### cron schedule
+Schedules a new cron job on the Aurora cluster for later runs or replaces the existing cron template
+with a new one. Only future runs will be affected; any existing active tasks are left intact.
+
+    $ aurora cron schedule devcluster/www-data/test/cron_hello_world /vagrant/examples/jobs/cron_hello_world.aurora
+
+### cron deschedule
+Deschedules a cron job, preventing future runs but allowing current runs to complete.
+
+    $ aurora cron deschedule devcluster/www-data/test/cron_hello_world
+
+### cron start
+Start a cron job immediately, outside of its normal cron schedule.
+
+    $ aurora cron start devcluster/www-data/test/cron_hello_world
+
+### job killall, job restart, job kill
+Cron jobs create instances running on the cluster that you can interact with like normal Aurora
+tasks with `job kill` and `job restart`.
+
+
+## Technical Note About Syntax
+
+`cron_schedule` uses a restricted subset of BSD crontab syntax. While the
+execution engine currently uses Quartz, the schedule parsing is custom, a subset of FreeBSD
+[crontab(5)](http://www.freebsd.org/cgi/man.cgi?crontab(5)) syntax. See
+[the source](https://github.com/apache/aurora/blob/master/src/main/java/org/apache/aurora/scheduler/cron/CrontabEntry.java#L106-L124)
+for details.
+
+
+## Caveats
+
+### Failovers
+No failover recovery. Aurora does not record the latest minute it fired
+triggers for across failovers. Therefore it's possible to miss triggers
+on failover. Note that this behavior may change in the future.
+
+It's necessary to sync time between schedulers with something like `ntpd`.
+Clock skew could cause double or missed triggers in the case of a failover.
+
+### Collision policy is best-effort
+Aurora aims to always have *at least one copy* of a given instance running at a time - it's
+an AP system, meaning it chooses Availability and Partition Tolerance at the expense of
+Consistency.
+
+If your collision policy was `CANCEL_NEW` and a task has terminated but
+Aurora has not noticed this, Aurora will go ahead and create your new
+task.
+
+If your collision policy was `KILL_EXISTING` and a task was marked `LOST`
+but not yet GCed, Aurora will go ahead and create your new task without
+attempting to kill the old one (outside the GC interval).
+
+### Timezone Configuration
+Cron timezone is configured independently of JVM timezone with the `-cron_timezone` flag and
+defaults to UTC.
diff --git a/source/documentation/0.16.0/features/custom-executors.md b/source/documentation/0.16.0/features/custom-executors.md
new file mode 100644
index 0000000..40fc118
--- /dev/null
+++ b/source/documentation/0.16.0/features/custom-executors.md
@@ -0,0 +1,152 @@
+Custom Executors
+================
+
+If the need arises to use a Mesos executor other than the Thermos executor, the scheduler can be
+configured to utilize a custom executor by specifying the `-custom_executor_config` flag.
+The flag must be set to the path of a valid executor configuration file.
+
+The configuration file must be a valid **JSON array** and contain, at minimum,
+one executor configuration including the name, command and resources fields and
+must be pointed to by the `-custom_executor_config` flag when the scheduler is
+started.
+
+### Array Entry
+
+Property                 | Description
+-----------------------  | ---------------------------------
+executor (required)      | Description of executor.
+task_prefix (required)   | Prefix given to tasks launched with this executor's configuration.
+volume_mounts (optional) | Volumes to be mounted in container running executor.
+
+#### executor
+
+Property                 | Description
+-----------------------  | ---------------------------------
+name (required)          | Name of the executor.
+command (required)       | How to run the executor.
+resources (required)     | Overhead to use for each executor instance.
+
+#### command
+
+Property                 | Description
+-----------------------  | ---------------------------------
+value (required)         | The command to execute.
+arguments (optional)     | A list of arguments to pass to the command.
+uris (optional)          | List of resources to download into the task sandbox.
+shell (optional)         | Run executor via shell.
+
+A note on the command property (from [mesos.proto](https://github.com/apache/mesos/blob/master/include/mesos/mesos.proto)):
+```
+1) If 'shell == true', the command will be launched via shell
+   (i.e., /bin/sh -c 'value'). The 'value' specified will be
+   treated as the shell command. The 'arguments' will be ignored.
+2) If 'shell == false', the command will be launched by passing
+   arguments to an executable. The 'value' specified will be
+   treated as the filename of the executable. The 'arguments'
+   will be treated as the arguments to the executable. This is
+   similar to how POSIX exec families launch processes (i.e.,
+   execlp(value, arguments(0), arguments(1), ...)).
+```
+
+##### uris (list)
+* Follows the [Mesos Fetcher schema](http://mesos.apache.org/documentation/latest/fetcher/)
+
+Property                 | Description
+-----------------------  | ---------------------------------
+value (required)         | Path to the resource needed in the sandbox.
+executable (optional)    | Change resource to be executable via chmod.
+extract (optional)       | Extract files from packed or compressed archives into the sandbox.
+cache (optional)         | Use caching mechanism provided by Mesos for resources.
+
+#### resources (list)
+
+Property             | Description
+-------------------  | ---------------------------------
+name (required)      | Name of the resource: cpus or mem.
+type (required)      | Type of resource. Should always be SCALAR.
+scalar (required)    | Value in float for cpus or int for mem (in MBs)
+
+### volume_mounts (list)
+
+Property                  | Description
+------------------------  | ---------------------------------
+host_path (required)      | Host path to mount inside the container.
+container_path (required) | Path inside the container where `host_path` will be mounted.
+mode (required)           | Mode in which to mount the volume, Read-Write (RW) or Read-Only (RO).
+
+A sample configuration is as follows:
+```
+[
+    {
+      "executor": {
+        "name": "myExecutor",
+        "command": {
+          "value": "myExecutor.a",
+          "shell": "false",
+          "arguments": [
+            "localhost:2181",
+            "-verbose",
+            "-config myConfiguration.config"
+          ],
+          "uris": [
+            {
+              "value": "/dist/myExecutor.a",
+              "executable": true,
+              "extract": false,
+              "cache": true
+            },
+            {
+              "value": "/home/user/myConfiguration.config",
+              "executable": false,
+              "extract": false,
+              "cache": false
+            }
+          ]
+        },
+        "resources": [
+          {
+            "name": "cpus",
+            "type": "SCALAR",
+            "scalar": {
+              "value": 1.00
+            }
+          },
+          {
+            "name": "mem",
+            "type": "SCALAR",
+            "scalar": {
+              "value": 512
+            }
+          }
+        ]
+      },
+      "volume_mounts": [
+        {
+          "mode": "RO",
+          "container_path": "/path/on/container",
+          "host_path": "/path/to/host/directory"
+        },
+        {
+          "mode": "RW",
+          "container_path": "/container",
+          "host_path": "/host"
+        }
+      ],
+      "task_prefix": "my-executor-"
+    }
+]
+
+```
+
+It should be noted that if you do not use Thermos or a Thermos-based executor, links in the scheduler's
+Web UI for tasks will not work (at least for the time being).
+Some information about launched tasks can still be accessed via the Mesos Web UI or via the Aurora Client.
+
+### Using a custom executor
+
+At this time, it is not possible to create a job that runs on a custom executor using the default
+Aurora client. To allow the scheduler to pick the correct executor, the `JobConfiguration.TaskConfig.ExecutorConfig.name`
+field must be set to match the name used in the custom executor configuration blob (e.g. to run a job using myExecutor,
+`JobConfiguration.TaskConfig.ExecutorConfig.name` must be set to `myExecutor`). While support for modifying
+this field in Pystachio has yet to be created, the easiest way to launch jobs with custom executors is to use
+an existing custom client such as [gorealis](https://github.com/rdelval/gorealis).
diff --git a/source/documentation/0.16.0/features/job-updates.md b/source/documentation/0.16.0/features/job-updates.md
new file mode 100644
index 0000000..22e3bbc
--- /dev/null
+++ b/source/documentation/0.16.0/features/job-updates.md
@@ -0,0 +1,111 @@
+Aurora Job Updates
+==================
+
+`Job` configurations can be updated at any point in their lifecycle.
+Usually updates are done incrementally using a process called a *rolling
+upgrade*, in which Tasks are upgraded in small groups, one group at a
+time.  Updates are done using various Aurora Client commands.
+
+
+Rolling Job Updates
+-------------------
+
+There are several sub-commands to manage job updates:
+
+    aurora update start <job key> <configuration file>
+    aurora update info <job key>
+    aurora update pause <job key>
+    aurora update resume <job key>
+    aurora update abort <job key>
+    aurora update list <cluster>
+
+When you `start` a job update, the command will return once it has sent the
+instructions to the scheduler.  At that point, you may view detailed
+progress for the update with the `info` subcommand, in addition to viewing
+graphical progress in the web browser.  You may also get a full listing of
+in-progress updates in a cluster with `list`.
+
+Once an update has been started, you can `pause` to keep the update but halt
+progress.  This can be useful for doing things like debugging a partially-updated
+job to determine whether you would like to proceed.  You can `resume` to
+proceed.
+
+You may `abort` a job update regardless of the state it is in. This will
+instruct the scheduler to completely abandon the job update and leave the job
+in the current (possibly partially-updated) state.
+
+For a configuration update, the Aurora Client calculates required changes
+by examining the current job config state and the new desired job config.
+It then starts a *rolling batched update process* by going through every batch
+and performing these operations:
+
+- If an instance is present in the scheduler but isn't in the new config,
+  then that instance is killed.
+- If an instance is not present in the scheduler but is present in
+  the new config, then the instance is created.
+- If an instance is present in both the scheduler and the new config, then
+  the client diffs both task configs. If it detects any changes, it
+  performs an instance update by killing the old config instance and adding
+  the new config instance.
+
+The Aurora client continues through the instance list until all tasks are
+updated, in `RUNNING`, and healthy for a configurable amount of time.
+If the client determines the update is not going well (a percentage of health
+checks have failed), it cancels the update.
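+
+To make the per-instance decisions above concrete, here is an illustrative Python sketch of the
+diffing logic; it is not the actual Aurora client implementation, just the reasoning it applies to
+each instance id.
+
+```python
+def plan_update(current, desired):
+    """current/desired: dicts mapping instance id -> task config."""
+    operations = []
+    for instance in sorted(set(current) | set(desired)):
+        if instance not in desired:
+            operations.append(('kill', instance))      # present only in the scheduler
+        elif instance not in current:
+            operations.append(('create', instance))    # present only in the new config
+        elif current[instance] != desired[instance]:
+            operations.append(('update', instance))    # kill old config, add new config
+    return operations
+
+print(plan_update({0: 'v1', 1: 'v1', 2: 'v1'}, {0: 'v1', 1: 'v2', 3: 'v2'}))
+# [('update', 1), ('kill', 2), ('create', 3)]
+```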
+
+Update cancellation runs a procedure similar to the update sequence described
+above, but in reverse order. New instance configs are swapped
+with old instance configs and batch updates proceed backwards
+from the point where the update failed. E.g., (0,1,2) (3,4,5) (6,7,
+8-FAIL) results in a rollback in order (8,7,6) (5,4,3) (2,1,0).
+
+For details on how to control a job update, please see the
+[UpdateConfig](../../reference/configuration/#updateconfig-objects) configuration object.
+
+
+Coordinated Job Updates
+------------------------
+
+Some Aurora services may benefit from having more control over updates by explicitly
+acknowledging ("heartbeating") job update progress. This may be helpful for mission-critical
+service updates where explicit job health monitoring is vital during the entire job update
+lifecycle. Such job updates would rely on an external service (or a custom client) periodically
+pulsing an active coordinated job update via a
+[pulseJobUpdate RPC](https://github.com/apache/aurora/blob/rel/0.16.0/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+A coordinated update is defined by setting a positive
+[pulse_interval_secs](../../reference/configuration/#updateconfig-objects) value in the job configuration
+file. If no pulses are received within the specified interval, the update will be blocked. A blocked
+update is unable to continue rolling forward (or rolling back) but retains its active status.
+It may only be unblocked by a fresh `pulseJobUpdate` call.
+
+NOTE: A coordinated update starts in `ROLL_FORWARD_AWAITING_PULSE` state and will not make any
+progress until the first pulse arrives. However, a paused update (`ROLL_FORWARD_PAUSED` or
+`ROLL_BACK_PAUSED`) is still considered active and upon resuming will immediately make progress
+provided the pulse interval has not expired.
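+
+A minimal sketch of such an external coordinator is shown below. It assumes `client` is a Thrift
+client exposing the `pulseJobUpdate` RPC from `api.thrift` and that `update_key` identifies the
+active coordinated update; the helper `service_is_healthy` is a hypothetical stand-in for your own
+health monitoring.
+
+```python
+import time
+
+PULSE_INTERVAL_SECS = 60  # should match pulse_interval_secs in the job configuration
+
+def coordinate(client, update_key, service_is_healthy):
+    while True:
+        if service_is_healthy():
+            client.pulseJobUpdate(update_key)    # keeps the update rolling (or unblocks it)
+        time.sleep(PULSE_INTERVAL_SECS / 2)      # pulse well within the configured interval
+```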
+
+
+Canary Deployments
+------------------
+
+Canary deployments are a pattern for rolling out updates to a subset of job instances,
+in order to test different code versions alongside the actual production job.
+It is a risk-mitigation strategy for job owners and commonly used in a form where
+job instance 0 runs with a different configuration than the instances 1-N.
+
+For example, consider a job with 4 instances that each
+request 1 core of cpu, 1 GB of RAM, and 1 GB of disk space as specified
+in the configuration file `hello_world.aurora`. If you want to
+update it so it requests 2 GB of RAM instead of 1, you can create a new
+configuration file called `new_hello_world.aurora` and
+issue
+
+    aurora update start <job_key_value>/0-1 new_hello_world.aurora
+
+This results in instances 0 and 1 having 1 cpu, 2 GB of RAM, and 1 GB of disk space,
+while instances 2 and 3 have 1 cpu, 1 GB of RAM, and 1 GB of disk space. If instance 3
+dies and restarts, it restarts with 1 cpu, 1 GB RAM, and 1 GB disk space.
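+
+For instance, the only change in `new_hello_world.aurora` might be the resource stanza; a sketch,
+assuming the rest of the file matches `hello_world.aurora`:
+
+```python
+# new_hello_world.aurora (sketch): request 2 GB of RAM instead of 1, everything else unchanged.
+resources = Resources(cpu = 1, ram = 2*GB, disk = 1*GB)
+```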
+
+This means the same job has two task configurations active at the same time,
+each valid for a different range of instances. While this isn't a recommended
+pattern, it is valid and supported by the Aurora scheduler.
diff --git a/source/documentation/0.16.0/features/mesos-fetcher.md b/source/documentation/0.16.0/features/mesos-fetcher.md
new file mode 100644
index 0000000..967a905
--- /dev/null
+++ b/source/documentation/0.16.0/features/mesos-fetcher.md
@@ -0,0 +1,46 @@
+Mesos Fetcher
+=============
+
+Mesos has support for downloading resources into the sandbox through the
+use of the [Mesos Fetcher](http://mesos.apache.org/documentation/latest/fetcher/).
+
+Aurora supports passing URIs to the Mesos Fetcher dynamically by including
+a list of URIs in job submissions.
+
+How to use
+----------
+The scheduler flag `-enable_mesos_fetcher` must be set to true.
+
+Currently only the scheduler side of this feature has been implemented,
+so a modification to the existing client or a custom Thrift client is required
+to make use of this feature.
+
+If using a custom Thrift client, the list of URIs must be included in TaskConfig
+as the `mesosFetcherUris` field.
+
+Each Mesos Fetcher URI has the following data members:
+
+|Property | Description|
+|---------|------|
+|value (required)  |Path to the resource needed in the sandbox.|
+|extract (optional)|Extract files from packed or compressed archives into the sandbox.|
+|cache (optional) | Use caching mechanism provided by Mesos for resources.|
+
+Note that this structure is very similar to the one provided for downloading
+resources needed for a [custom executor](../../operations/configuration/).
+
+This is because both features use the Mesos fetcher to retrieve resources into
+the sandbox. However, one, the custom executor feature, has a static set of URIs
+set in the server side, and the other, the Mesos Fetcher feature, is a dynamic set
+of URIs set at the time of job submission.
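+
+As a hedged sketch of what a custom Thrift client might set (the module path and struct name are
+assumptions based on the Aurora source layout; `api.thrift` is the authoritative definition):
+
+```python
+from gen.apache.aurora.api.ttypes import MesosFetcherURI, TaskConfig
+
+task = TaskConfig(
+    # ... other task fields ...
+    mesosFetcherUris=[
+        MesosFetcherURI(value='https://example.com/my-archive.tar.gz',
+                        extract=True,    # unpack the archive into the sandbox
+                        cache=False),    # bypass the Mesos fetcher cache
+    ],  # declared as a set in the Thrift IDL; shown as a list here for brevity
+)
+```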
+
+Security Implications
+---------------------
+There are security implications that must be taken into account when enabling this feature.
+**Enabling this feature may potentially enable any job submitting user to perform a privilege escalation.**
+
+Until a more thorough solution is created, one step that has been taken to mitigate this issue
+is to statically mark every user submitted URI as non-executable. This is in contrast to the set of URIs
+set in the custom executor feature which may mark any URI as executable.
+
+If the need arises to mark a downloaded URI as executable, please consider using the custom executor feature.
\ No newline at end of file
diff --git a/source/documentation/0.16.0/features/multitenancy.md b/source/documentation/0.16.0/features/multitenancy.md
new file mode 100644
index 0000000..c2ed26b
--- /dev/null
+++ b/source/documentation/0.16.0/features/multitenancy.md
@@ -0,0 +1,81 @@
+Multitenancy
+============
+
+Aurora is a multi-tenant system that can run jobs of multiple clients/tenants.
+Going beyond the [resource isolation on an individual host](../resource-isolation/), it is
+crucial to prevent those jobs from stepping on each other's toes.
+
+
+Job Namespaces
+--------------
+
+The namespace for jobs in Aurora follows a hierarchical structure. This is meant to make it easier
+to differentiate between different jobs. A job key consists of four parts. The four parts are
+`<cluster>/<role>/<environment>/<jobname>` in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts.
+* Environment names are namespaces.
+* Jobname is the custom name of your job.
+
+Role names correspond to user accounts. They are used for
+[authentication](../../operations/security/), as the Linux user used to run jobs, and for the
+assignment of [quota](#preemption). If you don't know what accounts are available, contact your
+sysadmin.
+
+The environment component in the job key serves as a namespace. The values for
+environment are validated in the client and the scheduler to allow only `devel`, `test`,
+`production`, or any value matching the regular expression `staging[0-9]*`.
+
+None of the values imply any difference in the scheduling behavior. Conventionally, the
+"environment" is set so as to indicate a certain level of stability in the behavior of the job
+by ensuring that an appropriate level of testing has been performed on the application code. E.g.,
+in the case of a typical Job, releases may progress through the following phases in order of
+increasing level of stability: `devel`, `test`, `staging`, `production`.
+
+
+Configuration Tiers
+-------------------
+
+Tier is a predefined bundle of task configuration options. Aurora schedules tasks and assigns them
+resources based on their tier assignment. The default scheduler tier configuration allows for
+3 tiers:
+
+ - `revocable`: The `revocable` tier requires the task to run with [revocable](../resource-isolation/#oversubscription)
+ resources.
+ - `preemptible`: Setting the task’s tier to `preemptible` allows for the possibility of that task
+ being [preempted](#preemption) by other tasks when the cluster is running low on resources.
+ - `preferred`: The `preferred` tier prevents the task from using [revocable](../resource-isolation/#oversubscription)
+ resources and from being [preempted](#preemption).
+
+Since it is possible that a cluster is configured with a custom tier configuration, users should
+consult their cluster administrator to be informed of the tiers supported by the cluster. Attempts
+to schedule jobs with an unsupported tier will be rejected by the scheduler.
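+
+As a hedged sketch, a job opts into a tier through its configuration; the `tier` field is described
+in the [Configuration Reference](../../reference/configuration/#job-objects), and the value must be
+one your cluster's tier configuration actually supports:
+
+```python
+job = Service(
+  cluster = 'devcluster',
+  role = 'www-data',
+  environment = 'prod',
+  name = 'hello',
+  tier = 'preferred',   # or 'preemptible' / 'revocable'
+  task = hello_task,    # a previously defined Task (hypothetical name)
+)
+```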
+
+
+Preemption
+----------
+
+In order to guarantee that important production jobs are always running, Aurora supports
+preemption.
+
+Consider a pending job that is a candidate for scheduling, but a resource shortage
+prevents it from being scheduled. Active tasks can become the victim of preemption if:
+
+ - both candidate and victim are owned by the same role and the
+   [priority](../../reference/configuration/#job-objects) of a victim is lower than the
+   [priority](../../reference/configuration/#job-objects) of the candidate.
+ - OR a victim is a `preemptible` or `revocable` [tier](#configuration-tiers) task and the candidate
+   is a `preferred` [tier](#configuration-tiers) task.
+
+In other words, tasks from `preferred` [tier](../../reference/configuration/#job-objects) jobs may
+preempt tasks from any `preemptible` or `revocable` job. However, a `preferred` task may only be
+preempted by tasks from `preferred` jobs in the same role with higher [priority](../../reference/configuration/#job-objects).
+
+Aurora requires resource quotas for [production non-dedicated jobs](../../reference/configuration/#job-objects).
+Quota is enforced at the job role level and when set, defines a non-preemptible pool of compute resources within
+that role. All job types (service, adhoc or cron) require role resource quota unless a job has a
+[dedicated constraint set](../constraints/#dedicated-attribute).
+
+To grant quota to a particular role in production, an operator can use the command
+`aurora_admin set_quota`.
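+
+A hedged example invocation is shown below; the argument order and unit format are illustrative
+assumptions, so consult `aurora_admin set_quota -h` on your cluster for the authoritative usage:
+
+    aurora_admin set_quota devcluster www-data 10 4GB 20GB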
diff --git a/source/documentation/0.16.0/features/resource-isolation.md b/source/documentation/0.16.0/features/resource-isolation.md
new file mode 100644
index 0000000..a5294f6
--- /dev/null
+++ b/source/documentation/0.16.0/features/resource-isolation.md
@@ -0,0 +1,181 @@
+Resources Isolation and Sizing
+==============================
+
+This document assumes Aurora and Mesos have been configured
+using our [recommended resource isolation settings](../../operations/configuration/#resource-isolation).
+
+- [Isolation](#isolation)
+- [Sizing](#sizing)
+- [Oversubscription](#oversubscription)
+
+
+Isolation
+---------
+
+Aurora is a multi-tenant system; a single software instance runs on a
+server, serving multiple clients/tenants. To share resources among
+tenants, it leverages Mesos for isolation of:
+
+* CPU
+* GPU
+* memory
+* disk space
+* ports
+
+CPU is a soft limit, and handled differently from memory and disk space.
+Too low a CPU value results in throttling your application and
+slowing it down. Memory and disk space are both hard limits; when your
+application goes over these values, it's killed.
+
+### CPU Isolation
+
+Mesos can be configured to use a quota-based CPU scheduler (the *Completely
+Fair Scheduler*) to provide consistent and predictable performance.
+This is effectively a guarantee of resources -- you receive at least what
+you requested, but also no more than you've requested.
+
+The scheduler gives applications a CPU quota for every 100 ms interval.
+When an application uses its quota for an interval, it is throttled for
+the rest of the 100 ms. Usage resets for each interval and unused
+quota does not carry over.
+
+For example, an application specifying 4.0 CPU has access to 400 ms of
+CPU time every 100 ms. This CPU quota can be used in different ways,
+depending on the application and available resources. Consider the
+scenarios shown in this diagram.
+
+![CPU Availability](../images/CPUavailability.png)
+
+* *Scenario A*: the application can use up to 4 cores continuously for
+every 100 ms interval. It is never throttled and starts processing
+new requests immediately.
+
+* *Scenario B* : the application uses up to 8 cores (depending on
+availability) but is throttled after 50 ms. The CPU quota resets at the
+start of each new 100 ms interval.
+
+* *Scenario C* : is like Scenario A, but there is a garbage collection
+event in the second interval that consumes all CPU quota. The
+application throttles for the remaining 75 ms of that interval and
+cannot service requests until the next interval. In this example, the
+garbage collection finished in one interval but, depending on how much
+garbage needs collecting, it may take more than one interval and further
+delay service of requests.
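+
+As a quick sanity check of the numbers in Scenario B (a back-of-the-envelope calculation, not part
+of Mesos itself):
+
+```python
+quota_ms = 4.0 * 100          # 4.0 CPUs => 400 ms of CPU time per 100 ms interval
+cores_used = 8                # cores the application bursts onto
+print(quota_ms / cores_used)  # 50.0 ms of wall-clock time before throttling, as in Scenario B
+```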
+
+*Technical Note*: Mesos considers logical cores, also known as
+hyperthreading or SMT cores, as the unit of CPU.
+
+### Memory Isolation
+
+Mesos uses dedicated memory allocation. Your application always has
+access to the amount of memory specified in your configuration. The
+application's memory use is defined as the sum of the resident set size
+(RSS) of all processes in a shard. Each shard is considered
+independently.
+
+In other words, say you specified a memory size of 10GB. Each shard
+would receive 10GB of memory. If an individual shard's memory demands
+exceed 10GB, that shard is killed, but the other shards continue
+working.
+
+*Technical note*: Total memory size is not enforced at allocation time,
+so your application can request more than its allocation without getting
+an ENOMEM. However, it will be killed shortly after.
+
+### Disk Space
+
+Disk space used by your application is defined as the sum of the files'
+disk space in your application's directory, including the `stdout` and
+`stderr` logged from your application. Each shard is considered
+independently. You should use off-node storage for your application's
+data whenever possible.
+
+In other words, say you specified disk space size of 100MB. Each shard
+would receive 100MB of disk space. If an individual shard's disk space
+demands exceed 100MB, that shard is killed, but the other shards
+continue working.
+
+After your application finishes running, its allocated disk space is
+reclaimed. Thus, your job's final action should move any disk content
+that you want to keep, such as logs, to your home file system or other
+less transitory storage. Disk reclamation takes place an undefined
+period after the application finish time; until then, the disk contents
+are still available but you shouldn't count on them being so.
+
+*Technical note* : Disk space is not enforced at write so your
+application can write above its quota without getting an ENOSPC, but it
+will be killed shortly after. This is subject to change.
+
+### GPU Isolation
+
+GPU isolation will be supported for Nvidia devices starting from Mesos 1.0.
+Access to the allocated units will be exclusive with no sharing between tasks
+allowed (e.g. no fractional GPU allocation). For more details, see the
+[Mesos design document](https://docs.google.com/document/d/10GJ1A80x4nIEo8kfdeo9B11PIbS1xJrrB4Z373Ifkpo/edit#heading=h.w84lz7p4eexl)
+and the [Mesos agent configuration](http://mesos.apache.org/documentation/latest/configuration/).
+
+### Other Resources
+
+Other resources, such as network bandwidth, do not have any performance
+guarantees. For some resources, such as memory bandwidth, there are no
+practical sharing methods so some application combinations collocated on
+the same host may cause contention.
+
+
+Sizing
+-------
+
+### CPU Sizing
+
+To correctly size Aurora-run Mesos tasks, specify a per-shard CPU value
+that lets the task run at its desired performance when at peak load
+distributed across all shards. Include reserve capacity of at least 50%,
+possibly more, depending on how critical your service is (or how
+confident you are about your original estimate :-)), ideally by
+increasing the number of shards to also improve resiliency. When running
+your application, observe its CPU stats over time. If consistently at or
+near your quota during peak load, you should consider increasing either
+per-shard CPU or the number of shards.
+
+### Memory Sizing
+
+Size for your application's peak requirement. Observe the per-instance
+memory statistics over time, as memory requirements can vary over
+different periods. Remember that if your application exceeds its memory
+value, it will be killed, so you should also add a safety margin of
+around 10-20%. If you have the ability to do so, you may also want to
+put alerts on the per-instance memory.
+
+### Disk Space Sizing
+
+Size for your application's peak requirement. Rotate and discard log
+files as needed to stay within your quota. When running a Java process,
+add the maximum size of the Java heap to your disk space requirement, in
+order to account for an out of memory error dumping the heap
+into the application's sandbox space.
+
+### GPU Sizing
+
+GPU sizing is highly dependent on your application requirements and is only limited
+by the number of physical GPU units available on a target box.
+
+
+Oversubscription
+----------------
+
+Mesos supports [oversubscription of machine resources](http://mesos.apache.org/documentation/latest/oversubscription/)
+via the concept of revocable tasks. In contrast to non-revocable tasks, revocable tasks are best-effort.
+Mesos reserves the right to throttle or even kill them if they might affect existing high-priority
+user-facing services.
+
+As of today, the only revocable resources supported by Aurora are CPU and RAM. A job can
+opt in to use those by specifying the `revocable` [Configuration Tier](../../features/multitenancy/#configuration-tiers).
+A revocable job will only be scheduled using revocable resources, even if there are plenty of
+non-revocable resources available.
+
+The Aurora scheduler must be [configured to receive revocable offers](../../operations/configuration/#resource-isolation)
+from Mesos and accept revocable jobs. If not configured properly, revocable tasks will never get
+assigned to hosts and will stay in `PENDING`.
+
+For details on how to mark a job as being revocable, see the
+[Configuration Reference](../../reference/configuration/).
diff --git a/source/documentation/0.16.0/features/service-discovery.md b/source/documentation/0.16.0/features/service-discovery.md
new file mode 100644
index 0000000..5a97eaf
--- /dev/null
+++ b/source/documentation/0.16.0/features/service-discovery.md
@@ -0,0 +1,42 @@
+Service Discovery
+=================
+
+It is possible for the Aurora executor to announce tasks into ServerSets for
+the purpose of service discovery.  ServerSets use the Zookeeper [group membership pattern](http://zookeeper.apache.org/doc/trunk/recipes.html#sc_outOfTheBox)
+of which there are several reference implementations:
+
+  - [C++](https://github.com/apache/mesos/blob/master/src/zookeeper/group.cpp)
+  - [Java](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/zookeeper/ServerSetImpl.java#L221)
+  - [Python](https://github.com/twitter/commons/blob/master/src/python/twitter/common/zookeeper/serverset/serverset.py#L51)
+
+These can also be used natively in Finagle using the [ZookeeperServerSetCluster](https://github.com/twitter/finagle/blob/master/finagle-serversets/src/main/scala/com/twitter/finagle/zookeeper/ZookeeperServerSetCluster.scala).
+
+For more information about how to configure announcing, see the [Configuration Reference](../../reference/configuration/).
+
+Using Mesos DiscoveryInfo
+-------------------------
+Experimental support for populating DiscoveryInfo in Mesos is introduced in Aurora. This can be used to build a
+custom service discovery system that does not rely on ZooKeeper. Please see the `Service Discovery` section in the
+[Mesos Framework Development guide](http://mesos.apache.org/documentation/latest/app-framework-development-guide/) for
+an explanation of the protobuf message in Mesos.
+
+To use this feature, please enable the `--populate_discovery_info` flag on the scheduler. All jobs started by the scheduler
+afterwards will have their portmap populated in Mesos and discoverable via the `/state` endpoint on the Mesos master and agent.
+
+### Using Mesos DNS
+An example is using [Mesos-DNS](https://github.com/mesosphere/mesos-dns), which is able to generate multiple DNS
+records. With the current implementation, the example job with key `devcluster/vagrant/test/http-example` generates at
+least the following:
+
+1. An A record for `http_example.test.vagrant.aurora.mesos` (which only includes the IP address);
+2. A [SRV record](https://en.wikipedia.org/wiki/SRV_record) for
+ `_http_example.test.vagrant._tcp.aurora.mesos`, which includes the IP address and every port. This should only
+  be used if the service has one port.
+3. A SRV record `_{port-name}._http_example.test.vagrant._tcp.aurora.mesos` for each port name
+  defined. This should be used when the service has multiple ports.
+
+Things to note:
+
+1. The domain part (".mesos" in the above example) can be configured in [Mesos DNS](http://mesosphere.github.io/mesos-dns/docs/configuration-parameters.html);
+2. Right now, the portmap and port aliases in the announcer object are not reflected in DiscoveryInfo, and are therefore not visible in
+   Mesos DNS records either. This is because they are only resolved in the Thermos executor.
diff --git a/source/documentation/0.16.0/features/services.md b/source/documentation/0.16.0/features/services.md
new file mode 100644
index 0000000..7d7d5e1
--- /dev/null
+++ b/source/documentation/0.16.0/features/services.md
@@ -0,0 +1,99 @@
+Long-running Services
+=====================
+
+Jobs that always restart on completion, whether successful or unsuccessful,
+are called services. This is useful for long-running processes
+such as webservices that should always be running, unless stopped explicitly.
+
+
+Service Specification
+---------------------
+
+A job is identified as a service by the presence of the flag
+`service=True` in the [`Job`](../../reference/configuration/#job-objects) object.
+The `Service` alias can be used as shorthand for `Job` with `service=True`.
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/hello_world.aurora
+    hello = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = SequentialTask(
+      processes = [hello],
+      resources = Resources(cpu = 1.0, ram = 128*MB, disk = 128*MB)
+    )
+
+    jobs = [
+      Service(
+        task = task,
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'prod',
+        name = 'hello'
+      )
+    ]
+
+
+Jobs without the service bit set only restart up to `max_task_failures` times and only if they
+terminated unsuccessfully either due to human error or machine failure (see the
+[`Job`](../../reference/configuration/#job-objects) object for details).
+
+
+Ports
+-----
+
+In order to be useful, most services have to bind to one or more ports. Aurora enables this
+use case via the [`thermos.ports` namespace](../../reference/configuration/#thermos-namespace), which
+allows you to request arbitrarily named ports:
+
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[http]}}'
+    )
+
+
+When this process is included in a job, the job will be allocated a port, and the command line
+will be replaced with something like:
+
+    ./run_nginx.sh -port 42816
+
+Where 42816 happens to be the allocated port.
+
+For details on how to enable clients to discover this dynamically assigned port, see our
+[Service Discovery](../service-discovery/) documentation.
+
+
+Health Checking
+---------------
+
+Typically, the Thermos executor monitors processes within a task only by liveness of the forked
+process. In addition to that, Aurora has support for rudimentary health checking: either via HTTP
+or via custom shell scripts.
+
+For example, simply by requesting a `health` port, a process can request to be health checked
+via repeated calls to the `/health` endpoint:
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[health]}}'
+    )
+
+Please see the
+[configuration reference](../../reference/configuration/#healthcheckconfig-objects)
+for configuration options for this feature.
+
+You can pause health checking by touching a file inside of your sandbox, named `.healthchecksnooze`.
+As long as that file is present, health checks will be disabled, enabling users to gather core
+dumps or other performance measurements without worrying about Aurora's health check killing
+their process.
+
+WARNING: Remember to remove this when you are done, otherwise your instance will have permanently
+disabled health checks.
diff --git a/source/documentation/0.16.0/features/sla-metrics.md b/source/documentation/0.16.0/features/sla-metrics.md
new file mode 100644
index 0000000..72e4d83
--- /dev/null
+++ b/source/documentation/0.16.0/features/sla-metrics.md
@@ -0,0 +1,215 @@
+Aurora SLA Measurement
+======================
+
+- [Overview](#overview)
+- [Metric Details](#metric-details)
+  - [Platform Uptime](#platform-uptime)
+  - [Job Uptime](#job-uptime)
+  - [Median Time To Assigned (MTTA)](#median-time-to-assigned-\(mtta\))
+  - [Median Time To Starting (MTTS)](#median-time-to-starting-\(mtts\))
+  - [Median Time To Running (MTTR)](#median-time-to-running-\(mttr\))
+- [Limitations](#limitations)
+
+## Overview
+
+The primary goal of the feature is collection and monitoring of Aurora job SLA (Service Level
+Agreement) metrics that define a contractual relationship between the Aurora/Mesos platform
+and hosted services.
+
+The Aurora SLA feature is by default only enabled for service (non-cron)
+production jobs (`"production=True"` in your `.aurora` config). It can be enabled for
+non-production services by an operator via the scheduler command line flag `-sla_non_prod_metrics`.
+
+Counters that track SLA measurements are computed periodically within the scheduler.
+The individual instance metrics are refreshed every minute (configurable via
+`sla_stat_refresh_interval`). The instance counters are subsequently aggregated by
+relevant grouping types before being exported to the scheduler `/vars` endpoint (when using `vagrant`
+that would be `http://192.168.33.7:8081/vars`).
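+
+For example, a hedged sketch of pulling the exported SLA counters from the `/vars` endpoint,
+assuming the endpoint serves plain-text `name value` lines and using the Vagrant address above:
+
+```python
+import urllib.request
+
+with urllib.request.urlopen('http://192.168.33.7:8081/vars') as resp:
+    for line in resp.read().decode().splitlines():
+        if line.startswith('sla_'):
+            print(line)   # e.g. "sla_cluster_platform_uptime_percent 99.98"
+```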
+
+
+## Metric Details
+
+### Platform Uptime
+
+*Aggregate amount of time a job spends in a non-runnable state due to platform unavailability
+or scheduling delays. This metric tracks Aurora/Mesos uptime performance and reflects on any
+system-caused downtime events (tasks LOST or DRAINED). Any user-initiated task kills/restarts
+will not degrade this metric.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_platform_uptime_percent`
+* Per cluster - `sla_cluster_platform_uptime_percent`
+
+**Units:** percent
+
+A fault in the task environment may cause Aurora and Mesos to have different views of the task state
+or lose track of the task's existence. In such cases, the service task is marked as LOST and
+rescheduled by Aurora. For example, this may happen when the task stays in ASSIGNED or STARTING
+for too long or the Mesos agent becomes unhealthy (or disappears completely). The time between
+task entering LOST and its replacement reaching RUNNING state is counted towards platform downtime.
+
+Another example of a platform downtime event is the administrator-requested task rescheduling. This
+happens during planned Mesos agent maintenance when all agent tasks are marked as DRAINED and
+rescheduled elsewhere.
+
+To accurately calculate Platform Uptime, we must separate platform incurred downtime from user
+actions that put a service instance in a non-operational state. It is simpler to isolate
+user-incurred downtime and treat all other downtime as platform incurred.
+
+Currently, a user can cause a healthy service (task) downtime in only two ways: via `killTasks`
+or `restartShards` RPCs. For both, their affected tasks leave an audit state transition trail
+relevant to uptime calculations. By applying a special "SLA meaning" to exposed task state
+transition records, we can build a deterministic downtime trace for every given service instance.
+
+A task going through a state transition carries one of three possible SLA meanings
+(see [SlaAlgorithm.java](https://github.com/apache/aurora/blob/rel/0.16.0/src/main/java/org/apache/aurora/scheduler/sla/SlaAlgorithm.java) for
+sla-to-task-state mapping):
+
+* Task is UP: starts a period where the task is considered to be up and running from the Aurora
+  platform standpoint.
+
+* Task is DOWN: starts a period where the task cannot reach the UP state for some
+  non-user-related reason. Counts towards instance downtime.
+
+* Task is REMOVED from SLA: starts a period where the task is not expected to be UP due to
+  user initiated action or failure. We ignore this period for the uptime calculation purposes.
+
+This metric is recalculated over the last sampling period (last minute) to account for
+any UP/DOWN/REMOVED events. It ignores any UP/DOWN events not immediately adjacent to the
+sampling interval as well as adjacent REMOVED events.
+
+### Job Uptime
+
+*Percentage of the job instances considered to be in RUNNING state for the specified duration
+relative to request time. This is a purely application-side metric that considers the aggregate
+uptime of all RUNNING instances. Any user- or platform-initiated restarts directly affect
+this metric.*
+
+**Collection scope:** We currently expose job uptime values at 5 pre-defined
+percentiles (50th, 75th, 90th, 95th and 99th):
+
+* `sla_<job_key>_job_uptime_50_00_sec`
+* `sla_<job_key>_job_uptime_75_00_sec`
+* `sla_<job_key>_job_uptime_90_00_sec`
+* `sla_<job_key>_job_uptime_95_00_sec`
+* `sla_<job_key>_job_uptime_99_00_sec`
+
+**Units:** seconds
+
+You can also get customized real-time stats from the Aurora client. See `aurora sla -h` for
+more details.
+
+### Median Time To Assigned (MTTA)
+
+*Median time a job spends waiting for its tasks to be assigned to a host. This is a combined
+metric that helps track the dependency of scheduling performance on the requested resources
+(user scope) as well as the internal scheduler bin-packing algorithm efficiency (platform scope).*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtta_ms`
+* Per cluster - `sla_cluster_mtta_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.16.0/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mtta_ms`
+    * `sla_cpu_medium_mtta_ms`
+    * `sla_cpu_large_mtta_ms`
+    * `sla_cpu_xlarge_mtta_ms`
+    * `sla_cpu_xxlarge_mtta_ms`
+  * By RAM:
+    * `sla_ram_small_mtta_ms`
+    * `sla_ram_medium_mtta_ms`
+    * `sla_ram_large_mtta_ms`
+    * `sla_ram_xlarge_mtta_ms`
+    * `sla_ram_xxlarge_mtta_ms`
+  * By DISK:
+    * `sla_disk_small_mtta_ms`
+    * `sla_disk_medium_mtta_ms`
+    * `sla_disk_large_mtta_ms`
+    * `sla_disk_xlarge_mtta_ms`
+    * `sla_disk_xxlarge_mtta_ms`
+
+**Units:** milliseconds
+
+MTTA only considers instances that have already reached ASSIGNED state and ignores those
+that are still PENDING. This ensures straggler instances (e.g. with unreasonable resource
+constraints) do not affect metric curves.
+
+### Median Time To Starting (MTTS)
+
+*Median time a job waits for its tasks to reach STARTING state. This is a comprehensive metric
+reflecting on the overall time it takes for Aurora/Mesos to start initializing the sandbox
+for a task.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtts_ms`
+* Per cluster - `sla_cluster_mtts_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.16.0/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mtts_ms`
+    * `sla_cpu_medium_mtts_ms`
+    * `sla_cpu_large_mtts_ms`
+    * `sla_cpu_xlarge_mtts_ms`
+    * `sla_cpu_xxlarge_mtts_ms`
+  * By RAM:
+    * `sla_ram_small_mtts_ms`
+    * `sla_ram_medium_mtts_ms`
+    * `sla_ram_large_mtts_ms`
+    * `sla_ram_xlarge_mtts_ms`
+    * `sla_ram_xxlarge_mtts_ms`
+  * By DISK:
+    * `sla_disk_small_mtts_ms`
+    * `sla_disk_medium_mtts_ms`
+    * `sla_disk_large_mtts_ms`
+    * `sla_disk_xlarge_mtts_ms`
+    * `sla_disk_xxlarge_mtts_ms`
+
+**Units:** milliseconds
+
+MTTS only considers instances in STARTING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+### Median Time To Running (MTTR)
+
+*Median time a job waits for its tasks to reach RUNNING state. This is a comprehensive metric
+reflecting on the overall time it takes for Aurora/Mesos to start executing user content.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mttr_ms`
+* Per cluster - `sla_cluster_mttr_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.16.0/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mttr_ms`
+    * `sla_cpu_medium_mttr_ms`
+    * `sla_cpu_large_mttr_ms`
+    * `sla_cpu_xlarge_mttr_ms`
+    * `sla_cpu_xxlarge_mttr_ms`
+  * By RAM:
+    * `sla_ram_small_mttr_ms`
+    * `sla_ram_medium_mttr_ms`
+    * `sla_ram_large_mttr_ms`
+    * `sla_ram_xlarge_mttr_ms`
+    * `sla_ram_xxlarge_mttr_ms`
+  * By DISK:
+    * `sla_disk_small_mttr_ms`
+    * `sla_disk_medium_mttr_ms`
+    * `sla_disk_large_mttr_ms`
+    * `sla_disk_xlarge_mttr_ms`
+    * `sla_disk_xxlarge_mttr_ms`
+
+**Units:** milliseconds
+
+MTTR only considers instances in RUNNING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+## Limitations
+
+* The availability of Aurora SLA metrics is bound by the scheduler availability.
+
+* All metrics are calculated at a pre-defined interval (currently set at 1 minute).
+  Scheduler restarts may result in missed collections.
diff --git a/source/documentation/0.16.0/features/webhooks.md b/source/documentation/0.16.0/features/webhooks.md
new file mode 100644
index 0000000..075aeec
--- /dev/null
+++ b/source/documentation/0.16.0/features/webhooks.md
@@ -0,0 +1,80 @@
+Webhooks
+========
+
+Aurora has an optional feature which allows an operator to specify a file to configure an HTTP webhook
+to receive task state change events. It can be enabled with a scheduler flag, e.g.
+`-webhook_config=/path/to/webhook.json`. At this point, webhooks are still considered *experimental*.
+
+Below is a sample configuration:
+
+```json
+{
+  "headers": {
+    "Content-Type": "application/vnd.kafka.json.v1+json",
+    "Producer-Type": "reliable"
+  },
+  "targetURL": "http://localhost:5000/",
+  "timeoutMsec": 5
+}
+```
+
+And an example of a response that you will get back:
+```json
+{
+    "task":
+    {
+        "cachedHashCode":0,
+        "assignedTask": {
+            "cachedHashCode":0,
+            "taskId":"vagrant-test-http_example-8-a6cf7ec5-d793-49c7-b10f-0e14ab80bfff",
+            "task": {
+                "cachedHashCode":-1819348376,
+                "job": {
+                    "cachedHashCode":803049425,
+                    "role":"vagrant",
+                    "environment":"test",
+                    "name":"http_example"
+                    },
+                "owner": {
+                    "cachedHashCode":226895216,
+                    "user":"vagrant"
+                    },
+                "isService":true,
+                "numCpus":0.1,
+                "ramMb":16,
+                "diskMb":8,
+                "priority":0,
+                "maxTaskFailures":1,
+                "production":false,
+                "resources":[
+                    {"cachedHashCode":729800451,"setField":"NUM_CPUS","value":0.1},
+                    {"cachedHashCode":552899914,"setField":"RAM_MB","value":16},
+                    {"cachedHashCode":-1547868317,"setField":"DISK_MB","value":8},
+                    {"cachedHashCode":1957328227,"setField":"NAMED_PORT","value":"http"},
+                    {"cachedHashCode":1954229436,"setField":"NAMED_PORT","value":"tcp"}
+                    ],
+                "constraints":[],
+                "requestedPorts":["http","tcp"],
+                "taskLinks":{"http":"http://%host%:%port:http%"},
+                "contactEmail":"vagrant@localhost",
+                "executorConfig": {
+                    "cachedHashCode":-1194797325,
+                    "name":"AuroraExecutor",
+                    "data": "{\"environment\": \"test\", \"health_check_config\": {\"initial_interval_secs\": 5.0, \"health_checker\": { \"http\": {\"expected_response_code\": 0, \"endpoint\": \"/health\", \"expected_response\": \"ok\"}}, \"max_consecutive_failures\": 0, \"timeout_secs\": 1.0, \"interval_secs\": 1.0}, \"name\": \"http_example\", \"service\": true, \"max_task_failures\": 1, \"cron_collision_policy\": \"KILL_EXISTING\", \"enable_hooks\": false, \"cluster\": \"devcluster\", \"task\": {\"processes\": [{\"daemon\": false, \"name\": \"echo_ports\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"echo \\\"tcp port: {{thermos.ports[tcp]}}; http port: {{thermos.ports[http]}}; alias: {{thermos.ports[alias]}}\\\"\", \"final\": false}, {\"daemon\": false, \"name\": \"stage_server\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"cp /vagrant/src/test/sh/org/apache/aurora/e2e/http_example.py .\", \"final\": false}, {\"daemon\": false, \"name\": \"run_server\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"python http_example.py {{thermos.ports[http]}}\", \"final\": false}], \"name\": \"http_example\", \"finalization_wait\": 30, \"max_failures\": 1, \"max_concurrency\": 0, \"resources\": {\"disk\": 8388608, \"ram\": 16777216, \"cpu\": 0.1}, \"constraints\": [{\"order\": [\"echo_ports\", \"stage_server\", \"run_server\"]}]}, \"production\": false, \"role\": \"vagrant\", \"contact\": \"vagrant@localhost\", \"announce\": {\"primary_port\": \"http\", \"portmap\": {\"alias\": \"http\"}}, \"lifecycle\": {\"http\": {\"graceful_shutdown_endpoint\": \"/quitquitquit\", \"port\": \"health\", \"shutdown_endpoint\": \"/abortabortabort\"}}, \"priority\": 0}"},
+                    "metadata":[],
+                    "container":{
+                        "cachedHashCode":-1955376216,
+                        "setField":"MESOS",
+                        "value":{"cachedHashCode":31}}
+                    },
+                    "assignedPorts":{},
+                    "instanceId":8
+        },
+        "status":"PENDING",
+        "failureCount":0,
+        "taskEvents":[
+            {"cachedHashCode":0,"timestamp":1464992060258,"status":"PENDING","scheduler":"aurora"}]
+        },
+        "oldState":{}}
+```
+
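+A minimal sketch of a receiver for these events, assuming the sample `targetURL` above
+(`http://localhost:5000/`) and using only the Python standard library:
+
+```python
+import json
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+class WebhookHandler(BaseHTTPRequestHandler):
+    def do_POST(self):
+        length = int(self.headers.get('Content-Length', 0))
+        event = json.loads(self.rfile.read(length))
+        # The payload carries the task and its new status, as shown above.
+        print(event['task']['status'], event['task']['assignedTask']['taskId'])
+        self.send_response(200)
+        self.end_headers()
+
+if __name__ == '__main__':
+    HTTPServer(('localhost', 5000), WebhookHandler).serve_forever()
+```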
diff --git a/source/documentation/0.16.0/getting-started/overview.md b/source/documentation/0.16.0/getting-started/overview.md
new file mode 100644
index 0000000..84c5916
--- /dev/null
+++ b/source/documentation/0.16.0/getting-started/overview.md
@@ -0,0 +1,112 @@
+Aurora System Overview
+======================
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+
+Components
+----------
+
+It is important to have an understanding of the components that make up
+a functioning Aurora cluster.
+
+![Aurora Components](../images/components.png)
+
+* **Aurora scheduler**
+  The scheduler is your primary interface to the work you run in your cluster.  You will
+  instruct it to run jobs, and it will manage them in Mesos for you.  You will also frequently use
+  the scheduler's read-only web interface as a heads-up display for what's running in your cluster.
+
+* **Aurora client**
+  The client (`aurora` command) is a command line tool that exposes primitives that you can use to
+  interact with the scheduler.
+
+  Aurora also provides an admin client (`aurora_admin` command) that contains commands built for
+  cluster administrators.  You can use this tool to do things like manage user quotas and manage
+  graceful maintenance on machines in the cluster.
+
+* **Aurora executor**
+  The executor (a.k.a. Thermos executor) is responsible for carrying out the workloads described in
+  the Aurora DSL (`.aurora` files).  The executor is what actually executes user processes.  It will
+  also perform health checking of tasks and register tasks in ZooKeeper for the purposes of dynamic
+  service discovery.
+
+* **Aurora observer**
+  The observer provides browser-based access to the status of individual tasks executing on worker
+  machines.  It gives insight into the processes executing, and facilitates browsing of task sandbox
+  directories.
+
+* **ZooKeeper**
+  [ZooKeeper](http://zookeeper.apache.org) is a distributed consensus system.  In an Aurora cluster
+  it is used for reliable election of the leading Aurora scheduler and Mesos master.  It is also
+  used as a vehicle for service discovery, see [Service Discovery](../../features/service-discovery/)
+
+* **Mesos master**
+  The master is responsible for tracking worker machines and performing accounting of their
+  resources.  The scheduler interfaces with the master to control the cluster.
+
+* **Mesos agent**
+  The agent receives work assigned by the scheduler and executes it.  It interfaces with Linux
+  isolation systems like cgroups, namespaces and Docker to manage the resource consumption of tasks.
+  When a user task is launched, the agent will launch the executor (in the context of a Linux cgroup
+  or Docker container depending upon the environment), which will in turn fork user processes.
+
+  In earlier versions of Mesos and Aurora, the Mesos agent was known as the Mesos slave.
+
+
+Jobs, Tasks and Processes
+--------------------------
+
+Aurora is a Mesos framework used to schedule *jobs* onto Mesos. Mesos
+cares about individual *tasks*, but typical jobs consist of dozens or
+hundreds of task replicas. Aurora provides a layer on top of Mesos with
+its `Job` abstraction. An Aurora `Job` consists of a task template and
+instructions for creating near-identical replicas of that task (modulo
+things like "instance id" or specific port numbers which may differ from
+machine to machine).
+
+The number of tasks that make up a Job can vary. On a basic level, a Job consists of
+one task template and instructions for creating near-identical replicas of that task
+(otherwise referred to as "instances" or "shards").
+
+A task can merely be a single *process* corresponding to a single
+command line, such as `python2.7 my_script.py`. However, a task can also
+consist of many separate processes, which all run within a single
+sandbox. For example, running multiple cooperating agents together,
+such as `logrotate`, `installer`, master, or agent processes. This is
+where Thermos comes in. While Aurora provides a `Job` abstraction on
+top of Mesos `Tasks`, Thermos provides a `Process` abstraction
+underneath Mesos `Task`s and serves as part of the Aurora framework's
+executor.
+
+You define `Job`s, `Task`s, and `Process`es in a configuration file.
+Configuration files are written in Python, and make use of the
+[Pystachio](https://github.com/wickman/pystachio) templating language,
+along with specific Aurora, Mesos, and Thermos commands and methods.
+The configuration files typically end with a `.aurora` extension.
+
+Summary:
+
+* Aurora manages jobs made of tasks.
+* Mesos manages tasks made of processes.
+* Thermos manages processes.
+* All that is defined in `.aurora` configuration files.
+
+![Aurora hierarchy](../images/aurora_hierarchy.png)
+
+Each `Task` has a *sandbox* created when the `Task` starts and garbage
+collected when it finishes. All of a `Task`'s processes run in its
+sandbox, so processes can share state by using a shared current working
+directory.
+
+The sandbox garbage collection policy considers many factors, most
+importantly age and size. It makes a best-effort attempt to keep
+sandboxes around as long as possible post-task in order for service
+owners to inspect data and logs, should the `Task` have completed
+abnormally. But you can't design your applications assuming sandboxes
+will be around forever, e.g. by building log saving or other
+checkpointing mechanisms directly into your application or into your
+`Job` description.
+
diff --git a/source/documentation/0.16.0/getting-started/tutorial.md b/source/documentation/0.16.0/getting-started/tutorial.md
new file mode 100644
index 0000000..1f037fc
--- /dev/null
+++ b/source/documentation/0.16.0/getting-started/tutorial.md
@@ -0,0 +1,258 @@
+# Aurora Tutorial
+
+This tutorial shows how to use the Aurora scheduler to run (and "`printf-debug`")
+a hello world program on Mesos. This is the recommended document for new Aurora users
+to start getting up to speed on the system.
+
+- [Prerequisite](#setup-install-aurora)
+- [The Script](#the-script)
+- [Aurora Configuration](#aurora-configuration)
+- [Creating the Job](#creating-the-job)
+- [Watching the Job Run](#watching-the-job-run)
+- [Cleanup](#cleanup)
+- [Next Steps](#next-steps)
+
+
+## Prerequisite
+
+This tutorial assumes you are running [Aurora locally using Vagrant](../vagrant/).
+However, in general the instructions are also applicable to any other
+[Aurora installation](../../operations/installation/).
+
+Unless otherwise stated, all commands are to be run from the root of the Aurora
+repository clone.
+
+
+## The Script
+
+Our "hello world" application is a simple Python script that loops
+forever, displaying the time every few seconds. Copy the code below and
+put it in a file named `hello_world.py` in the root of your Aurora repository clone
+(Note: this directory is the same as `/vagrant` inside the Vagrant VMs).
+
+The script has an intentional bug, which we will explain later on.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+import time
+
+def main():
+  SLEEP_DELAY = 10
+  # Python experts - ignore this blatant bug.
+  for i in xrang(100):
+    print("Hello world! The time is now: %s. Sleeping for %d secs" % (
+      time.asctime(), SLEEP_DELAY))
+    time.sleep(SLEEP_DELAY)
+
+if __name__ == "__main__":
+  main()
+```
+
+## Aurora Configuration
+
+Once we have our script/program, we need to create a *configuration
+file* that tells Aurora how to manage and launch our Job. Save the below
+code in the file `hello_world.aurora`.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+pkg_path = '/vagrant/hello_world.py'
+
+# we use a trick here to make the configuration change with
+# the contents of the file, for simplicity.  in a normal setting, packages would be
+# versioned, and the version number would be changed in the configuration.
+import hashlib
+with open(pkg_path, 'rb') as f:
+  pkg_checksum = hashlib.md5(f.read()).hexdigest()
+
+# copy hello_world.py into the local sandbox
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s . && echo %s && chmod +x hello_world.py' % (pkg_path, pkg_checksum))
+
+# run the script
+hello_world = Process(
+  name = 'hello_world',
+  cmdline = 'python -u hello_world.py')
+
+# describe the task
+hello_world_task = SequentialTask(
+  processes = [install, hello_world],
+  resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB))
+
+jobs = [
+  Service(cluster = 'devcluster',
+          environment = 'devel',
+          role = 'www-data',
+          name = 'hello_world',
+          task = hello_world_task)
+]
+```
+
+There is a lot going on in that configuration file:
+
+1. From a "big picture" viewpoint, it first defines two
+Processes. Then it defines a Task that runs the two Processes in the
+order specified in the Task definition, as well as specifying what
+computational and memory resources are available for them.  Finally,
+it defines a Job that will schedule the Task on available and suitable
+machines. This Job is the sole member of a list of Jobs; you can
+specify more than one Job in a config file.
+
+2. At the Process level, it specifies how to get your code into the
+local sandbox in which it will run. It then specifies how the code is
+actually run once the second Process starts.
+
+For more about Aurora configuration files, see the [Configuration
+Tutorial](../../reference/configuration-tutorial/) and the [Configuration
+Reference](../../reference/configuration/) (preferably after finishing this
+tutorial).
+
+
+## Creating the Job
+
+We're ready to launch our job! To do so, we use the Aurora Client to
+issue a Job creation request to the Aurora scheduler.
+
+Many Aurora Client commands take a *job key* argument, which uniquely
+identifies a Job. A job key consists of four parts, each separated by a
+"/". The four parts are  `<cluster>/<role>/<environment>/<jobname>`
+in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts existing on the agent machines. If you
+don't know what accounts are available, contact your sysadmin.
+* Environment names are namespaces; you can count on `test`, `devel`,
+`staging` and `prod` existing.
+* Jobname is the custom name of your job.
+
+When comparing two job keys, if any of the four parts is different from
+its counterpart in the other key, then the two job keys identify two separate
+jobs. If all four values are identical, the job keys identify the same job.
+
+The `clusters.json` [client configuration](../../reference/client-cluster-configuration/)
+for the Aurora scheduler defines the available cluster names.
+For Vagrant, from the top-level of your Aurora repository clone, do:
+
+    $ vagrant ssh
+
+Followed by:
+
+    vagrant@aurora:~$ cat /etc/aurora/clusters.json
+
+You'll see something like the following. The `name` value shown here corresponds to a job key's cluster value.
+
+```javascript
+[{
+  "name": "devcluster",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler",
+  "auth_mechanism": "UNAUTHENTICATED",
+  "slave_run_directory": "latest",
+  "slave_root": "/var/lib/mesos"
+}]
+```
+
+The Aurora Client command that actually runs our Job is `aurora job create`. It creates a Job as
+specified by its job key and configuration file arguments and runs it.
+
+    aurora job create <cluster>/<role>/<environment>/<jobname> <config_file>
+
+Or for our example:
+
+    aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+After entering our virtual machine using `vagrant ssh`, this returns:
+
+    vagrant@aurora:~$ aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Creating job hello_world
+     INFO] Checking status of devcluster/www-data/devel/hello_world
+    Job create succeeded: job url=http://aurora.local:8081/scheduler/www-data/devel/hello_world
+
+
+## Watching the Job Run
+
+Now that our job is running, let's see what it's doing. Access the
+scheduler web interface at `http://$scheduler_hostname:$scheduler_port/scheduler`,
+or, when using `vagrant`, at `http://192.168.33.7:8081/scheduler`.
+First we see what Jobs are scheduled:
+
+![Scheduled Jobs](../images/ScheduledJobs.png)
+
+Click on your user name, which in this case was `www-data`, and we see the Jobs associated
+with that role:
+
+![Role Jobs](../images/RoleJobs.png)
+
+If you click on your `hello_world` Job, you'll see:
+
+![hello_world Job](../images/HelloWorldJob.png)
+
+Oops, looks like our first job didn't quite work! The task is temporarily throttled for
+having failed on every attempt of the Aurora scheduler to run it. We have to figure out
+what is going wrong.
+
+On the Completed tasks tab, we see all past attempts of the Aurora scheduler to run our job.
+
+![Completed tasks tab](../images/CompletedTasks.png)
+
+We can navigate to the Task page of a failed run by clicking on the host link.
+
+![Task page](../images/TaskBreakdown.png)
+
+Once there, we see that the `hello_world` process failed. The Task page
+captures the standard error and standard output streams and makes them available.
+Clicking through to `stderr` on the failed `hello_world` process, we see what happened.
+
+![stderr page](../images/stderr.png)
+
+It looks like we made a typo in our Python script. We wanted `xrange`,
+not `xrang`. Edit the `hello_world.py` script to use the correct function
+and save it as `hello_world_v2.py`. Then update the `hello_world.aurora`
+configuration to the newest version.
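+
+One way to do that (a sketch; the exact edits are up to you) is to point the configuration at the
+new file name, keeping everything else the same:
+
+```python
+pkg_path = '/vagrant/hello_world_v2.py'
+
+# ...
+
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s . && echo %s && chmod +x hello_world_v2.py' % (pkg_path, pkg_checksum))
+
+hello_world = Process(
+  name = 'hello_world',
+  cmdline = 'python -u hello_world_v2.py')
+```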
+
+In order to try again, we can now instruct the scheduler to update our job:
+
+    vagrant@aurora:~$ aurora update start devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Starting update for: hello_world
+    Job update has started. View your update progress at http://aurora.local:8081/scheduler/www-data/devel/hello_world/update/8ef38017-e60f-400d-a2f2-b5a8b724e95b
+
+This time, the task comes up.
+
+![Running Job](../images/RunningJob.png)
+
+By again clicking on the host, we inspect the Task page, and see that the
+`hello_world` process is running.
+
+![Running Task page](../images/runningtask.png)
+
+We then inspect the output by clicking on `stdout` and see our process'
+output:
+
+![stdout page](../images/stdout.png)
+
+## Cleanup
+
+Now that we're done, we kill the job using the Aurora client:
+
+    vagrant@aurora:~$ aurora job killall devcluster/www-data/devel/hello_world
+     INFO] Killing tasks for job: devcluster/www-data/devel/hello_world
+     INFO] Instances to be killed: [0]
+    Successfully killed instances [0]
+    Job killall succeeded
+
+The job page now shows the `hello_world` tasks as completed.
+
+![Killed Task page](../images/killedtask.png)
+
+## Next Steps
+
+Now that you've finished this Tutorial, you should read or do the following:
+
+- [The Aurora Configuration Tutorial](../../reference/configuration-tutorial/), which provides more examples
+  and best practices for writing Aurora configurations. You should also look at
+  the [Aurora Configuration Reference](../../reference/configuration/).
+- Explore the Aurora Client - use `aurora -h`, and read the
+  [Aurora Client Commands](../../reference/client-commands/) document.
diff --git a/source/documentation/0.16.0/getting-started/vagrant.md b/source/documentation/0.16.0/getting-started/vagrant.md
new file mode 100644
index 0000000..26b9a80
--- /dev/null
+++ b/source/documentation/0.16.0/getting-started/vagrant.md
@@ -0,0 +1,154 @@
+A local Cluster with Vagrant
+============================
+
+This document shows you how to configure a complete cluster using a virtual machine. This setup
+replicates a real cluster in your development machine as closely as possible. After you complete
+the steps outlined here, you will be ready to create and run your first Aurora job.
+
+The following sections describe these steps in detail:
+
+1. [Overview](#overview)
+1. [Install VirtualBox and Vagrant](#install-virtualbox-and-vagrant)
+1. [Clone the Aurora repository](#clone-the-aurora-repository)
+1. [Start the local cluster](#start-the-local-cluster)
+1. [Log onto the VM](#log-onto-the-vm)
+1. [Run your first job](#run-your-first-job)
+1. [Rebuild components](#rebuild-components)
+1. [Shut down or delete your local cluster](#shut-down-or-delete-your-local-cluster)
+1. [Troubleshooting](#troubleshooting)
+
+
+Overview
+--------
+
+The Aurora distribution includes a set of scripts that enable you to create a local cluster in
+your development machine. These scripts use [Vagrant](https://www.vagrantup.com/) and
+[VirtualBox](https://www.virtualbox.org/) to run and configure a virtual machine. Once the
+virtual machine is running, the scripts install and initialize Aurora and any required components
+to create the local cluster.
+
+
+Install VirtualBox and Vagrant
+------------------------------
+
+First, download and install [VirtualBox](https://www.virtualbox.org/) on your development machine.
+
+Then download and install [Vagrant](https://www.vagrantup.com/). To verify that the installation
+was successful, open a terminal window and type the `vagrant` command. You should see a list of
+common commands for this tool.
+
+
+Clone the Aurora repository
+---------------------------
+
+To obtain the Aurora source distribution, clone its Git repository using the following command:
+
+     git clone git://git.apache.org/aurora.git
+
+
+Start the local cluster
+-----------------------
+
+Now change into the `aurora/` directory, which contains the Aurora source code and
+other scripts and tools:
+
+     cd aurora/
+
+To start the local cluster, type the following command:
+
+     vagrant up
+
+This command uses the configuration scripts in the Aurora distribution to:
+
+* Download a Linux system image.
+* Start a virtual machine (VM) and configure it.
+* Install the required build tools on the VM.
+* Install Aurora's requirements (like [Mesos](http://mesos.apache.org/) and
+[Zookeeper](http://zookeeper.apache.org/)) on the VM.
+* Build and install Aurora from source on the VM.
+* Start Aurora's services on the VM.
+
+This process takes several minutes to complete.
+
+You may notice a warning that guest additions in the VM don't match your version of VirtualBox.
+This should generally be harmless, but you may wish to install a vagrant plugin to take care of
+mismatches like this for you:
+
+     vagrant plugin install vagrant-vbguest
+
+With this plugin installed, whenever you `vagrant up` the plugin will upgrade the guest additions
+for you when a version mis-match is detected. You can read more about the plugin
+[here](https://github.com/dotless-de/vagrant-vbguest).
+
+To verify that Aurora is running on the cluster, visit the following URLs:
+
+* Scheduler - http://192.168.33.7:8081
+* Observer - http://192.168.33.7:1338
+* Mesos Master - http://192.168.33.7:5050
+* Mesos Agent - http://192.168.33.7:5051
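+
+From a terminal, a quick (illustrative) way to confirm each endpoint is responding is to print its
+HTTP status code, for example:
+
+     curl -s -o /dev/null -w '%{http_code}\n' http://192.168.33.7:8081
+     curl -s -o /dev/null -w '%{http_code}\n' http://192.168.33.7:5050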
+
+
+Log onto the VM
+---------------
+
+To SSH into the VM, run the following command in your development machine:
+
+     vagrant ssh
+
+To verify that Aurora is installed in the VM, type the `aurora` command. You should see a list
+of arguments and possible commands.
+
+The `/vagrant` directory on the VM is mapped to the `aurora/` local directory
+from which you started the cluster. You can edit files inside this directory in your development
+machine and access them from the VM under `/vagrant`.
+
+A pre-installed `clusters.json` file refers to your local cluster as `devcluster`, which you
+will use in client commands.
+
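+You can peek at it from your development machine (a hypothetical one-liner; `/etc/aurora/clusters.json`
+is the client default described in the installation documentation):
+
+     vagrant ssh -c 'cat /etc/aurora/clusters.json'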
+
+Run your first job
+------------------
+
+Now that your cluster is up and running, you are ready to define and run your first job in Aurora.
+For more information, see the [Aurora Tutorial](../tutorial/).
+
+
+Rebuild components
+------------------
+
+If you are changing Aurora code and would like to rebuild a component, you can use the `aurorabuild`
+command on the VM to build and restart a component.  This is considerably faster than destroying
+and rebuilding your VM.
+
+`aurorabuild` accepts a list of components to build and update. To get a list of supported
+components, invoke the `aurorabuild` command with no arguments. For example, to rebuild and
+restart just the client:
+
+     vagrant ssh -c 'aurorabuild client'
+
+
+Shut down or delete your local cluster
+--------------------------------------
+
+To shut down your local cluster, run the `vagrant halt` command in your development machine. To
+start it again, run the `vagrant up` command.
+
+Once you are finished with your local cluster, or if you would otherwise like to start from scratch,
+you can use the command `vagrant destroy` to turn off and delete the virtual file system.
+
+
+Troubleshooting
+---------------
+
+Most Vagrant-related problems can be fixed by the following steps (combined into a single
+sequence in the sketch after this list):
+
+* Destroying the vagrant environment with `vagrant destroy`
+* Killing any orphaned VMs (see AURORA-499) with the VirtualBox UI or the `VBoxManage` command line tool
+* Cleaning the repository of build artifacts and other intermediate output with `git clean -fdx`
+* Bringing up the vagrant environment with `vagrant up`
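+
+A minimal sketch of that full reset (the `VBoxManage` step is only needed if `vagrant destroy`
+actually left an orphaned VM behind; the VM name/UUID is a placeholder):
+
+     vagrant destroy
+     VBoxManage list vms                                  # look for an orphaned aurora VM
+     VBoxManage unregistervm <vm-name-or-uuid> --delete   # only if one was left behind
+     git clean -fdx
+     vagrant up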
+
+If that still doesn't solve your problem, make sure to inspect the log files:
+
+* Scheduler: `/var/log/upstart/aurora-scheduler.log`
+* Observer: `/var/log/upstart/aurora-thermos-observer.log`
+* Mesos Master: `/var/log/mesos/mesos-master.INFO` (also see `.WARNING` and `.ERROR`)
+* Mesos Agent: `/var/log/mesos/mesos-slave.INFO` (also see `.WARNING` and `.ERROR`)
diff --git a/publish/documentation/latest/images/CPUavailability.png b/source/documentation/0.16.0/images/CPUavailability.png
similarity index 100%
copy from publish/documentation/latest/images/CPUavailability.png
copy to source/documentation/0.16.0/images/CPUavailability.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/CompletedTasks.png b/source/documentation/0.16.0/images/CompletedTasks.png
new file mode 100644
index 0000000..343e111
--- /dev/null
+++ b/source/documentation/0.16.0/images/CompletedTasks.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/HelloWorldJob.png b/source/documentation/0.16.0/images/HelloWorldJob.png
new file mode 100644
index 0000000..4977227
--- /dev/null
+++ b/source/documentation/0.16.0/images/HelloWorldJob.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/RoleJobs.png b/source/documentation/0.16.0/images/RoleJobs.png
new file mode 100644
index 0000000..95ffa49
--- /dev/null
+++ b/source/documentation/0.16.0/images/RoleJobs.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/RunningJob.png b/source/documentation/0.16.0/images/RunningJob.png
new file mode 100644
index 0000000..4e7a045
--- /dev/null
+++ b/source/documentation/0.16.0/images/RunningJob.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/ScheduledJobs.png b/source/documentation/0.16.0/images/ScheduledJobs.png
new file mode 100644
index 0000000..0be4731
--- /dev/null
+++ b/source/documentation/0.16.0/images/ScheduledJobs.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/TaskBreakdown.png b/source/documentation/0.16.0/images/TaskBreakdown.png
new file mode 100644
index 0000000..fab702c
--- /dev/null
+++ b/source/documentation/0.16.0/images/TaskBreakdown.png
Binary files differ
diff --git a/publish/documentation/latest/images/aurora_hierarchy.png b/source/documentation/0.16.0/images/aurora_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/aurora_hierarchy.png
copy to source/documentation/0.16.0/images/aurora_hierarchy.png
Binary files differ
diff --git a/publish/assets/img/aurora_logo.png b/source/documentation/0.16.0/images/aurora_logo.png
similarity index 100%
copy from publish/assets/img/aurora_logo.png
copy to source/documentation/0.16.0/images/aurora_logo.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/components.odg b/source/documentation/0.16.0/images/components.odg
new file mode 100644
index 0000000..a0f3b28
--- /dev/null
+++ b/source/documentation/0.16.0/images/components.odg
Binary files differ
diff --git a/source/documentation/0.16.0/images/components.png b/source/documentation/0.16.0/images/components.png
new file mode 100644
index 0000000..e8228c0
--- /dev/null
+++ b/source/documentation/0.16.0/images/components.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/debug-client-test.png b/source/documentation/0.16.0/images/debug-client-test.png
new file mode 100644
index 0000000..8b67a10
--- /dev/null
+++ b/source/documentation/0.16.0/images/debug-client-test.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/debugging-client-test.png b/source/documentation/0.16.0/images/debugging-client-test.png
new file mode 100644
index 0000000..df7b7e3
--- /dev/null
+++ b/source/documentation/0.16.0/images/debugging-client-test.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/killedtask.png b/source/documentation/0.16.0/images/killedtask.png
new file mode 100644
index 0000000..bb1bc92
--- /dev/null
+++ b/source/documentation/0.16.0/images/killedtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/lifeofatask.png b/source/documentation/0.16.0/images/lifeofatask.png
similarity index 100%
copy from publish/documentation/latest/images/lifeofatask.png
copy to source/documentation/0.16.0/images/lifeofatask.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png b/source/documentation/0.16.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
new file mode 100644
index 0000000..ea9866e
--- /dev/null
+++ b/source/documentation/0.16.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png b/source/documentation/0.16.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
new file mode 100644
index 0000000..c9539df
--- /dev/null
+++ b/source/documentation/0.16.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png b/source/documentation/0.16.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
new file mode 100644
index 0000000..99674c7
--- /dev/null
+++ b/source/documentation/0.16.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/presentations/02_28_2015_apache_aurora_thumb.png b/source/documentation/0.16.0/images/presentations/02_28_2015_apache_aurora_thumb.png
new file mode 100644
index 0000000..6e0e1fb
--- /dev/null
+++ b/source/documentation/0.16.0/images/presentations/02_28_2015_apache_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png b/source/documentation/0.16.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
new file mode 100644
index 0000000..f81a128
--- /dev/null
+++ b/source/documentation/0.16.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png b/source/documentation/0.16.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
new file mode 100644
index 0000000..6ebce80
--- /dev/null
+++ b/source/documentation/0.16.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png b/source/documentation/0.16.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
new file mode 100644
index 0000000..f8a3db1
--- /dev/null
+++ b/source/documentation/0.16.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/presentations/08_21_2014_past_present_future_thumb.png b/source/documentation/0.16.0/images/presentations/08_21_2014_past_present_future_thumb.png
new file mode 100644
index 0000000..deaebe7
--- /dev/null
+++ b/source/documentation/0.16.0/images/presentations/08_21_2014_past_present_future_thumb.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png b/source/documentation/0.16.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
new file mode 100644
index 0000000..61ea6d4
--- /dev/null
+++ b/source/documentation/0.16.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png b/source/documentation/0.16.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png
new file mode 100644
index 0000000..9f1f4af
--- /dev/null
+++ b/source/documentation/0.16.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png b/source/documentation/0.16.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
new file mode 100644
index 0000000..a8bb199
--- /dev/null
+++ b/source/documentation/0.16.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png b/source/documentation/0.16.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
new file mode 100644
index 0000000..ac5c6dc
--- /dev/null
+++ b/source/documentation/0.16.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/runningtask.png b/source/documentation/0.16.0/images/runningtask.png
new file mode 100644
index 0000000..9e7e31a
--- /dev/null
+++ b/source/documentation/0.16.0/images/runningtask.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/stderr.png b/source/documentation/0.16.0/images/stderr.png
new file mode 100644
index 0000000..6076a84
--- /dev/null
+++ b/source/documentation/0.16.0/images/stderr.png
Binary files differ
diff --git a/source/documentation/0.16.0/images/stdout.png b/source/documentation/0.16.0/images/stdout.png
new file mode 100644
index 0000000..d614097
--- /dev/null
+++ b/source/documentation/0.16.0/images/stdout.png
Binary files differ
diff --git a/publish/documentation/latest/images/storage_hierarchy.png b/source/documentation/0.16.0/images/storage_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/storage_hierarchy.png
copy to source/documentation/0.16.0/images/storage_hierarchy.png
Binary files differ
diff --git a/source/documentation/0.16.0/index.html.md b/source/documentation/0.16.0/index.html.md
new file mode 100644
index 0000000..08accc5
--- /dev/null
+++ b/source/documentation/0.16.0/index.html.md
@@ -0,0 +1,75 @@
+## Introduction
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+We encourage you to ask questions on the [Aurora user list](http://aurora.apache.org/community/) or
+the `#aurora` IRC channel on `irc.freenode.net`.
+
+
+## Getting Started
+Information for everyone new to Apache Aurora.
+
+ * [Aurora System Overview](getting-started/overview/)
+ * [Hello World Tutorial](getting-started/tutorial/)
+ * [Local cluster with Vagrant](getting-started/vagrant/)
+
+## Features
+Description of important Aurora features.
+
+ * [Containers](features/containers/)
+ * [Cron Jobs](features/cron-jobs/)
+ * [Custom Executors](features/custom-executors/)
+ * [Job Updates](features/job-updates/)
+ * [Multitenancy](features/multitenancy/)
+ * [Resource Isolation](features/resource-isolation/)
+ * [Scheduling Constraints](features/constraints/)
+ * [Services](features/services/)
+ * [Service Discovery](features/service-discovery/)
+ * [SLA Metrics](features/sla-metrics/)
+ * [Webhooks](features/webhooks/)
+
+## Operators
+For those that wish to manage and fine-tune an Aurora cluster.
+
+ * [Installation](operations/installation/)
+ * [Configuration](operations/configuration/)
+ * [Monitoring](operations/monitoring/)
+ * [Security](operations/security/)
+ * [Storage](operations/storage/)
+ * [Backup](operations/backup-restore/)
+
+## Reference
+The complete reference of commands, configuration options, and scheduler internals.
+
+ * [Task lifecycle](reference/task-lifecycle/)
+ * Configuration (`.aurora` files)
+    - [Configuration Reference](reference/configuration/)
+    - [Configuration Tutorial](reference/configuration-tutorial/)
+    - [Configuration Best Practices](reference/configuration-best-practices/)
+    - [Configuration Templating](reference/configuration-templating/)
+ * Aurora Client
+    - [Client Commands](reference/client-commands/)
+    - [Client Hooks](reference/client-hooks/)
+    - [Client Cluster Configuration](reference/client-cluster-configuration/)
+ * [Scheduler Configuration](reference/scheduler-configuration/)
+
+## Additional Resources
+ * [Tools integrating with Aurora](additional-resources/tools/)
+ * [Presentation videos and slides](additional-resources/presentations/)
+
+## Developers
+All the information you need to start modifying Aurora and contributing back to the project.
+
+ * [Contributing to the project](contributing/)
+ * [Committer's Guide](development/committers-guide/)
+ * [Design Documents](development/design-documents/)
+ * Developing the Aurora components:
+     - [Client](development/client/)
+     - [Scheduler](development/scheduler/)
+     - [Scheduler UI](development/ui/)
+     - [Thermos](development/thermos/)
+     - [Thrift structures](development/thrift/)
+
+
diff --git a/source/documentation/0.16.0/operations/backup-restore.md b/source/documentation/0.16.0/operations/backup-restore.md
new file mode 100644
index 0000000..8dac40a
--- /dev/null
+++ b/source/documentation/0.16.0/operations/backup-restore.md
@@ -0,0 +1,91 @@
+# Recovering from a Scheduler Backup
+
+**Be sure to read the entire page before attempting to restore from a backup, as it may have
+unintended consequences.**
+
+# Summary
+
+The restoration procedure replaces the existing (possibly corrupted) Mesos replicated log with an
+earlier, backed up, version and requires all schedulers to be taken down temporarily while
+restoring. Once completed, the scheduler state resets to what it was when the backup was created.
+This means any jobs/tasks created or updated after the backup are unknown to the scheduler and will
+be killed shortly after the cluster restarts. All other tasks continue operating as normal.
+
+Usually, it is a bad idea to restore a backup that is not extremely recent (i.e. older than a few
+hours). This is because the scheduler will expect the cluster to look exactly as the backup does,
+so any tasks that have been rescheduled since the backup was taken will be killed.
+
+The instructions below have been verified in a [Vagrant environment](../../getting-started/vagrant/) and, with minor
+syntax/path changes, should be applicable to any Aurora cluster.
+
+# Preparation
+
+Follow these steps to prepare the cluster for restoring from a backup:
+
+* Stop all scheduler instances
+
+* Consider blocking external traffic on the port defined by `-http_port` for all schedulers to
+prevent users from interacting with the scheduler during the restoration process. This will help
+troubleshooting by reducing the scheduler log noise and prevent users from making changes that will
+be erased after the backup snapshot is restored.
+
+* Configure `aurora_admin` access to run all commands listed in the
+  [Restore from backup](#restore-from-backup) section locally on the leading scheduler:
+  * Make sure the [clusters.json](../../reference/client-cluster-configuration/) file is configured
+    to access the scheduler directly. Set the `scheduler_uri` setting and remove `zk`. Since the
+    leader can get re-elected during the restore steps, consider doing this on all scheduler replicas.
+  * Depending on your particular security approach, you will need to either turn off scheduler
+    authorization by removing the scheduler `-http_authentication_mechanism` flag, or make sure that
+    direct scheduler access is properly authorized. E.g., in the case of Kerberos you will need to
+    add an `/etc/hosts` entry mapping your local IP to the scheduler URL configured in the keytabs:
+
+        <local_ip> <scheduler_domain_in_keytabs>
+
+* The next steps are required to put the scheduler into a partially disabled state where it is still
+able to accept storage recovery requests but unable to schedule or change task states. This may be
+accomplished by updating the following scheduler configuration options (see the combined sketch at
+the end of this list):
+  * Set `-mesos_master_address` to a non-existent ZooKeeper address. This will prevent the scheduler
+    from registering with Mesos. E.g.: `-mesos_master_address=zk://localhost:1111/mesos/master`
+  * Set `-max_registration_delay` to a sufficiently long interval to prevent a registration timeout
+    and, as a result, scheduler suicide. E.g.: `-max_registration_delay=360mins`
+  * Make sure the `-reconciliation_initial_delay` option is set high enough (e.g.: `365days`) to
+    prevent accidental task GC. This is important as the scheduler will attempt to reconcile the
+    cluster state and will kill all tasks when restarted with an empty Mesos replicated log.
+
+* Restart all schedulers
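+
+Taken together, the temporary flag overrides described above might look like this in the scheduler
+start script (values copied from the examples in this list; adjust them to your deployment):
+
+    -mesos_master_address=zk://localhost:1111/mesos/master
+    -max_registration_delay=360mins
+    -reconciliation_initial_delay=365days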
+
+# Cleanup and re-initialize Mesos replicated log
+
+Get rid of the corrupted files and re-initialize Mesos replicated log:
+
+* Stop schedulers
+* Delete all files under `-native_log_file_path` on all schedulers
+* Initialize Mesos replica's log file: `sudo mesos-log initialize --path=<-native_log_file_path>`
+* Start schedulers
+
+# Restore from backup
+
+At this point the scheduler is ready to rehydrate from the backup:
+
+* Identify the leading scheduler by:
+  * examining the `scheduler_lifecycle_LEADER_AWAITING_REGISTRATION` metric at the scheduler
+    `/vars` endpoint. The leader will show `1`; all other replicas will show `0`.
+  * examining scheduler logs
+  * or examining Zookeeper registration under the path defined by `-zk_endpoints`
+    and `-serverset_path`
+
+* Locate the desired backup file, copy it to the leading scheduler's `-backup_dir` folder, and stage
+recovery by running the following command on the leader:
+`aurora_admin scheduler_stage_recovery --bypass-leader-redirect <cluster> scheduler-backup-<yyyy-MM-dd-HH-mm>`
+
+* At this point, the recovery snapshot is staged and available for manual verification/modification
+via `aurora_admin scheduler_print_recovery_tasks --bypass-leader-redirect` and
+`scheduler_delete_recovery_tasks --bypass-leader-redirect` commands.
+See `aurora_admin help <command>` for usage details.
+
+* Commit recovery. This instructs the scheduler to overwrite the existing Mesos replicated log with
+the provided backup snapshot and initiate a mandatory failover:
+`aurora_admin scheduler_commit_recovery --bypass-leader-redirect <cluster>`
+
+# Cleanup
+
+Undo any modifications done during the [Preparation](#preparation) sequence.
diff --git a/source/documentation/0.16.0/operations/configuration.md b/source/documentation/0.16.0/operations/configuration.md
new file mode 100644
index 0000000..ef64ab3
--- /dev/null
+++ b/source/documentation/0.16.0/operations/configuration.md
@@ -0,0 +1,251 @@
+# Scheduler Configuration
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+Examples are available under `examples/scheduler/`. For a list of available Aurora flags and their
+documentation, see [Scheduler Configuration Reference](../../reference/scheduler-configuration/).
+
+
+## A Note on Configuration
+Like Mesos, Aurora uses command-line flags for runtime configuration. As such, the Aurora
+"configuration file" is typically a `scheduler.sh` shell script of the following form:
+
+    #!/bin/bash
+    AURORA_HOME=/usr/local/aurora-scheduler
+
+    # Flags controlling the JVM.
+    JAVA_OPTS=(
+      -Xmx2g
+      -Xms2g
+      # GC tuning, etc.
+    )
+
+    # Flags controlling the scheduler.
+    AURORA_FLAGS=(
+      # Port for client RPCs and the web UI
+      -http_port=8081
+      # Log configuration, etc.
+    )
+
+    # Environment variables controlling libmesos
+    export JAVA_HOME=...
+    export GLOG_v=1
+    # Port and public ip used to communicate with the Mesos master and for the replicated log
+    export LIBPROCESS_PORT=8083
+    export LIBPROCESS_IP=192.168.33.7
+
+    JAVA_OPTS="${JAVA_OPTS[*]}" exec "$AURORA_HOME/bin/aurora-scheduler" "${AURORA_FLAGS[@]}"
+
+That way Aurora's current flags are visible in `ps` and in the `/vars` admin endpoint.
+
+
+## Replicated Log Configuration
+
+Aurora schedulers use ZooKeeper to discover log replicas and elect a leader. Only one scheduler is
+leader at a given time - the other schedulers follow log writes and prepare to take over as leader
+but do not communicate with the Mesos master. Either 3 or 5 schedulers are recommended in a
+production deployment depending on failure tolerance and they must have persistent storage.
+
+Below is a summary of scheduler storage configuration flags that either don't have default values
+or require attention before deploying in a production environment.
+
+### `-native_log_quorum_size`
+Defines the Mesos replicated log quorum size. In a cluster with `N` schedulers, the flag
+`-native_log_quorum_size` should be set to `floor(N/2) + 1`. So in a cluster with 1 scheduler
+it should be set to `1`, in a cluster with 3 it should be set to `2`, and in a cluster of 5 it
+should be set to `3`.
+
+  Number of schedulers (N) | ```-native_log_quorum_size``` setting (```floor(N/2) + 1```)
+  ------------------------ | -------------------------------------------------------------
+  1                        | 1
+  3                        | 2
+  5                        | 3
+  7                        | 4
+
+*Incorrectly setting this flag will cause data corruption to occur!*
+
+### `-native_log_file_path`
+Location of the Mesos replicated log files. Consider allocating a dedicated disk (preferably SSD)
+for Mesos replicated log files to ensure optimal storage performance.
+
+### `-native_log_zk_group_path`
+ZooKeeper path used for Mesos replicated log quorum discovery.
+
+See [code](https://github.com/apache/aurora/blob/rel/0.16.0/src/main/java/org/apache/aurora/scheduler/log/mesos/MesosLogStreamModule.java) for
+other available Mesos replicated log configuration options and default values.
+
+### Changing the Quorum Size
+Special care needs to be taken when changing the size of the Aurora scheduler quorum.
+Since Aurora uses a Mesos replicated log, similar steps need to be followed as when
+[changing the Mesos quorum size](http://mesos.apache.org/documentation/latest/operational-guide).
+
+As preparation, increase `-native_log_quorum_size` on each existing scheduler and restart them.
+When updating from 3 to 5 schedulers, for example, the quorum size would grow from 2 to 3.
+
+When starting the new schedulers, set `-native_log_quorum_size` to the new value. Failing to
+first increase the quorum size on running schedulers can in some cases result in corruption
+or truncation of the replicated log used by Aurora. In that case, see the documentation on
+[recovering from backup](../backup-restore/).
+
+
+## Backup Configuration
+
+Configuration options for the Aurora scheduler backup manager.
+
+* `-backup_interval`: The interval on which the scheduler writes local storage backups.  The default is every hour.
+* `-backup_dir`: Directory to write backups to.
+* `-max_saved_backups`: Maximum number of backups to retain before deleting the oldest backup(s).
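+
+An illustrative combination (values are examples only, using the duration syntax seen elsewhere in
+this document):
+
+    -backup_interval=1hrs
+    -backup_dir=/var/lib/aurora/backups
+    -max_saved_backups=48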
+
+
+## Resource Isolation
+
+For proper CPU, memory, and disk isolation as mentioned in our [end-user documentation](../../features/resource-isolation/),
+we recommend adding the following isolators to the `--isolation` flag of the Mesos agent:
+
+* `cgroups/cpu`
+* `cgroups/mem`
+* `disk/du`
+
+In addition, we recommend setting the following [agent flags](http://mesos.apache.org/documentation/latest/configuration/)
+(a combined agent invocation is sketched after this list):
+
+* `--cgroups_limit_swap` to enforce limits on both memory and swap instead of just memory.
+  Alternatively, you could disable swap on your agent hosts.
+* `--cgroups_enable_cfs` to enable hard limits on CPU resources via the CFS bandwidth limiting
+  feature.
+* `--enforce_container_disk_quota` to enable disk quota enforcement for containers.
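+
+A hypothetical agent invocation combining the isolators and flags above (the master address and
+work directory are placeholders for your own values):
+
+    mesos-slave --master=zk://zkhost:2181/mesos/master \
+      --work_dir=/var/lib/mesos \
+      --isolation=cgroups/cpu,cgroups/mem,disk/du \
+      --cgroups_limit_swap \
+      --cgroups_enable_cfs \
+      --enforce_container_disk_quota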
+
+To enable the optional GPU support in Mesos, please see the GPU related flags in the
+[Mesos configuration](http://mesos.apache.org/documentation/latest/configuration/).
+To enable the corresponding feature in Aurora, you have to start the scheduler with the
+flag
+
+    -allow_gpu_resource=true
+
+If you want to use revocable resources, first follow the
+[Mesos oversubscription documentation](http://mesos.apache.org/documentation/latest/oversubscription/)
+and then set this Aurora scheduler flag to allow receiving revocable Mesos offers:
+
+    -receive_revocable_resources=true
+
+Both CPUs and RAM are supported as revocable resources. The former is enabled by default,
+the latter needs to be enabled via:
+
+    -enable_revocable_ram=true
+
+Unless you want to use the [default](https://github.com/apache/aurora/blob/rel/0.16.0/src/main/resources/org/apache/aurora/scheduler/tiers.json)
+tier configuration, you will also have to specify a file path:
+
+    -tier_config=path/to/tiers/config.json
+
+
+## Containers
+
+Both the Mesos and Docker containerizers require configuration of the Mesos agent.
+
+### Mesos Containerizer
+
+The minimal agent configuration requires enabling Docker and Appc image support for the Mesos
+containerizer:
+
+    --containerizers=mesos
+    --image_providers=appc,docker
+    --isolation=filesystem/linux,docker/runtime  # as an addition to your other isolators
+
+Further details can be found in the corresponding [Mesos documentation](http://mesos.apache.org/documentation/latest/container-image/).
+
+### Docker Containerizer
+
+The [Docker containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/)
+requires that the Docker engine be installed on each agent host. In addition, it must be enabled on the
+Mesos agents by launching them with the option:
+
+    --containerizers=mesos,docker
+
+If you would like to run a container with a read-only filesystem, it may also be necessary to use
+the scheduler flag `-thermos_home_in_sandbox` in order to set HOME to the sandbox
+before the executor runs. This will make sure that the executor/runner PEX extraction happens
+inside of the sandbox instead of the container filesystem root.
+
+If you would like to supply your own parameters to `docker run` when launching jobs in docker
+containers, you may use the following flags:
+
+    -allow_docker_parameters
+    -default_docker_parameters
+
+`-allow_docker_parameters` controls whether or not users may pass their own configuration parameters
+through the job configuration files. If set to `false` (the default), the scheduler will reject
+jobs with custom parameters. *NOTE*: this setting should be used with caution as it allows any job
+owner to specify any parameters they wish, including those that may introduce security concerns
+(`privileged=true`, for example).
+
+`-default_docker_parameters` allows a cluster operator to specify a universal set of parameters that
+should be used for every container that does not have parameters explicitly configured at the job
+level. The argument accepts a multimap format:
+
+    -default_docker_parameters="read-only=true,tmpfs=/tmp,tmpfs=/run"
+
+### Common Options
+
+The following Aurora options work for both containerizers.
+
+The scheduler flag `-global_container_mounts` allows mounting paths from the host (i.e. the agent machine)
+into all containers on that host. The format is a comma-separated list of `host_path:container_path[:mode]`
+tuples. For example, `-global_container_mounts=/opt/secret_keys_dir:/mnt/secret_keys_dir:ro` mounts
+`/opt/secret_keys_dir` from the agents into all launched containers. Valid modes are `ro` and `rw`.
+
+
+## Thermos Process Logs
+
+### Log destination
+By default, Thermos will write process stdout/stderr to log files in the sandbox. Process object
+configuration allows specifying alternate log file destinations like streamed stdout/stderr or
+suppression of all log output. Default behavior can be configured for the entire cluster with the
+following flag (through the `-thermos_executor_flags` argument to the Aurora scheduler):
+
+    --runner-logger-destination=both
+
+The `both` setting sends logs to files and also streams them to the parent stdout/stderr outputs.
+
+See [Configuration Reference](../../reference/configuration/#logger) for all destination options.
+
+### Log rotation
+By default, Thermos will not rotate the stdout/stderr logs from child processes and they will grow
+without bound. An individual user may change this behavior via configuration on the Process object,
+but it may also be desirable to change the default configuration for the entire cluster.
+In order to enable rotation by default, the following flags can be applied to Thermos (through the
+`-thermos_executor_flags` argument to the Aurora scheduler):
+
+    --runner-logger-mode=rotate
+    --runner-rotate-log-size-mb=100
+    --runner-rotate-log-backups=10
+
+In the above example, each instance of the Thermos runner will rotate stderr/stdout logs once they
+reach 100 MiB in size and keep a maximum of 10 backups. If a user has provided a custom setting for
+their process, it will override these default settings.
+
+
+## Thermos Executor Wrapper
+
+If you need to do computation before starting the Thermos executor (for example, setting a different
+`--announcer-hostname` parameter for every executor), then the Thermos executor should be invoked
+inside a wrapper script. In such a case, the Aurora scheduler should be started with
+`-thermos_executor_path` pointing to the wrapper script and `-thermos_executor_resources` set to a
+comma-separated string of all the resources that should be copied into the sandbox (including the
+original Thermos executor). Ensure the wrapper script does not access resources outside of the
+sandbox, as when the script is run from within a Docker container those resources may not exist.
+
+For example, to wrap the executor inside a simple wrapper, the scheduler would be started with:
+`-thermos_executor_path=/path/to/wrapper.sh -thermos_executor_resources=/usr/share/aurora/bin/thermos_executor.pex`
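+
+A minimal sketch of what such a `wrapper.sh` might contain (hypothetical; it assumes the original
+executor was listed in `-thermos_executor_resources` and therefore sits next to the wrapper inside
+the sandbox):
+
+    #!/bin/bash
+    # Compute a per-host announcer hostname, then hand off to the original
+    # Thermos executor, preserving all arguments passed by the scheduler.
+    exec ./thermos_executor.pex --announcer-hostname="$(hostname -f)" "$@"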
+
+## Custom Executors
+
+The scheduler can be configured to utilize a custom executor by specifying the `-custom_executor_config` flag.
+The flag must be set to the path of a valid executor configuration file.
+
+For more information on this feature please see the custom executors [documentation](../../features/custom-executors/).
+
+## A note on increasing executor overhead
+
+Increasing executor overhead on an existing cluster, whether it be for custom executors or for Thermos,
+will result in degraded preemption performance until all tasks which began life under the previous,
+lower-overhead executor configuration are preempted/restarted.
+
diff --git a/source/documentation/0.16.0/operations/installation.md b/source/documentation/0.16.0/operations/installation.md
new file mode 100644
index 0000000..0d237c8
--- /dev/null
+++ b/source/documentation/0.16.0/operations/installation.md
@@ -0,0 +1,312 @@
+# Installing Aurora
+
+Source and binary distributions can be found on our
+[downloads](https://aurora.apache.org/downloads/) page.  Installing from binary packages is
+recommended for most.
+
+- [Installing the scheduler](#installing-the-scheduler)
+- [Installing worker components](#installing-worker-components)
+- [Installing the client](#installing-the-client)
+- [Installing Mesos](#installing-mesos)
+- [Troubleshooting](#troubleshooting)
+
+If our binary packages don't suit you, our package build toolchain makes it easy to build your
+own packages. See the [instructions](https://github.com/apache/aurora-packaging) to learn how.
+
+
+## Machine profiles
+
+Given that many of these components communicate over the network, there are numerous ways you could
+assemble them to create an Aurora cluster.  The simplest way is to think in terms of three machine
+profiles:
+
+### Coordinator
+**Components**: ZooKeeper, Aurora scheduler, Mesos master
+
+A small number of machines (typically 3 or 5) responsible for cluster orchestration.  In most cases
+it is fine to co-locate these components in anything but very large clusters (> 1000 machines).
+Beyond that point, operators will likely want to manage these services on separate machines.
+
+In practice, 5 coordinators have been shown to reliably manage clusters with tens of thousands of
+machines.
+
+### Worker
+**Components**: Aurora executor, Aurora observer, Mesos agent
+
+The bulk of the cluster, where services will actually run.
+
+### Client
+**Components**: Aurora client, Aurora admin client
+
+Any machines that users submit jobs from.
+
+
+## Installing the scheduler
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        sudo start mesos-master
+
+2. Install ZooKeeper
+
+        sudo apt-get install -y zookeeperd
+
+3. Install the Aurora scheduler
+
+        sudo add-apt-repository -y ppa:openjdk-r/ppa
+        sudo apt-get update
+        sudo apt-get install -y openjdk-8-jre-headless wget
+
+        sudo update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-scheduler_0.15.0_amd64.deb
+        sudo dpkg -i aurora-scheduler_0.15.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-master
+
+2. Install ZooKeeper
+
+        sudo rpm -Uvh https://archive.cloudera.com/cdh4/one-click-install/redhat/6/x86_64/cloudera-cdh-4-0.x86_64.rpm
+        sudo yum install -y java-1.8.0-openjdk-headless zookeeper-server
+
+        sudo service zookeeper-server init
+        sudo systemctl start zookeeper-server
+
+3. Install the Aurora scheduler
+
+        sudo yum install -y wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-scheduler-0.15.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-scheduler-0.15.0-1.el7.centos.aurora.x86_64.rpm
+
+### Finalizing
+By default, the scheduler will start in an uninitialized mode.  This is because external
+coordination is necessary to be certain operator error does not result in a quorum of schedulers
+starting up and believing their databases are empty when in fact they should be re-joining a
+cluster.
+
+Because of this, a fresh install of the scheduler will need intervention to start up.  First,
+stop the scheduler service.
+Ubuntu: `sudo stop aurora-scheduler`
+CentOS: `sudo systemctl stop aurora`
+
+Now initialize the database:
+
+    sudo -u aurora mkdir -p /var/lib/aurora/scheduler/db
+    sudo -u aurora mesos-log initialize --path=/var/lib/aurora/scheduler/db
+
+Now you can start the scheduler back up.
+Ubuntu: `sudo start aurora-scheduler`
+CentOS: `sudo systemctl start aurora`
+
+
+## Installing worker components
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo apt-get install -y python2.7 wget
+
+        # NOTE: This appears to be a missing dependency of the mesos deb package and is needed
+        # for the python mesos native bindings.
+        sudo apt-get -y install libcurl4-nss-dev
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-executor_0.15.0_amd64.deb
+        sudo dpkg -i aurora-executor_0.15.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo yum install -y python2 wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-executor-0.15.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-executor-0.15.0-1.el7.centos.aurora.x86_64.rpm
+
+### Configuration
+The executor typically does not require configuration.  Command line arguments can
+be passed to the executor using a command line argument on the scheduler (`-thermos_executor_flags`).
+
+The observer needs to be configured to look at the correct mesos directory in order to find task
+sandboxes. You should first find the Mesos working directory by looking for the Mesos agent
+`--work_dir` flag. You should see something like:
+
+        ps -eocmd | grep "mesos-slave" | grep -v grep | tr ' ' '\n' | grep "\--work_dir"
+        --work_dir=/var/lib/mesos
+
+If the flag is not set, you can view the default value like so:
+
+        mesos-slave --help
+        Usage: mesos-slave [options]
+
+          ...
+          --work_dir=VALUE      Directory path to place framework work directories
+                                (default: /tmp/mesos)
+          ...
+
+The value you find for `--work_dir`, `/var/lib/mesos` in this example, should match the Aurora
+observer value for `--mesos-root`.  You can look for that setting in a similar way on a worker
+node by grepping for `thermos_observer` and `--mesos-root`.  If the flag is not set, you can view
+the default value like so:
+
+        thermos_observer -h
+        Options:
+          ...
+          --mesos-root=MESOS_ROOT
+                                The mesos root directory to search for Thermos
+                                executor sandboxes [default: /var/lib/mesos]
+          ...
+
+In this case the default is `/var/lib/mesos` and we have a match. If there is no match, you can
+either adjust the mesos-slave (agent) start script(s) and restart the agent(s), or else adjust the
+Aurora observer start scripts and restart the observers.  To adjust the Aurora observer:
+
+#### Ubuntu Trusty
+
+    sudo sh -c 'echo "MESOS_ROOT=/tmp/mesos" >> /etc/default/thermos'
+
+#### CentOS 7
+
+Make an edit to add the `--mesos-root` flag resulting in something like:
+
+    grep -A5 OBSERVER_ARGS /etc/sysconfig/thermos
+    OBSERVER_ARGS=(
+      --port=1338
+      --mesos-root=/tmp/mesos
+      --log_to_disk=NONE
+      --log_to_stderr=google:INFO
+    )
+
+## Installing the client
+### Ubuntu Trusty
+
+    sudo apt-get install -y python2.7 wget
+
+    wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-tools_0.15.0_amd64.deb
+    sudo dpkg -i aurora-tools_0.15.0_amd64.deb
+
+### CentOS 7
+
+    sudo yum install -y python2 wget
+
+    wget -c https://apache.bintray.com/aurora/centos-7/aurora-tools-0.15.0-1.el7.centos.aurora.x86_64.rpm
+    sudo yum install -y aurora-tools-0.15.0-1.el7.centos.aurora.x86_64.rpm
+
+### Mac OS X
+
+    brew upgrade
+    brew install aurora-cli
+
+### Configuration
+Client configuration lives in a JSON file that describes the clusters available and how to reach
+them.  By default this file is at `/etc/aurora/clusters.json`.
+
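+As a sketch of what such a file can look like (field names follow the client cluster configuration
+reference; the values here are purely illustrative and mirror the vagrant `devcluster`):
+
+    # Hypothetical example; adjust the name, ZooKeeper host, and paths to your cluster.
+    cat <<'EOF' | sudo tee /etc/aurora/clusters.json
+    [{
+      "name": "devcluster",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler",
+      "auth_mechanism": "UNAUTHENTICATED",
+      "slave_run_directory": "latest",
+      "slave_root": "/var/lib/mesos"
+    }]
+    EOF
+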
+Jobs may be submitted to the scheduler using the client, and are described with
+[job configurations](../../reference/configuration/) expressed in `.aurora` files.  Typically you will
+maintain a single job configuration file to describe one or more deployment environments (e.g.
+dev, test, prod) for a production job.
+
+
+## Installing Mesos
+Mesos uses a single package for the Mesos master and agent.  As a result, the package dependencies
+are identical for both.
+
+### Mesos on Ubuntu Trusty
+
+    sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF
+    DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]')
+    CODENAME=$(lsb_release -cs)
+
+    echo "deb http://repos.mesosphere.io/${DISTRO} ${CODENAME} main" | \
+      sudo tee /etc/apt/sources.list.d/mesosphere.list
+    sudo apt-get -y update
+
+    # Use `apt-cache showpkg mesos | grep [version]` to find the exact version.
+    sudo apt-get -y install mesos=0.28.2-2.0.27.ubuntu1404_amd64
+
+### Mesos on CentOS 7
+
+    sudo rpm -Uvh https://repos.mesosphere.io/el/7/noarch/RPMS/mesosphere-el-repo-7-1.noarch.rpm
+    sudo yum -y install mesos-0.28.2
+
+
+
+## Troubleshooting
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions here to help get you moving.
+
+### Replicated log not initialized
+
+#### Symptoms
+- Scheduler RPCs and web interface claim `Storage is not READY`
+- Scheduler log repeatedly prints messages like
+
+  ```
+  I1016 16:12:27.234133 26081 replica.cpp:638] Replica in EMPTY status
+  received a broadcasted recover request
+  I1016 16:12:27.234256 26084 recover.cpp:188] Received a recover response
+  from a replica in EMPTY status
+  ```
+
+#### Solution
+When you create a new cluster, you need to inform a quorum of schedulers that they are safe to
+consider their database to be empty by [initializing](#finalizing) the
+replicated log. This is done to prevent the scheduler from modifying the cluster state in the event
+of multiple simultaneous disk failures or, more likely, misconfiguration of the replicated log path.
+
+
+### Scheduler not registered
+
+#### Symptoms
+Scheduler log contains
+
+    Framework has not been registered within the tolerated delay.
+
+#### Solution
+Double-check that the scheduler is configured correctly to reach the Mesos master. If you are registering
+the master in ZooKeeper, make sure the command line argument to the master:
+
+    --zk=zk://$ZK_HOST:2181/mesos/master
+
+is the same as the one on the scheduler:
+
+    -mesos_master_address=zk://$ZK_HOST:2181/mesos/master
+
+
+### Scheduler not running
+
+#### Symptoms
+The scheduler process commits suicide regularly. This happens under error conditions, but
+also on purpose at regular intervals.
+
+#### Solution
+Aurora is meant to be run under supervision. You have to configure a supervisor like
+[Monit](http://mmonit.com/monit/) or [supervisord](http://supervisord.org/) to run the scheduler
+and restart it whenever it fails or exits on purpose.
+
+Aurora supports an active health checking protocol on its admin HTTP interface - if a `GET /health`
+times out or returns anything other than `200 OK` the scheduler process is unhealthy and should be
+restarted.
+
+For example, monit can be configured with
+
+    if failed port 8081 send "GET /health HTTP/1.0\r\n" expect "OK\n" with timeout 2 seconds for 10 cycles then restart
+
+assuming you set `-http_port=8081`.
diff --git a/source/documentation/0.16.0/operations/monitoring.md b/source/documentation/0.16.0/operations/monitoring.md
new file mode 100644
index 0000000..2ceda62
--- /dev/null
+++ b/source/documentation/0.16.0/operations/monitoring.md
@@ -0,0 +1,181 @@
+# Monitoring your Aurora cluster
+
+Before you start running important services in your Aurora cluster, it's important to set up
+monitoring and alerting of Aurora itself.  Most of your monitoring can be against the scheduler,
+since it will give you a global view of what's going on.
+
+## Reading stats
+The scheduler exposes a *lot* of instrumentation data via its HTTP interface. You can get a quick
+peek at the first few of these in our vagrant image:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars | head'
+    async_tasks_completed 1004
+    attribute_store_fetch_all_events 15
+    attribute_store_fetch_all_events_per_sec 0.0
+    attribute_store_fetch_all_nanos_per_event 0.0
+    attribute_store_fetch_all_nanos_total 3048285
+    attribute_store_fetch_all_nanos_total_per_sec 0.0
+    attribute_store_fetch_one_events 3391
+    attribute_store_fetch_one_events_per_sec 0.0
+    attribute_store_fetch_one_nanos_per_event 0.0
+    attribute_store_fetch_one_nanos_total 454690753
+
+These values are served as `Content-Type: text/plain`, with each line containing a space-separated metric
+name and value. Values may be integers, doubles, or strings (note: strings are static, others
+may be dynamic).
+
+If your monitoring infrastructure prefers JSON, the scheduler exports that as well:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars.json | python -mjson.tool | head'
+    {
+        "async_tasks_completed": 1009,
+        "attribute_store_fetch_all_events": 15,
+        "attribute_store_fetch_all_events_per_sec": 0.0,
+        "attribute_store_fetch_all_nanos_per_event": 0.0,
+        "attribute_store_fetch_all_nanos_total": 3048285,
+        "attribute_store_fetch_all_nanos_total_per_sec": 0.0,
+        "attribute_store_fetch_one_events": 3409,
+        "attribute_store_fetch_one_events_per_sec": 0.0,
+        "attribute_store_fetch_one_nanos_per_event": 0.0,
+
+This will be the same data as above, served with `Content-Type: application/json`.
+
+## Viewing live stat samples on the scheduler
+The scheduler uses the Twitter commons stats library, which keeps an internal time-series database
+of exported variables - nearly everything in `/vars` is available for instant graphing.  This is
+useful for debugging, but is not a replacement for an external monitoring system.
+
+You can view these graphs on a scheduler at `/graphview`.  It supports some composition and
+aggregation of values, which can be invaluable when triaging a problem.  For example, if you have
+the scheduler running in vagrant, check out these links:
+
+* [simple graph](http://192.168.33.7:8081/graphview?query=jvm_uptime_secs)
+* [complex composition](http://192.168.33.7:8081/graphview?query=rate\(scheduler_log_native_append_nanos_total\)%2Frate\(scheduler_log_native_append_events\)%2F1e6)
+
+### Counters and gauges
+Among numeric stats, there are two fundamental types of stats exported: _counters_ and _gauges_.
+Counters are guaranteed to be monotonically-increasing for the lifetime of a process, while gauges
+may decrease in value.  Aurora uses counters to represent things like the number of times an event
+has occurred, and gauges to capture things like the current length of a queue.  Counters are a
+natural fit for accurate composition into [rate ratios](http://en.wikipedia.org/wiki/Rate_ratio)
+(useful for sample-resistant latency calculation), while gauges are not.
+
+# Alerting
+
+## Quickstart
+If you are looking for just bare-minimum alerting to get something in place quickly, set up alerting
+on `framework_registered` and `task_store_LOST`. These will give you a decent picture of overall
+health.
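+
+A quick manual spot-check of those two stats against a running scheduler (illustrative; substitute
+your own scheduler address for the vagrant one):
+
+    $ curl -s 192.168.33.7:8081/vars | egrep '^(framework_registered|task_store_LOST) '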
+
+## A note on thresholds
+One of the most difficult things in monitoring is choosing alert thresholds. With many of these
+stats, there is no value we can offer as a threshold that will be guaranteed to work for you. It
+will depend on the size of your cluster, number of jobs, churn of tasks in the cluster, etc. We
+recommend you start with a strict value after viewing a small amount of collected data, and then
+adjust thresholds as you see fit. Feel free to ask us if you would like to validate that your alerts
+and thresholds make sense.
+
+## Important stats
+
+### `jvm_uptime_secs`
+Type: integer counter
+
+The number of seconds the JVM process has been running. Comes from
+[RuntimeMXBean#getUptime()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/RuntimeMXBean.html#getUptime\(\))
+
+Detecting resets (decreasing values) on this stat will tell you that the scheduler is failing to
+stay alive.
+
+Look at the scheduler logs to identify the reason the scheduler is exiting.
+
+### `system_load_avg`
+Type: double gauge
+
+The current load average of the system for the last minute. Comes from
+[OperatingSystemMXBean#getSystemLoadAverage()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/OperatingSystemMXBean.html?is-external=true#getSystemLoadAverage\(\)).
+
+A high sustained value suggests that the scheduler machine may be over-utilized.
+
+Use standard unix tools like `top` and `ps` to track down the offending process(es).
+
+### `process_cpu_cores_utilized`
+Type: double gauge
+
+The current number of CPU cores in use by the JVM process. This should not exceed the number of
+logical CPU cores on the machine. Derived from
+[OperatingSystemMXBean#getProcessCpuTime()](http://docs.oracle.com/javase/7/docs/jre/api/management/extension/com/sun/management/OperatingSystemMXBean.html)
+
+A high sustained value indicates that the scheduler is overworked. Due to current internal design
+limitations, if this value is sustained at `1`, there is a good chance the scheduler is under water.
+
+There are two main inputs that tend to drive this figure: task scheduling attempts and status
+updates from Mesos.  You may see activity in the scheduler logs to give an indication of where
+time is being spent.  Beyond that, it really takes good familiarity with the code to effectively
+triage this.  We suggest engaging with an Aurora developer.
+
+### `task_store_LOST`
+Type: integer gauge
+
+The number of tasks stored in the scheduler that are in the `LOST` state, and have been rescheduled.
+
+If this value is increasing at a high rate, it is a sign of trouble.
+
+There are many sources of `LOST` tasks in Mesos: the scheduler, master, agent, and executor can all
+trigger this.  The first step is to look in the scheduler logs for `LOST` to identify where the
+state changes are originating.
+
+### `scheduler_resource_offers`
+Type: integer counter
+
+The number of resource offers that the scheduler has received.
+
+For a healthy scheduler, this value must be increasing over time.
+
+Assuming the scheduler is up and otherwise healthy, you will want to check if the master thinks it
+is sending offers. You should also look at the master's web interface to see if it has a large
+number of outstanding offers that it is waiting to be returned.
+
+### `framework_registered`
+Type: binary integer counter
+
+Will be `1` for the leading scheduler that is registered with the Mesos master, `0` for passive
+schedulers.
+
+A sustained period without a `1` (or where `sum() != 1`) warrants investigation.
+
+If there is no leading scheduler, look in the scheduler and master logs for why.  If there are
+multiple schedulers claiming leadership, this suggests a split brain and warrants filing a critical
+bug.
+
+### `rate(scheduler_log_native_append_nanos_total)/rate(scheduler_log_native_append_events)`
+Type: rate ratio of integer counters
+
+This composes two counters to compute a windowed figure for the latency of replicated log writes.
+
+A hike in this value suggests disk bandwidth contention.
+
+Look in scheduler logs for any reported oddness with saving to the replicated log. Also use
+standard tools like `vmstat` and `iotop` to identify whether the disk has become slow or
+over-utilized. We suggest using a dedicated disk for the replicated log to mitigate this.
+
+### `timed_out_tasks`
+Type: integer counter
+
+Tracks the number of times the scheduler has given up while waiting
+(for `-transient_task_state_timeout`) to hear back about a task that is in a transient state
+(e.g. `ASSIGNED`, `KILLING`), and has moved to `LOST` before rescheduling.
+
+This value is currently known to increase occasionally when the scheduler fails over
+([AURORA-740](https://issues.apache.org/jira/browse/AURORA-740)). However, any large spike in this
+value warrants investigation.
+
+The scheduler will log when it times out a task. You should trace the task ID of the timed out
+task into the master, agent, and/or executors to determine where the message was dropped.
+
+### `http_500_responses_events`
+Type: integer counter
+
+The total number of HTTP 500 status responses sent by the scheduler. Includes API and asset serving.
+
+An increase warrants investigation.
+
+Look in the scheduler logs to identify why the scheduler returned a 500; there should be a stack trace.
diff --git a/source/documentation/0.16.0/operations/security.md b/source/documentation/0.16.0/operations/security.md
new file mode 100644
index 0000000..46e0b8a
--- /dev/null
+++ b/source/documentation/0.16.0/operations/security.md
@@ -0,0 +1,340 @@
+Securing your Aurora Cluster
+============================
+
+Aurora integrates with [Apache Shiro](http://shiro.apache.org/) to provide security
+controls for its API. In addition to providing some useful features out of the box, Shiro
+also allows Aurora cluster administrators to adapt the security system to their organization’s
+existing infrastructure. The announcer in the Aurora thermos executor also supports security
+controls for talking to ZooKeeper.
+
+
+- [Enabling Security](#enabling-security)
+- [Authentication](#authentication)
+	- [HTTP Basic Authentication](#http-basic-authentication)
+		- [Server Configuration](#server-configuration)
+		- [Client Configuration](#client-configuration)
+	- [HTTP SPNEGO Authentication (Kerberos)](#http-spnego-authentication-kerberos)
+		- [Server Configuration](#server-configuration-1)
+		- [Client Configuration](#client-configuration-1)
+- [Authorization](#authorization)
+	- [Using an INI file to define security controls](#using-an-ini-file-to-define-security-controls)
+		- [Caveats](#caveats)
+	- [Implementing Delegated Authorization](#implementing-delegated-authorization)
+- [Implementing a Custom Realm](#implementing-a-custom-realm)
+	- [Packaging a realm module](#packaging-a-realm-module)
+- [Known Issues](#known-issues)
+- [Announcer Authentication](#announcer-authentication)
+	- [ZooKeeper authentication configuration](#zookeeper-authentication-configuration)
+	- [Executor settings](#executor-settings)
+
+# Enabling Security
+
+There are two major components of security:
+[authentication and authorization](http://en.wikipedia.org/wiki/Authentication#Authorization).  A
+cluster administrator may choose the approach used for each, and may also implement custom
+mechanisms for either.  Later sections describe the options available. To enable authentication
+for the announcer, see [Announcer Authentication](#announcer-authentication).
+
+
+# Authentication
+
+At a minimum, the scheduler must be configured with instructions for how to process
+authentication credentials.  There are currently two built-in authentication schemes -
+[HTTP Basic Authentication](http://en.wikipedia.org/wiki/Basic_access_authentication), and
+[SPNEGO](http://en.wikipedia.org/wiki/SPNEGO) (Kerberos).
+
+## HTTP Basic Authentication
+
+Basic Authentication is a very quick way to add *some* security.  It is supported
+by all major browsers and HTTP client libraries with minimal work.  However,
+before relying on Basic Authentication you should be aware of the [security
+considerations](http://tools.ietf.org/html/rfc2617#section-4).
+
+### Server Configuration
+
+At a minimum you need to set three command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_realm_modules=INI_AUTHNZ
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+[users]
+sally = apple, admin
+
+[roles]
+admin = *
+```
+
+The details of the security.ini file are explained below. Note that this file contains plaintext,
+unhashed passwords.
+
+### Client Configuration
+
+To configure the client for HTTP Basic authentication, add an entry to `~/.netrc` with your credentials:
+
+```
+% cat ~/.netrc
+# ...
+
+machine aurora.example.com
+login sally
+password apple
+
+# ...
+```
+
+No changes are required to `clusters.json`.
+
+## HTTP SPNEGO Authentication (Kerberos)
+
+### Server Configuration
+At a minimum you need to set five command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=NEGOTIATE
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+% cat path/to/security.ini
+[users]
+sally = _, admin
+
+[roles]
+admin = *
+```
+
+What's going on here? First, Aurora must be configured to request Kerberos credentials when presented with an
+unauthenticated request. This is achieved by setting
+
+```
+-http_authentication_mechanism=NEGOTIATE
+```
+
+Next, a Realm module must be configured to **authenticate** the current request using the Kerberos
+credentials that were requested. Aurora ships with a realm module that can do this:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN[,...]
+```
+
+The Kerberos5Realm requires a keytab file and a server principal name. The principal name will usually
+be in the form `HTTP/aurora.example.com@EXAMPLE.COM`.
+
+```
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+```
+
+The Kerberos5 realm module is authentication-only. For scheduler security to work you must also
+enable a realm module that provides an Authorizer implementation. For example, to do this using the
+IniShiroRealmModule:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+```
+
+You can then configure authorization using a security.ini file as described below
+(the password field is ignored). You must configure the realm module with the path to this file:
+
+```
+-shiro_ini_path=path/to/security.ini
+```
+
+### Client Configuration
+To use Kerberos on the client-side you must build Kerberos-enabled client binaries. Do this with:
+
+```
+./pants binary src/main/python/apache/aurora/kerberos:kaurora
+./pants binary src/main/python/apache/aurora/kerberos:kaurora_admin
+```
+
+You must also configure each cluster where you've enabled Kerberos on the scheduler
+to use Kerberos authentication. Do this by setting `auth_mechanism` to `KERBEROS`
+in `clusters.json`.
+
+```
+% cat ~/.aurora/clusters.json
+{
+    "devcluster": {
+        "auth_mechanism": "KERBEROS",
+        ...
+    },
+    ...
+}
+```
+
+# Authorization
+Given a means to authenticate the entity a client claims to be, we need to define the privileges it has.
+
+## Using an INI file to define security controls
+
+The simplest security configuration for Aurora is an INI file on the scheduler.  For small
+clusters, or clusters where the users and access controls change relatively infrequently, this is
+likely the preferred approach.  However, you may want to avoid this approach if access permissions
+are rapidly changing, or if your access control information already exists in another system.
+
+You can enable INI-based configuration with the following scheduler command line arguments:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_ini_path=path/to/security.ini
+```
+
+*note* As the argument name reveals, this is using Shiro’s
+[IniRealm](http://shiro.apache.org/configuration.html#Configuration-INIConfiguration) behind
+the scenes.
+
+The INI file will contain two sections - users and roles.  Here’s an example for what might
+be in security.ini:
+
+```
+[users]
+sally = apple, admin
+jim = 123456, accounting
+becky = letmein, webapp
+larry = 654321,accounting
+steve = password
+
+[roles]
+admin = *
+accounting = thrift.AuroraAdmin:setQuota
+webapp = thrift.AuroraSchedulerManager:*:webapp
+```
+
+The users section defines user credentials and the role(s) they are members of.  These lines
+are of the format `<user> = <password>[, <role>...]`.  As you probably noticed, the passwords are
+in plaintext and as a result read access to this file should be restricted.
+
+In this configuration, each user has different privileges for actions in the cluster because
+of the roles they are a part of:
+
+* admin is granted all privileges
+* accounting may adjust the amount of resource quota for any role
+* webapp represents a collection of jobs that make up a service, and its members may create and modify any jobs owned by that role
+
+### Caveats
+You might find documentation on the Internet suggesting there are additional sections in `shiro.ini`,
+like `[main]` and `[urls]`. These are not supported by Aurora as it uses a different mechanism to configure
+those parts of Shiro. Think of Aurora's `security.ini` as a subset with only `[users]` and `[roles]` sections.
+
+## Implementing Delegated Authorization
+
+It is possible to leverage Shiro's `runAs` feature by implementing a custom Servlet Filter that provides
+the capability and passing its fully qualified class name to the command line argument
+`-shiro_after_auth_filter`. The filter is registered in the same filter chain as the Shiro auth filters
+and is placed after them, which ensures that it is invoked only after the Shiro filters have had a
+chance to authenticate the request.
+
+# Implementing a Custom Realm
+
+Since Aurora’s security is backed by [Apache Shiro](https://shiro.apache.org), you can implement a
+custom [Realm](http://shiro.apache.org/realm.html) to define organization-specific security behavior.
+
+In addition to using Shiro's standard APIs to implement a Realm you can link against Aurora to
+access the type-safe Permissions Aurora uses. See the Javadoc for `org.apache.aurora.scheduler.spi`
+for more information.
+
+## Packaging a realm module
+Package your custom Realm(s) with a Guice module that exposes a `Set<Realm>` multibinding.
+
+```java
+package com.example;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.multibindings.Multibinder;
+import org.apache.shiro.realm.Realm;
+
+public class MyRealmModule extends AbstractModule {
+  @Override
+  public void configure() {
+    Realm myRealm = new MyRealm();
+
+    Multibinder.newSetBinder(binder(), Realm.class).addBinding().toInstance(myRealm);
+  }
+
+  static class MyRealm implements Realm {
+    // Realm implementation.
+  }
+}
+```
+
+To use your module in the scheduler, include it as a realm module based on its fully-qualified
+class name:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ,com.example.MyRealmModule
+```
+
+# Known Issues
+
+While the APIs and SPIs we ship with are stable as of 0.8.0, we are aware of several incremental
+improvements. Please follow, vote, or send patches.
+
+Relevant tickets:
+* [AURORA-343](https://issues.apache.org/jira/browse/AURORA-343): HTTPS support
+* [AURORA-1248](https://issues.apache.org/jira/browse/AURORA-1248): Client retries 4xx errors
+* [AURORA-1279](https://issues.apache.org/jira/browse/AURORA-1279): Remove kerberos-specific build targets
+* [AURORA-1291](https://issues.apache.org/jira/browse/AURORA-1291): Consider defining a JSON format in place of INI
+* [AURORA-1179](https://issues.apache.org/jira/browse/AURORA-1179): Support hashed passwords in security.ini
+* [AURORA-1295](https://issues.apache.org/jira/browse/AURORA-1295): Support security for the ReadOnlyScheduler service
+
+# Announcer Authentication
+The Thermos executor can be configured to authenticate with ZooKeeper and include
+an [ACL](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl)
+on the nodes it creates, which will specify
+the privileges of clients to perform different actions on these nodes.  This
+feature is enabled by specifying an ACL configuration file to the executor with the
+`--announcer-zookeeper-auth-config` command line argument.
+
+When this feature is _not_ enabled, nodes created by the executor will have 'world/all' permission
+(`ZOO_OPEN_ACL_UNSAFE`).  In most production environments, operators should specify an ACL and
+limit access.
+
+## ZooKeeper Authentication Configuration
+The configuration file must be formatted as JSON with the following schema:
+
+```json
+{
+  "auth": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>"
+    }
+  ],
+  "acl": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>",
+      "permissions": {
+        "read": <bool>,
+        "write": <bool>,
+        "create": <bool>,
+        "delete": <bool>,
+        "admin": <bool>
+      }
+    }
+  ]
+}
+```
+
+The `scheme`
+defines the encoding of the credential field.  Note that these fields are passed directly to
+ZooKeeper (except in the case of _digest_ scheme, where the executor will hash and encode
+the credential appropriately before passing it to ZooKeeper). In addition to `acl`, a list of
+authentication credentials to use for the connection must be provided in `auth`.
+
+All properties of the `permissions` object default to `false` if not provided.
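+
+As an illustration, here is a short Python sketch that emits a config of this shape for the
+`digest` scheme, granting all permissions to a single user. The `user:password` credential form
+and the output file name are assumptions made for the example:
+
+```python
+import json
+
+# Placeholder credential; use your own user:password pair.
+CREDENTIAL = 'aurora:changeme'
+
+config = {
+    'auth': [
+        {'scheme': 'digest', 'credential': CREDENTIAL}
+    ],
+    'acl': [
+        {
+            'scheme': 'digest',
+            'credential': CREDENTIAL,
+            'permissions': {
+                'read': True,
+                'write': True,
+                'create': True,
+                'delete': True,
+                'admin': True
+            }
+        }
+    ]
+}
+
+with open('zk_auth.json', 'w') as f:
+    json.dump(config, f, indent=2)
+```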
+
+## Executor settings
+To enable the executor to authenticate against ZK, `--announcer-zookeeper-auth-config` should be
+set to the path of the configuration file.
\ No newline at end of file
diff --git a/source/documentation/0.16.0/operations/storage.md b/source/documentation/0.16.0/operations/storage.md
new file mode 100644
index 0000000..874dfe6
--- /dev/null
+++ b/source/documentation/0.16.0/operations/storage.md
@@ -0,0 +1,97 @@
+# Aurora Scheduler Storage
+
+- [Overview](#overview)
+- [Storage Semantics](#storage-semantics)
+  - [Reads, writes, modifications](#reads-writes-modifications)
+    - [Read lifecycle](#read-lifecycle)
+    - [Write lifecycle](#write-lifecycle)
+  - [Atomicity, consistency and isolation](#atomicity-consistency-and-isolation)
+  - [Population on restart](#population-on-restart)
+
+
+## Overview
+
+The Aurora scheduler maintains data that must be persisted to survive failovers and restarts.
+For example:
+
+* Task configurations and scheduled task instances
+* Job update configurations and update progress
+* Production resource quotas
+* Mesos resource offer host attributes
+
+Aurora solves its persistence needs by leveraging the Mesos implementation of a Paxos replicated
+log [[1]](https://ramcloud.stanford.edu/~ongaro/userstudy/paxos.pdf)
+[[2]](http://en.wikipedia.org/wiki/State_machine_replication) with a key-value
+[LevelDB](https://github.com/google/leveldb) storage as the persistence medium.
+
+Conceptually, it can be represented by the following major components:
+
+* Volatile storage: in-memory cache of all available data. Implemented via in-memory
+[H2 Database](http://www.h2database.com/html/main.html) and accessed via
+[MyBatis](http://mybatis.github.io/mybatis-3/).
+* Log manager: interface between Aurora storage and Mesos replicated log. The default schema format
+is [thrift](https://github.com/apache/thrift). Data is stored in serialized binary form.
+* Snapshot manager: all data is periodically persisted in Mesos replicated log in a single snapshot.
+This helps establish periodic recovery checkpoints and speeds up volatile storage recovery on
+restart.
+* Backup manager: as a precaution, snapshots are periodically written out into backup files.
+This solves a [disaster recovery problem](../backup-restore/)
+in case of a complete loss or corruption of Mesos log files.
+
+![Storage hierarchy](../images/storage_hierarchy.png)
+
+
+## Storage Semantics
+
+This section covers implementation details of the Aurora storage system. Understanding them can
+be useful when investigating performance issues.
+
+### Reads, writes, modifications
+
+All services in Aurora access data via a set of predefined store interfaces (aka stores) logically
+grouped by the type of data they serve. Every interface defines a specific set of operations allowed
+on the data, thus abstracting out the storage access and the actual persistence implementation. The
+latter is especially important in view of a general immutability of persisted data. With the Mesos
+replicated log as the underlying persistence solution, data can be read and written easily but not
+modified. All modifications are simulated by saving new versions of modified objects. This feature
+and general performance considerations justify the existence of the volatile in-memory store.
+
+#### Read lifecycle
+
+There are two types of reads available in Aurora: consistent and weakly-consistent. The difference
+is explained [below](#atomicity-consistency-and-isolation).
+
+All reads are served from the volatile storage making reads generally cheap storage operations
+from the performance standpoint. The majority of the volatile stores are represented by the
+in-memory H2 database. This allows for rich schema definitions, queries and relationships that
+key-value storage is unable to match.
+
+#### Write lifecycle
+
+Writes are more involved operations since, in addition to updating the volatile store, data has to be
+appended to the replicated log. Data is not available for reads until fully ack-ed by both
+replicated log and volatile storage.
+
+### Atomicity, consistency and isolation
+
+Aurora uses [write-ahead logging](http://en.wikipedia.org/wiki/Write-ahead_logging) to ensure
+consistency between replicated and volatile storage. In Aurora, data is first written into the
+replicated log and only then updated in the volatile store.
+
+Aurora storage uses read-write locks to serialize data mutations and provide a consistent view of
+the available data. The available `Storage` interface exposes 3 major types of operations:
+
+* `consistentRead` - access is locked using the reader's lock and provides a consistent view on read
+* `weaklyConsistentRead` - access is lock-less. Delivers best contention performance but may result
+in stale reads
+* `write` - access is fully serialized by using the writer's lock. Operation success requires both
+volatile and replicated writes to succeed.
+
+The consistency of the volatile store is enforced via H2 transactional isolation.
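+
+The ordering can be sketched as follows (illustrative Python with stand-in classes, not Aurora's
+actual implementation):
+
+```python
+import threading
+
+class FakeLog(object):
+    """Stand-in for the Mesos replicated log."""
+    def __init__(self):
+        self.entries = []
+    def append(self, entry):
+        self.entries.append(entry)  # a real log blocks until a quorum acks
+
+class FakeStore(object):
+    """Stand-in for the volatile in-memory store."""
+    def __init__(self):
+        self.data = {}
+    def apply(self, key, value):
+        self.data[key] = value
+
+log, store, write_lock = FakeLog(), FakeStore(), threading.Lock()
+
+def write(key, value):
+    # Write-ahead ordering: append durably to the replicated log first,
+    # then update the volatile store; the writer lock serializes mutations.
+    with write_lock:
+        log.append((key, value))
+        store.apply(key, value)
+
+write('quota/www-data', '100 cores')
+print(store.data)
+```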
+
+### Population on restart
+
+Any time a scheduler restarts, it restores its volatile state from the most recent position recorded
+in the replicated log by restoring the snapshot and replaying individual log entries on top to fully
+recover the state up to the last write.
diff --git a/source/documentation/0.16.0/reference/client-cluster-configuration.md b/source/documentation/0.16.0/reference/client-cluster-configuration.md
new file mode 100644
index 0000000..5e85802
--- /dev/null
+++ b/source/documentation/0.16.0/reference/client-cluster-configuration.md
@@ -0,0 +1,93 @@
+# Client Cluster Configuration
+
+A cluster configuration file is used by the Aurora client to describe the Aurora clusters with
+which it can communicate. Ultimately this allows client users to reference clusters with short names
+like us-east and eu.
+
+A cluster configuration is formatted as JSON.  The simplest cluster configuration is one that
+communicates with a single (non-leader-elected) scheduler.  For example:
+
+    [{
+      "name": "example",
+      "scheduler_uri": "http://localhost:55555"
+    }]
+
+
+A configuration for a leader-elected scheduler would contain something like:
+
+    [{
+      "name": "example",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler"
+    }]
+
+
+The following properties may be set:
+
+  **Property**             | **Type** | **Description**
+  :------------------------| :------- | :--------------
+   **name**                | String   | Cluster name (Required)
+   **slave_root**          | String   | Path to Mesos agent work dir (Required)
+   **slave_run_directory** | String   | Name of Mesos agent run dir (Required)
+   **zk**                  | String   | Hostname of ZooKeeper instance used to resolve Aurora schedulers.
+   **zk_port**             | Integer  | Port of ZooKeeper instance used to locate Aurora schedulers (Default: 2181)
+   **scheduler_zk_path**   | String   | ZooKeeper path under which scheduler instances are registered.
+   **scheduler_uri**       | String   | URI of Aurora scheduler instance.
+   **proxy_url**           | String   | Used by the client to format URLs for display.
+   **auth_mechanism**      | String   | The authentication mechanism to use when communicating with the scheduler. (Default: UNAUTHENTICATED)
+
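+As a sketch of how a tool might consume this file, the following Python validates the required
+properties from the table above and indexes clusters by name. It is illustrative only, not the
+Aurora client's actual loader:
+
+    import json
+
+    REQUIRED = ('name', 'slave_root', 'slave_run_directory')
+
+    def load_clusters(path):
+        with open(path) as f:
+            clusters = json.load(f)  # the file is a JSON list of cluster objects
+        for cluster in clusters:
+            missing = [key for key in REQUIRED if key not in cluster]
+            if missing:
+                raise ValueError('cluster %r is missing: %s'
+                                 % (cluster.get('name'), ', '.join(missing)))
+        # Index by name so short names like "us-east" resolve to full configs.
+        return dict((c['name'], c) for c in clusters)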
+
+## Details
+
+### `name`
+
+The name of the Aurora cluster represented by this entry. This name will be the `cluster` portion of
+any job keys identifying jobs running within the cluster.
+
+### `slave_root`
+
+The path on the Mesos agents where executing tasks can be found. It is used in combination with the
+`slave_run_directory` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This value should match the value passed to `mesos-slave`
+as `-work_dir`.
+
+### `slave_run_directory`
+
+The name of the directory where the task run can be found. This is used in combination with the
+`slave_root` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This should almost always be set to `latest`.
+
+### `zk`
+
+The hostname of the ZooKeeper instance used to resolve the Aurora scheduler. Aurora uses ZooKeeper
+to elect a leader. The client will connect to this ZooKeeper instance to determine the current
+leader. This host should match the host passed to the scheduler as `-zk_endpoints`.
+
+### `zk_port`
+
+The port on which the ZooKeeper instance is running. If not set this will default to the standard
+ZooKeeper port of 2181. This port should match the port in the host passed to the scheduler as
+`-zk_endpoints`.
+
+### `scheduler_zk_path`
+
+The path on the ZooKeeper instance under which the Aurora serverset is registered. This value should
+match the value passed to the scheduler as `-serverset_path`.
+
+### `scheduler_uri`
+
+The URI of the scheduler. This would be used in place of the ZooKeeper related configuration above
+in circumstances where direct communication with a single scheduler is needed (e.g. testing
+environments). It is strongly advised to **never** use this property for production deploys.
+
+### `proxy_url`
+
+If `proxy_url` is set, its value is used in place of the hostname of the leading scheduler as the
+base URL. In that scenario the value for `proxy_url` would be, for example, the URL of your VIP in
+a load balancer or a round-robin DNS name.
+
+### `auth_mechanism`
+
+The identifier of an authentication mechanism that the client should use when communicating with the
+scheduler. Support for values other than `UNAUTHENTICATED` requires a matching scheduler-side
+[security configuration](../../operations/security/).
diff --git a/source/documentation/0.16.0/reference/client-commands.md b/source/documentation/0.16.0/reference/client-commands.md
new file mode 100644
index 0000000..36f0ecb
--- /dev/null
+++ b/source/documentation/0.16.0/reference/client-commands.md
@@ -0,0 +1,326 @@
+Aurora Client Commands
+======================
+
+- [Introduction](#introduction)
+- [Cluster Configuration](#cluster-configuration)
+- [Job Keys](#job-keys)
+- [Modifying Aurora Client Commands](#modifying-aurora-client-commands)
+- [Regular Jobs](#regular-jobs)
+    - [Creating and Running a Job](#creating-and-running-a-job)
+    - [Running a Command On a Running Job](#running-a-command-on-a-running-job)
+    - [Killing a Job](#killing-a-job)
+    - [Adding Instances](#adding-instances)
+    - [Updating a Job](#updating-a-job)
+        - [Coordinated job updates](#coordinated-job-updates)
+    - [Renaming a Job](#renaming-a-job)
+    - [Restarting Jobs](#restarting-jobs)
+- [Cron Jobs](#cron-jobs)
+- [Comparing Jobs](#comparing-jobs)
+- [Viewing/Examining Jobs](#viewingexamining-jobs)
+    - [Listing Jobs](#listing-jobs)
+    - [Inspecting a Job](#inspecting-a-job)
+    - [Versions](#versions)
+    - [Checking Your Quota](#checking-your-quota)
+    - [Finding a Job on Web UI](#finding-a-job-on-web-ui)
+    - [Getting Job Status](#getting-job-status)
+    - [Opening the Web UI](#opening-the-web-ui)
+    - [SSHing to a Specific Task Machine](#sshing-to-a-specific-task-machine)
+    - [Templating Command Arguments](#templating-command-arguments)
+
+Introduction
+------------
+
+Once you have written an `.aurora` configuration file that describes
+your Job and its parameters and functionality, you interact with Aurora
+using Aurora Client commands. This document describes all of these commands
+and how and when to use them. All Aurora Client commands start with
+`aurora`, followed by the name of the specific command and its
+arguments.
+
+*Job keys* are a very common argument to Aurora commands, as well as the
+gateway to useful information about a Job. Before using Aurora, you
+should read the next section which describes them in detail. The section
+after that briefly describes how you can modify the behavior of certain
+Aurora Client commands, linking to a detailed document about how to do
+that.
+
+This is followed by the Regular Jobs section, which describes the basic
+Client commands for creating, running, and manipulating Aurora Jobs.
+After that are sections on Comparing Jobs and Viewing/Examining Jobs,
+that is, various commands for getting information and metadata about
+Aurora Jobs.
+
+Cluster Configuration
+---------------------
+
+The client must be able to find a configuration file that specifies available clusters. This file
+declares shorthand names for clusters, which are in turn referenced by job configuration files
+and client commands.
+
+The client will load at most two configuration files, making both of their defined clusters
+available. The first is intended to be a system-installed cluster, using the path specified in
+the environment variable `AURORA_CONFIG_ROOT`, defaulting to `/etc/aurora/clusters.json` if the
+environment variable is not set. The second is a user-installed file, located at
+`~/.aurora/clusters.json`.
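+
+A rough Python sketch of that lookup order (illustrative; the real client's loader may differ in
+details such as how name clashes between the two files are resolved):
+
+    import json
+    import os
+
+    def config_paths():
+        # This sketch treats AURORA_CONFIG_ROOT as the directory holding the
+        # system file, falling back to /etc/aurora.
+        root = os.environ.get('AURORA_CONFIG_ROOT', '/etc/aurora')
+        system = os.path.join(root, 'clusters.json')
+        user = os.path.expanduser('~/.aurora/clusters.json')
+        return [p for p in (system, user) if os.path.exists(p)]
+
+    def load_all_clusters():
+        clusters = {}
+        for path in config_paths():
+            with open(path) as f:
+                for cluster in json.load(f):
+                    clusters[cluster['name']] = cluster  # user file wins clashes here
+        return clusters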
+
+For more details on cluster configuration see the
+[Client Cluster Configuration](../client-cluster-configuration/) documentation.
+
+Job Keys
+--------
+
+A job key is a unique system-wide identifier for an Aurora-managed
+Job, for example `cluster1/web-team/test/experiment204`. It is a 4-tuple
+consisting of, in order, *cluster*, *role*, *environment*, and
+*jobname*, separated by slashes. Cluster is the name of an Aurora
+cluster. Role is the Unix service account under which the Job
+runs. Environment is a namespace component like `devel`, `test`,
+`prod`, or `stagingN`. Jobname is the Job's name.
+
+The combination of all four values uniquely specifies the Job. If any
+one value is different from that of another job key, the two job keys
+refer to different Jobs. For example, job key
+`cluster1/tyg/prod/workhorse` is different from
+`cluster1/tyg/prod/workcamel` is different from
+`cluster2/tyg/prod/workhorse` is different from
+`cluster2/foo/prod/workhorse` is different from
+`cluster1/tyg/test/workhorse`.
+
+Role names are user accounts existing on the agent machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
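+
+Since job keys appear in nearly every command, here is a tiny Python sketch of how one decomposes
+(illustrative, not the client's actual parser):
+
+    def parse_job_key(job_key):
+        # A job key is cluster/role/env/jobname, separated by slashes.
+        parts = job_key.split('/')
+        if len(parts) != 4:
+            raise ValueError('expected cluster/role/env/jobname, got %r' % job_key)
+        cluster, role, env, name = parts
+        return {'cluster': cluster, 'role': role, 'env': env, 'name': name}
+
+    print(parse_job_key('cluster1/web-team/test/experiment204'))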
+
+Modifying Aurora Client Commands
+--------------------------------
+
+For certain Aurora Client commands, you can define hook methods that run
+either before or after an action that takes place during the command's
+execution, or that run depending on whether the action finished successfully or failed
+during execution. Basically, a hook is code that lets you extend the
+command's actions. The hook executes on the client side, specifically on
+the machine executing Aurora commands.
+
+Hooks can be associated with these Aurora Client commands:
+
+  - `job create`
+  - `job kill`
+  - `job restart`
+
+The process for writing and activating them is complex enough
+that we explain it in a devoted document, [Hooks for Aurora Client API](../client-hooks/).
+
+Regular Jobs
+------------
+
+This section covers Aurora commands related to running, killing,
+renaming, updating, and restarting a basic Aurora Job.
+
+### Creating and Running a Job
+
+    aurora job create <job key> <configuration file>
+
+Creates and then runs a Job with the specified job key based on a `.aurora` configuration file.
+The configuration file may also contain and activate hook definitions.
+
+### Running a Command On a Running Job
+
+    aurora task run CLUSTER/ROLE/ENV/NAME[/INSTANCES] <cmd>
+
+Runs a shell command on all machines currently hosting shards of a
+single Job.
+
+`run` supports the same command line wildcards used to populate a Job's
+commands; i.e. anything in the `{{mesos.*}}` and `{{thermos.*}}`
+namespaces.
+
+### Killing a Job
+
+    aurora job killall CLUSTER/ROLE/ENV/NAME
+
+Kills all Tasks associated with the specified Job, blocking until all
+are terminated. Defaults to killing all instances in the Job.
+
+The `<configuration file>` argument for `kill` is optional. Use it only
+if it contains hook definitions and activations that affect the
+kill command.
+
+### Adding Instances
+
+    aurora job add CLUSTER/ROLE/ENV/NAME/INSTANCE <count>
+
+Adds `<count>` instances to the existing job. The configuration of the new instances is derived from
+an active job instance pointed to by the `/INSTANCE` part of the job specification. This command is
+a simpler way to scale out an existing job when an instance with desired task configuration
+already exists. Use `aurora update start` to add instances with a new (updated) configuration.
+
+### Updating a Job
+
+You can manage job updates using the `aurora update` command.  Please see
+[the Job Update documentation](../../features/job-updates/) for more details.
+
+
+### Renaming a Job
+
+Renaming is a tricky operation as downstream clients must be informed of
+the new name. A conservative approach
+to renaming suitable for production services is:
+
+1.  Modify the Aurora configuration file to change the role,
+    environment, and/or name as appropriate to the standardized naming
+    scheme.
+2.  Check that only these naming components have changed
+    with `aurora diff`.
+
+        aurora job diff CLUSTER/ROLE/ENV/NAME <job_configuration>
+
+3.  Create the (identical) job at the new key. You may need to request a
+    temporary quota increase.
+
+        aurora job create CLUSTER/ROLE/ENV/NEW_NAME <job_configuration>
+
+4.  Migrate all clients over to the new job key. Update all links and
+    dashboards. Ensure that both job keys run identical versions of the
+    code while in this state.
+5.  After verifying that all clients have successfully moved over, kill
+    the old job.
+
+        aurora job killall CLUSTER/ROLE/ENV/NAME
+
+6.  If you received a temporary quota increase, be sure to let the
+    powers that be know you no longer need the additional capacity.
+
+### Restarting Jobs
+
+`restart` restarts all shards of the Job identified by a job key:
+
+    aurora job restart CLUSTER/ROLE/ENV/NAME[/INSTANCES]
+
+Restarts are controlled on the client side, so aborting
+the `job restart` command halts the restart operation.
+
+**Note**: `job restart` only applies its command line arguments; it neither uses nor is affected
+by `update.config`. Restarting does ***not*** involve a configuration change. To update the
+configuration, use `update.config`.
+
+The `--config` argument for restart is optional. Use it only
+if it contains hook definitions and activations that affect the
+`job restart` command.
+
+Cron Jobs
+---------
+
+You can manage cron jobs using the `aurora cron` command.  Please see
+[the Cron Jobs Feature](../../features/cron-jobs/) for more details.
+
+Comparing Jobs
+--------------
+
+    aurora job diff CLUSTER/ROLE/ENV/NAME <job configuration>
+
+Compares a job configuration against a running job. By default the diff
+is determined using `diff`, though you may choose an alternate diff
+program by specifying the `DIFF_VIEWER` environment variable.
+
+Viewing/Examining Jobs
+----------------------
+
+Above we discussed creating, killing, and updating Jobs. Here we discuss
+how to view and examine Jobs.
+
+### Listing Jobs
+
+    aurora config list <job configuration>
+
+Lists all Jobs registered with the Aurora scheduler in the named cluster for the named role.
+
+### Inspecting a Job
+
+    aurora job inspect CLUSTER/ROLE/ENV/NAME <job configuration>
+
+`inspect` verifies that its specified job can be parsed from a
+configuration file, and displays the parsed configuration.
+
+### Checking Your Quota
+
+    aurora quota get CLUSTER/ROLE
+
+Prints the production quota allocated to the role at the given
+cluster. Only non-[dedicated](../../features/constraints/#dedicated-attribute)
+[production](../configuration/#job-objects) jobs consume quota.
+
+### Finding a Job on Web UI
+
+When you create a job, part of the output response contains a URL that goes
+to the job's scheduler UI page. For example:
+
+    vagrant@precise64:~$ aurora job create devcluster/www-data/prod/hello /vagrant/examples/jobs/hello_world.aurora
+    INFO] Creating job hello
+    INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+    INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+You can go to the scheduler UI page for this job via `http://precise64:8081/scheduler/www-data/prod/hello`.
+You can go to the overall scheduler UI page by going to the part of that URL that ends at `scheduler`: `http://precise64:8081/scheduler`.
+
+Once you click through to a role page, you see Jobs grouped
+separately into pending, active, and finished Jobs.
+Jobs are arranged by role, typically a service account for
+production jobs and user accounts for test or development jobs.
+
+### Getting Job Status
+
+    aurora job status <job_key>
+
+Returns the status of recent tasks associated with the
+`job_key` specified Job in its supplied cluster. Typically this includes
+a mix of active tasks (running or assigned) and inactive tasks
+(successful, failed, and lost).
+
+### Opening the Web UI
+
+Use the Job's web UI scheduler URL or the `aurora status` command to find out on which
+machines individual tasks are scheduled. You can open the web UI via the
+`open` client command if invoked from your machine:
+
+    aurora job open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+If only the cluster is specified, it goes directly to that cluster's
+scheduler main page. If the role is specified, it goes to the top-level
+role page. If the full job key is specified, it goes directly to the job
+page where you can inspect individual tasks.
+
+### SSHing to a Specific Task Machine
+
+    aurora task ssh <job_key> <shard number>
+
+You can have the Aurora client ssh directly to the machine that has been
+assigned a particular Job/shard number. This may be useful for quickly
+diagnosing issues such as performance issues or abnormal behavior on a
+particular machine.
+
+### Templating Command Arguments
+
+    aurora task run [-e] [-t THREADS] <job_key> -- <<command-line>>
+
+Given a job specification, run the supplied command on all hosts and
+return the output. You may use the standard Mustache templating rules:
+
+- `{{thermos.ports[name]}}` substitutes the specific named port of the
+  task assigned to this machine
+- `{{mesos.instance}}` substitutes the shard id of the job's task
+  assigned to this machine
+- `{{thermos.task_id}}` substitutes the task id of the job's task
+  assigned to this machine
+
+For example, the following type of pattern can be a powerful diagnostic
+tool:
+
+    aurora task run -t5 cluster1/tyg/devel/seizure -- \
+      'curl -s -m1 localhost:{{thermos.ports[http]}}/vars | grep uptime'
+
+By default, the command runs in the Task's sandbox. The `-e` option can
+run the command in the executor's sandbox. This is mostly useful for
+Aurora administrators.
+
+You can parallelize the runs by using the `-t` option.
diff --git a/source/documentation/0.16.0/reference/client-hooks.md b/source/documentation/0.16.0/reference/client-hooks.md
new file mode 100644
index 0000000..ee64193
--- /dev/null
+++ b/source/documentation/0.16.0/reference/client-hooks.md
@@ -0,0 +1,228 @@
+# Hooks for Aurora Client API
+
+You can execute hook methods around Aurora API Client methods when they are called by the Aurora Command Line commands.
+
+Explaining how hooks work is a bit tricky because of some indirection about what they apply to. Basically, a hook is code that executes when a particular Aurora Client API method runs, letting you extend the method's actions. The hook executes on the client side, specifically on the machine executing Aurora commands.
+
+The catch is that hooks are associated with Aurora Client API methods, which users don't directly call. Instead, users call Aurora Command Line commands, which call Client API methods during their execution. Since which hooks run depends on which Client API methods get called, you will need to know which Command Line commands call which API methods. Later on, there is a table showing the various associations.
+
+**Terminology Note**: From now on, "method(s)" refer to Client API methods, and "command(s)" refer to Command Line commands.
+
+- [Hook Types](#hook-types)
+- [Execution Order](#execution-order)
+- [Hookable Methods](#hookable-methods)
+- [Activating and Using Hooks](#activating-and-using-hooks)
+- [.aurora Config File Settings](#aurora-config-file-settings)
+- [Command Line](#command-line)
+- [Hooks Protocol](#hooks-protocol)
+  - [pre_ Methods](#pre_-methods)
+  - [err_ Methods](#err_-methods)
+  - [post_ Methods](#post_-methods)
+- [Generic Hooks](#generic-hooks)
+- [Hooks Process Checklist](#hooks-process-checklist)
+
+
+## Hook Types
+
+Hooks have three basic types, differing by when they run with respect to their associated method.
+
+`pre_<method_name>`: When its associated method is called, the `pre_` hook executes first, then the called method. If the `pre_` hook fails, the method never runs; later code that expected the method to succeed may then fail, terminating the Aurora client.
+
+Note that a `pre_` hook can error-trap internally so it does not
+return `False`. Designers/contributors of new `pre_` hooks should
+consider whether or not to error-trap them. You can error trap at the
+highest level very generally and always pass the `pre_` hook by
+returning `True`. For example:
+
+    def pre_create(...):
+      do_something()  # if do_something fails with an exception, the create_job is not attempted!
+      return True
+
+    # However...
+    def pre_create(...):
+      try:
+        do_something()  # may cause exception
+      except Exception:  # generic error trap will catch it
+        pass  # and ignore the exception
+      return True  # create_job will run in any case!
+
+`post_<method_name>`: A `post_` hook executes after its associated method successfully finishes running. If it fails, the already executed method is unaffected. A `post_` hook's error is trapped, and any later operations are unaffected.
+
+`err_<method_name>`: Executes only when its associated method returns a status other than OK or throws an exception. If an `err_` hook fails, the already executed method is unaffected. An `err_` hook's error is trapped, and any later operations are unaffected.
+
+## Execution Order
+
+A command with `pre_`, `post_`, and `err_` hooks defined and activated for its called method executes in the following order when the method successfully executes:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and successfully finishes
+6. `post_` method hook runs
+7. Command code executes
+8. Command execution ends
+
+The following is what happens when, for the same command and hooks, the method associated with the command suffers an error and does not successfully finish executing:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and fails
+6. `err_` method hook runs
+7. Command code executes (if the `err_` method does not end the command execution)
+8. Command execution ends
+
+Note that the `post_` and `err_` hooks for the same method can never both run for a single execution of that method.
+
+## Hookable Methods
+
+You can associate `pre_`, `post_`, and `err_` hooks with the following methods. Since you do not directly interact with the methods, but rather the Aurora Command Line commands that call them, for each method we also list the command(s) that can call the method. Note that a different method or methods may be called by a command depending on how the command's other code executes. Similarly, multiple commands can call the same method. We also list the methods' argument signatures, which are used by their associated hooks. <a name="Chart"></a>
+
+  Aurora Client API Method | Client API Method Argument Signature | Aurora Command Line Command
+  -------------------------| ------------------------------------- | ---------------------------
+  ```create_job``` | ```self```, ```config``` | ```job create```, ```runtask```
+  ```restart``` | ```self```, ```job_key```, ```shards```, ```update_config```, ```health_check_interval_seconds``` | ```job restart```
+  ```kill_job``` | ```self```, ```job_key```, ```shards=None``` |  ```job kill```
+  ```start_cronjob``` | ```self```, ```job_key``` | ```cron start```
+  ```start_job_update``` | ```self```, ```config```, ```instances=None``` | ```update start```
+
+Some specific examples:
+
+* `pre_create_job` executes when a `create_job` method is called, and before the `create_job` method itself executes.
+
+* `post_cancel_update` executes after a `cancel_update` method has successfully finished running.
+
+* `err_kill_job` executes when the `kill_job` method is called, but doesn't successfully finish running.
+
+## Activating and Using Hooks
+
+By default, hooks are inactive. If you do not want to use hooks, you do not need to make any changes to your code. If you do want to use hooks, you will need to alter your `.aurora` config file to activate them both for the configuration as a whole as well as for individual `Job`s. And, of course, you will need to define in your config file what happens when a particular hook executes.
+
+## .aurora Config File Settings
+
+You can define a top-level `hooks` variable in any `.aurora` config file. `hooks` is a list of all objects that define hooks used by `Job`s defined in that config file. If you do not want to define any hooks for a configuration, `hooks` is optional.
+
+    hooks = [Object_with_defined_hooks1, Object_with_defined_hooks2]
+
+Be careful when assembling a config file using `include` on multiple smaller config files. If there are multiple files that assign a value to `hooks`, only the last assignment made will stick. For example, if `x.aurora` has `hooks = [a, b, c]` and `y.aurora` has `hooks = [d, e, f]` and `z.aurora` has, in this order, `include x.aurora` and `include y.aurora`, the `hooks` value will be `[d, e, f]`.
+
+Also, for any `Job` that you want to use hooks with, its `Job` definition in the `.aurora` config file must set an `enable_hooks` flag to `True` (it defaults to `False`). By default, hooks are disabled and you must enable them for `Job`s of your choice.
+
+To summarize, to use hooks for a particular job, you must both activate hooks for your config file as a whole, and for that job. Activating hooks only for individual jobs won't work, nor will only activating hooks for your config file as a whole. You must also specify the hooks' defining object in the `hooks` variable.
+
+Recall that `.aurora` config files are written in Pystachio. So the following turns on hooks for production jobs at cluster1 and cluster2, but leaves them off for similar jobs with a defined user role. Of course, you also need to list the objects that define the hooks in your config file's `hooks` variable.
+
+    jobs = [
+            Job(enable_hooks = True, cluster = c, env = 'prod') for c in ('cluster1', 'cluster2')
+           ]
+    jobs.extend(
+       Job(cluster = c, env = 'prod', role = getpass.getuser()) for c in ('cluster1', 'cluster2'))
+       # Hooks disabled for these jobs
+
+## Command Line
+
+All Aurora Command Line commands now accept an `.aurora` config file as an optional parameter (some, of course, accept it as a required parameter). Whenever a command has a `.aurora` file parameter, any hooks specified and activated in the `.aurora` file can be used. For example:
+
+    aurora job restart cluster1/role/env/app myapp.aurora
+
+The command activates any hooks specified and activated in `myapp.aurora`. For the `restart` command, that is the only thing the `myapp.aurora` parameter does. So if the command were the following, no hooks could run, since there is no `.aurora` config file to specify any.
+
+    aurora job restart cluster1/role/env/app
+
+## Hooks Protocol
+
+Any object defined in the `.aurora` config file can define hook methods. You should define your hook methods within a class, and then use the class name as a value in the `hooks` list in your config file.
+
+Note that you can define other methods in the class that its hook methods can call; all the logic of a hook does not have to be in its definition.
+
+The following example defines a class containing a `pre_kill_job` hook definition that calls another method defined in the class.
+
+    # Defines a method pre_kill_job
+    class KillConfirmer(object):
+      def confirm(self, msg):
+        return raw_input(msg).lower() == 'yes'
+
+      def pre_kill_job(self, job_key, shards=None):
+        shards = ('shards %s' % shards) if shards is not None else 'all shards'
+        return self.confirm('Are you sure you want to kill %s (%s)? (yes/no): '
+                            % (job_key, shards))
+
+### pre_ Methods
+
+`pre_` methods have the signature:
+
+    pre_<API method name>(self, <associated method's signature>)
+
+`pre_` methods have the same signature as their associated method, with the addition of `self` as the first parameter. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `pre_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+If this method returns False, the API command call aborts.
+
+### err_ Methods
+
+`err_` methods have the signature:
+
+    err_<API method name>(self, exc, <associated method's signature>)
+
+`err_` methods have the same signature as their associated method, with the addition of a first parameter `self` and a second parameter `exc`. `exc` is either a result with responseCode other than `ResponseCode.OK` or an `Exception`. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `err_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+`err_` method return codes are ignored.
+
+### post_ Methods
+
+`post_` methods have the signature:
+
+    post_<API method name>(self, result, <associated method signature>)
+
+`post_` method parameters are `self`, then `result`, followed by the same parameter signature as their associated method. `result` is the result of the associated method call. See the [chart](#chart) above for the mapping of parameters to methods. When writing `post_` methods, you can use the `*` and `**` syntax to designate that all unspecified arguments are passed in a list to the `*`ed parameter and all unspecified named arguments with values are passed as name/value pairs to the `**`ed parameter.
+
+`post_` method return codes are ignored.
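+
+As an illustration (a sketch, not shipped code), here is a `post_` hook for `create_job` that
+simply records the result, using the `*`/`**` catch-all style described above:
+
+    class ResultLogger(object):
+      def post_create_job(self, result, *args, **kw):
+        # 'result' is the value returned by create_job; the remaining
+        # arguments mirror create_job's own signature.
+        print('create_job completed with result: %s' % (result,))
+        # post_ hook return codes are ignored, so nothing needs to be returned.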
+
+## Generic Hooks
+
+There are seven Aurora API Methods which any of the three hook types can attach to. Thus, there are 21 possible hook/method combinations for a single `.aurora` config file. Say that you define `pre_` and `post_` hooks for the `restart` method. That leaves 19 undefined hook/method combinations: `err_restart` and the 3 `pre_`, `post_`, and `err_` hooks for each of the other 6 hookable methods. You can define what happens when any of these otherwise undefined 19 hooks execute via a generic hook, whose signature is:
+
+    generic_hook(self, hook_config, event, method_name, result_or_err, args*, kw**)
+
+where:
+
+* `hook_config` is a named tuple of `config` (the Pystachio `config` object) and `job_key`.
+
+* `event` is one of `pre`, `err`, or `post`, indicating which type of hook the generic hook is standing in for. For example, assume no specific hooks were defined for the `restart` API command. If `generic_hook` is defined and activated, and `restart` is called, `generic_hook` will effectively run as `pre_restart`, `post_restart`, and `err_restart`. You can use a selection statement on this value so that `generic_hook` will act differently based on whether it is standing in for a `pre_`, `post_`, or `err_` hook.
+
+* `method_name` is the Client API method name whose execution is causing this execution of the `generic_hook`.
+
+* `args*`, `kw**` are the API method arguments and keyword arguments respectively.
+* `result_or_err` is a tri-state parameter taking one of these three values:
+  1. `None` for `pre_` hooks
+  2. `result` for `post_` hooks
+  3. `exc` for `err_` hooks
+
+Example:
+
+    # Overrides the standard do-nothing generic_hook by adding a log writing operation.
+    from twitter.common import log
+
+    class Logger(object):
+      '''Adds to the log every time a hookable API method is called'''
+      def generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw):
+        log.info('%s: %s_%s of %s'
+                 % (self.__class__.__name__, event, method_name, hook_config.job_key))
+
+## Hooks Process Checklist
+
+1. In your `.aurora` config file, add a `hooks` variable. Note that you may want to define a `.aurora` file only for hook definitions and then include this file in multiple other config files in which you want to use the same hooks.
+
+        hooks = []
+
+2. In the `hooks` variable, list all objects that define hooks used by `Job`s defined in this config:
+
+        hooks = [Object_hook_definer1, Object_hook_definer2]
+
+3. For each job that uses hooks in this config file, add `enable_hooks = True` to the `Job` definition. Note that this is necessary even if you only want to use the generic hook.
+
+4. Write your `pre_`, `post_`, and `err_` hook definitions as part of an object definition in your `.aurora` config file.
+
+5. If desired, write your `generic_hook` definition as part of an object definition in your `.aurora` config file. Remember, the object must be listed as a member of `hooks`.
+
+6. If your Aurora command line command does not otherwise take an `.aurora` config file argument, add the appropriate `.aurora` file as an argument in order to define and activate the configuration's hooks.
diff --git a/source/documentation/0.16.0/reference/configuration-best-practices.md b/source/documentation/0.16.0/reference/configuration-best-practices.md
new file mode 100644
index 0000000..9ce7fcb
--- /dev/null
+++ b/source/documentation/0.16.0/reference/configuration-best-practices.md
@@ -0,0 +1,187 @@
+Aurora Configuration Best Practices
+===================================
+
+Use As Few .aurora Files As Possible
+------------------------------------
+
+When creating your `.aurora` configuration, try to keep all versions of
+a particular job within the same `.aurora` file. For example, if you
+have separate jobs for `cluster1`, `cluster1` staging, `cluster1`
+testing, and `cluster2`, keep them as close together as possible.
+
+Constructs shared across multiple jobs owned by your team (e.g.
+team-level defaults or structural templates) can be split into separate
+`.aurora` files and included via the `include` directive.
+
+
+Avoid Boilerplate
+------------------
+
+If you see repetition or find yourself copying and pasting any parts of
+your configuration, it's likely an opportunity for templating. Take the
+example below:
+
+`redundant.aurora` contains:
+
+    download = Process(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'rm -rf Python-2.7.3 && tar xjf Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    build = Process(
+      name = 'build',
+      cmdline = 'pushd Python-2.7.3 && ./configure && make && popd',
+      max_failures = 1)
+
+    email = Process(
+      name = 'email',
+      cmdline = 'echo Success | mail feynman@tmc.com',
+      max_failures = 5,
+      min_duration = 1)
+
+    build_python = Task(
+      name = 'build_python',
+      processes = [download, unpack, build, email],
+      constraints = [Constraint(order = ['download', 'unpack', 'build', 'email'])])
+
+As you'll notice, there's a lot of repetition in the `Process`
+definitions. For example, almost every process sets a `max_failures`
+limit to 5 and a `min_duration` to 1. This is an opportunity for factoring
+into a common process template.
+
+Furthermore, the Python version is repeated everywhere. This can be
+bound via structural templating as described in the [Advanced Binding](../configuration-templating/#AdvancedBinding)
+section.
+
+`less_redundant.aurora` contains:
+
+    class Python(Struct):
+      version = Required(String)
+      base = Default(String, 'Python-{{version}}')
+      package = Default(String, '{{base}}.tar.bz2')
+
+    ReliableProcess = Process(
+      max_failures = 5,
+      min_duration = 1)
+
+    download = ReliableProcess(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/{{python.version}}/{{python.package}}')
+
+    unpack = ReliableProcess(
+      name = 'unpack',
+      cmdline = 'rm -rf {{python.base}} && tar xjf {{python.package}}')
+
+    build = ReliableProcess(
+      name = 'build',
+      cmdline = 'pushd {{python.base}} && ./configure && make && popd',
+      max_failures = 1)
+
+    email = ReliableProcess(
+      name = 'email',
+      cmdline = 'echo Success | mail {{role}}@foocorp.com')
+
+    build_python = SequentialTask(
+      name = 'build_python',
+      processes = [download, unpack, build, email]).bind(python = Python(version = "2.7.3"))
+
+
+Thermos Uses bash, But Thermos Is Not bash
+-------------------------------------------
+
+#### Bad
+
+Many tiny Processes make for configurations that are harder to manage.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'rcp user@my_machine:my_application .'
+    )
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+#### Good
+
+Each `cmdline` runs in a bash subshell, so you have the full power of
+bash. Chaining commands with `&&` or `||` is almost always the right
+thing to do.
+
+Also, for Tasks that are simply a list of processes that run one after
+another, consider using the `SequentialTask` helper which applies a
+linear ordering constraint for you.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'rcp user@my_machine:my_application . && unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+
+Rarely Use Functions In Your Configurations
+-------------------------------------------
+
+90% of the time you define a function in a `.aurora` file, you're
+probably Doing It Wrong(TM).
+
+#### Bad
+
+    def get_my_task(name, user, cpu, ram, disk):
+      return Task(
+        name = name,
+        user = user,
+        processes = [STAGE_PROCESS, RUN_PROCESS],
+        constraints = order(STAGE_PROCESS, RUN_PROCESS),
+        resources = Resources(cpu = cpu, ram = ram, disk = disk)
+      )
+
+    task_one = get_my_task('task_one', 'feynman', 1.0, 32*MB, 1*GB)
+    task_two = get_my_task('task_two', 'feynman', 2.0, 64*MB, 1*GB)
+
+#### Good
+
+This one is more idiomatic. Forced keyword arguments prevent accidents,
+e.g. constructing a task with `32*MB` when you mean 32MB of ram and not
+disk. Less proliferation of task-construction techniques makes for
+easier-to-read, quicker-to-understand, and more composable
+configurations.
+
+    TASK_TEMPLATE = SequentialTask(
+      user = 'wickman',
+      processes = [STAGE_PROCESS, RUN_PROCESS],
+    )
+
+    task_one = TASK_TEMPLATE(
+      name = 'task_one',
+      resources = Resources(cpu = 1.0, ram = 32*MB, disk = 1*GB)
+    )
+
+    task_two = TASK_TEMPLATE(
+      name = 'task_two',
+      resources = Resources(cpu = 2.0, ram = 64*MB, disk = 1*GB)
+    )
diff --git a/source/documentation/0.16.0/reference/configuration-templating.md b/source/documentation/0.16.0/reference/configuration-templating.md
new file mode 100644
index 0000000..16d5c9d
--- /dev/null
+++ b/source/documentation/0.16.0/reference/configuration-templating.md
@@ -0,0 +1,306 @@
+Aurora Configuration Templating
+===============================
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a templating library called
+*Pystachio*, a powerful tool for configuration specification and reuse.
+
+[Aurora Configuration Reference](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the *Aurora+Thermos Configuration
+Reference* without `import` statements - the Aurora config loader
+injects them automatically. Other than that the `.aurora` format
+works like any other Python script.
+
+
+Templating 1: Binding in Pystachio
+----------------------------------
+
+Pystachio uses the visually distinctive {{}} to indicate template
+variables. These are often called "mustache variables" after the
+similarly appearing variables in the Mustache templating system and
+because the curly braces resemble mustaches.
+
+If you are familiar with the Mustache system, note that templates in
+Pystachio differ significantly. They have no nesting, joining, or
+inheritance semantics. On the other hand, templates are evaluated
+iteratively, which affords some level of indirection.
+
+Let's start with the simplest template: text with one
+variable, in this case `name`:
+
+    Hello {{name}}
+
+If we evaluate this as is, we'd get back:
+
+    Hello
+
+If a template variable doesn't have a value, when evaluated it's
+replaced with nothing. If we add a binding to give it a value:
+
+    { "name" : "Tom" }
+
+We'd get back:
+
+    Hello Tom
+
+Every Pystachio object has an associated `.bind` method that can bind
+values to {{}} variables. Bindings are not immediately evaluated.
+Instead, they are evaluated only when the interpolated value of the
+object is necessary, e.g. for performing equality or serializing a
+message over the wire.
+
+Objects with and without mustache templated variables behave
+differently:
+
+    >>> Float(1.5)
+    Float(1.5)
+
+    >>> Float('{{x}}.5')
+    Float({{x}}.5)
+
+    >>> Float('{{x}}.5').bind(x = 1)
+    Float(1.5)
+
+    >>> Float('{{x}}.5').bind(x = 1) == Float(1.5)
+    True
+
+    >>> contextual_object = String('{{metavar{{number}}}}').bind(
+    ... metavar1 = "first", metavar2 = "second")
+
+    >>> contextual_object
+    String({{metavar{{number}}}})
+
+    >>> contextual_object.bind(number = 1)
+    String(first)
+
+    >>> contextual_object.bind(number = 2)
+    String(second)
+
+You usually bind simple key to value pairs, but you can also bind three
+other objects: lists, dictionaries, and structurals. These will be
+described in detail later.
+
+
+### Structurals in Pystachio / Aurora
+
+Most Aurora/Thermos users don't ever (knowingly) interact with `String`,
+`Float`, or `Integer` Pystachio objects directly. Instead they interact
+with derived structural (`Struct`) objects that are collections of
+fundamental and structural objects. The structural object components are
+called *attributes*. Aurora's most used structural objects are `Job`,
+`Task`, and `Process`:
+
+    class Process(Struct):
+      cmdline = Required(String)
+      name = Required(String)
+      max_failures = Default(Integer, 1)
+      daemon = Default(Boolean, False)
+      ephemeral = Default(Boolean, False)
+      min_duration = Default(Integer, 5)
+      final = Default(Boolean, False)
+
+Construct default objects by following the object's type with (). If you
+want an attribute to have a value different from its default, include
+the attribute name and value inside the parentheses.
+
+    >>> Process()
+    Process(daemon=False, max_failures=1, ephemeral=False,
+      min_duration=5, final=False)
+
+Attribute values can be template variables, which then receive specific
+values when creating the object.
+
+    >>> Process(cmdline = 'echo {{message}}')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo {{message}}, final=False)
+
+    >>> Process(cmdline = 'echo {{message}}').bind(message = 'hello world')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+A powerful binding property is that all of an object's children inherit its
+bindings:
+
+    >>> List(Process)([
+    ... Process(name = '{{prefix}}_one'),
+    ... Process(name = '{{prefix}}_two')
+    ... ]).bind(prefix = 'hello')
+    ProcessList(
+      Process(daemon=False, name=hello_one, max_failures=1, ephemeral=False, min_duration=5, final=False),
+      Process(daemon=False, name=hello_two, max_failures=1, ephemeral=False, min_duration=5, final=False)
+      )
+
+Remember that an Aurora Job contains Tasks which contain Processes. A
+Job level binding is inherited by its Tasks and all their Processes.
+Similarly a Task level binding is available to that Task and its
+Processes but is *not* visible at the Job level (inheritance is a
+one-way street.)
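+
+For example, a minimal sketch (names and values are illustrative) of a
+Job level binding that its Task and Process can see:
+
+    main = Process(name = 'main', cmdline = 'echo deployed by {{owner}}')
+
+    hello_job = Job(
+      name = 'hello', role = 'myteam', cluster = 'cluster1', environment = 'devel',
+      task = SequentialTask(
+        name = 'hello',
+        processes = [main],
+        resources = Resources(cpu = 1.0, ram = 128*MB, disk = 128*MB))
+    ).bind(owner = 'alice')
+
+A binding applied only to `main` would be visible to that Process, but
+not at the Task or Job level.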
+
+#### Mustaches Within Structurals
+
+When you define a `Struct` schema, one powerful, but confusing, feature
+is that all of that structure's attributes are Mustache variables within
+the enclosing scope *once they have been populated*.
+
+For example, when `Process` is defined above, all its attributes, such as
+{{`name`}}, {{`cmdline`}}, and {{`max_failures`}}, are immediately
+defined as Mustache variables, implicitly bound into the `Process`, and
+inherited by all child objects once they are defined.
+
+Thus, you can do the following:
+
+    >>> Process(name = "installer", cmdline = "echo {{name}} is running")
+    Process(daemon=False, name=installer, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo installer is running, final=False)
+
+WARNING: This binding only takes place in one direction. For example,
+the following does NOT work and does not set the `Process` `name`
+attribute's value.
+
+    >>> Process().bind(name = "installer")
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5, final=False)
+
+The following is also not possible and results in an infinite loop that
+attempts to resolve `Process.name`.
+
+    >>> Process(name = '{{name}}').bind(name = 'installer')
+
+Do not confuse Structural attributes with bound Mustache variables.
+Attributes are implicitly converted to Mustache variables but not vice
+versa.
+
+### Templating 2: Structurals Are Factories
+
+#### A Second Way of Templating
+
+A second templating method is both as powerful as the aforementioned and
+often confused with it. This method is due to automatic conversion of
+Struct attributes to Mustache variables as described above.
+
+Suppose you create a Process object:
+
+    >>> p = Process(name = "process_one", cmdline = "echo hello world")
+
+    >>> p
+    Process(daemon=False, name=process_one, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+This `Process` object, "`p`", can be used wherever a `Process` object is
+needed. It can also be reused by changing the value(s) of its
+attribute(s). Here we change its `name` attribute from `process_one` to
+`process_two`.
+
+    >>> p(name = "process_two")
+    Process(daemon=False, name=process_two, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+Template creation is a common use for this technique:
+
+    >>> Daemon = Process(daemon = True)
+    >>> logrotate = Daemon(name = 'logrotate', cmdline = './logrotate conf/logrotate.conf')
+    >>> mysql = Daemon(name = 'mysql', cmdline = 'bin/mysqld --safe-mode')
+
+### Advanced Binding
+
+As described above, `.bind()` binds simple strings or numbers to
+Mustache variables. In addition to Structural types formed by combining
+atomic types, Pystachio has two container types, `List` and `Map`, which
+can also be bound via `.bind()`.
+
+#### Bind Syntax
+
+The `bind()` function can take Python dictionaries or `kwargs`
+interchangeably (when "`kwargs`" is in a function definition, `kwargs`
+receives a Python dictionary containing all keyword arguments after the
+formal parameter list).
+
+    >>> String('{{foo}}').bind(foo = 'bar') == String('{{foo}}').bind({'foo': 'bar'})
+    True
+
+Bindings done "closer" to the object in question take precedence:
+
+    >>> p = Process(name = '{{context}}_process')
+    >>> t = Task().bind(context = 'global')
+    >>> t(processes = [p, p.bind(context = 'local')])
+    Task(processes=ProcessList(
+      Process(daemon=False, name=global_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5),
+      Process(daemon=False, name=local_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5)
+    ))
+
+#### Binding Complex Objects
+
+##### Lists
+
+    >>> fibonacci = List(Integer)([1, 1, 2, 3, 5, 8, 13])
+    >>> String('{{fib[4]}}').bind(fib = fibonacci)
+    String(5)
+
+##### Maps
+
+    >>> first_names = Map(String, String)({'Kent': 'Clark', 'Wayne': 'Bruce', 'Prince': 'Diana'})
+    >>> String('{{first[Kent]}}').bind(first = first_names)
+    String(Clark)
+
+##### Structurals
+
+    >>> String('{{p.cmdline}}').bind(p = Process(cmdline = "echo hello world"))
+    String(echo hello world)
+
+### Structural Binding
+
+Use structural templates when binding more than two or three individual
+values at the Job or Task level. For fewer values, standard key-to-string
+binding is sufficient.
+
+Structural binding is a very powerful pattern and is most useful in
+Aurora/Thermos for doing Structural configuration. For example, you can
+define a job profile. The following profile uses `HDFS`, the Hadoop
+Distributed File System, to designate a file's location. `HDFS` does
+not come with Aurora, so you'll need to either install it separately
+or change the way the dataset is designated.
+
+    class Profile(Struct):
+      version = Required(String)
+      environment = Required(String)
+      dataset = Default(String, 'hdfs://home/aurora/data/{{environment}}')
+
+    PRODUCTION = Profile(version = 'live', environment = 'prod')
+    DEVEL = Profile(version = 'latest',
+                    environment = 'devel',
+                    dataset = 'hdfs://home/aurora/data/test')
+    TEST = Profile(version = 'latest', environment = 'test')
+
+    JOB_TEMPLATE = Job(
+      name = 'application',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = '{{profile.environment}}',
+      task = SequentialTask(
+        name = 'task',
+        resources = Resources(cpu = 2, ram = 4*GB, disk = 8*GB),
+        processes = [
+          Process(name = 'main',
+                  cmdline = 'java -jar application.jar -hdfsPath {{profile.dataset}}')
+        ]
+      )
+    )
+
+    jobs = [
+      JOB_TEMPLATE(instances = 100).bind(profile = PRODUCTION),
+      JOB_TEMPLATE.bind(profile = DEVEL),
+      JOB_TEMPLATE.bind(profile = TEST),
+    ]
+
+In this case, a custom structural "Profile" is created to self-document
+the configuration to some degree. This also allows some schema
+"type-checking", and for default self-substitution, e.g. in
+`Profile.dataset` above.
+
+So rather than a `.bind()` with a half-dozen substituted variables, you
+can bind a single object that has sensible defaults stored in a single
+place.
diff --git a/source/documentation/0.16.0/reference/configuration-tutorial.md b/source/documentation/0.16.0/reference/configuration-tutorial.md
new file mode 100644
index 0000000..dcfe505
--- /dev/null
+++ b/source/documentation/0.16.0/reference/configuration-tutorial.md
@@ -0,0 +1,511 @@
+Aurora Configuration Tutorial
+=============================
+
+How to write Aurora configuration files, including feature descriptions
+and best practices. When writing a configuration file, make use of
+`aurora job inspect`. It takes the same job key and configuration file
+arguments as `aurora job create` or `aurora update start`. It first ensures the
+configuration parses, then outputs it in human-readable form.
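+
+For example, a quick sanity check of a configuration (the job key and
+file name here are illustrative):
+
+    aurora job inspect cluster1/$USER/test/hello_world hello_world.aurora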
+
+You should read this after going through the general [Aurora Tutorial](../../getting-started/tutorial/).
+
+- [The Basics](#the-basics)
+	- [Use Bottom-To-Top Object Ordering](#use-bottom-to-top-object-ordering)
+- [An Example Configuration File](#an-example-configuration-file)
+- [Defining Process Objects](#defining-process-objects)
+- [Getting Your Code Into The Sandbox](#getting-your-code-into-the-sandbox)
+- [Defining Task Objects](#defining-task-objects)
+	- [SequentialTask: Running Processes in Parallel or Sequentially](#sequentialtask-running-processes-in-parallel-or-sequentially)
+	- [SimpleTask](#simpletask)
+	- [Combining tasks](#combining-tasks)
+- [Defining Job Objects](#defining-job-objects)
+- [The jobs List](#the-jobs-list)
+- [Basic Examples](#basic-examples)
+
+
+The Basics
+----------
+
+To run a job on Aurora, you must specify a configuration file that tells
+Aurora what it needs to know to schedule the job, what Mesos needs to
+run the tasks the job is made up of, and what Thermos needs to run the
+processes that make up the tasks. This file must have
+a `.aurora` suffix.
+
+A configuration file defines a collection of objects, along with parameter
+values for their attributes. An Aurora configuration file contains the
+following three types of objects:
+
+- Job
+- Task
+- Process
+
+A configuration also specifies a list of `Job` objects assigned
+to the variable `jobs`.
+
+- jobs (list of defined Jobs to run)
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a type-checked dictionary
+templating library called *Pystachio*, a powerful tool for
+configuration specification and reuse. Pystachio objects are tailored
+via {{}} surrounded templates.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the [*Aurora Configuration
+Reference*](../configuration/), without `import` statements - the
+Aurora config loader injects them automatically. Other than that, an `.aurora`
+file works like any other Python script.
+
+[*Aurora Configuration Reference*](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+### Use Bottom-To-Top Object Ordering
+
+A well-structured configuration starts with structural templates (if
+any). Structural templates encapsulate in their attributes all the
+differences between Jobs in the configuration that are not directly
+manipulated at the `Job` level, but typically at the `Process` or `Task`
+level, for example when certain processes are invoked with slightly
+different settings or input.
+
+After structural templates, define, in order, `Process`es, `Task`s, and
+`Job`s.
+
+Structural template names should be *UpperCamelCased* and their
+instantiations are typically *UPPER\_SNAKE\_CASED*. `Process`, `Task`,
+and `Job` names are typically *lower\_snake\_cased*. Indentation is typically 2
+spaces.
+
+An Example Configuration File
+-----------------------------
+
+The following is a typical configuration file. Don't worry if there are
+parts you don't understand yet, but you may want to refer back to this
+as you read about its individual parts. Note that names surrounded by
+curly braces {{}} are template variables, which the system replaces with
+bound values for the variables.
+
+    # --- templates here ---
+    class Profile(Struct):
+      package_version = Default(String, 'live')
+      java_binary = Default(String, '/usr/lib/jvm/java-1.7.0-openjdk/bin/java')
+      extra_jvm_options = Default(String, '')
+      parent_environment = Default(String, 'prod')
+      parent_serverset = Default(String,
+                                 '/foocorp/service/bird/{{parent_environment}}/bird')
+
+    # --- processes here ---
+    main = Process(
+      name = 'application',
+      cmdline = '{{profile.java_binary}} -server -Xmx1792m '
+                '{{profile.extra_jvm_options}} '
+                '-jar application.jar '
+                '-upstreamService {{profile.parent_serverset}}'
+    )
+
+    # --- tasks ---
+    base_task = SequentialTask(
+      name = 'application',
+      processes = [
+        Process(
+          name = 'fetch',
+          cmdline = 'curl -O https://packages.foocorp.com/{{profile.package_version}}/application.jar'),
+      ]
+    )
+
+    # not always necessary but often useful to have separate task
+    # resource classes
+    staging_task = base_task(resources =
+                     Resources(cpu = 1.0,
+                               ram = 2048*MB,
+                               disk = 1*GB))
+    production_task = base_task(resources =
+                        Resources(cpu = 4.0,
+                                  ram = 2560*MB,
+                                  disk = 10*GB))
+
+    # --- job template ---
+    job_template = Job(
+      name = 'application',
+      role = 'myteam',
+      contact = 'myteam-team@foocorp.com',
+      instances = 20,
+      service = True,
+      task = production_task
+    )
+
+    # -- profile instantiations (if any) ---
+    PRODUCTION = Profile()
+    STAGING = Profile(
+      extra_jvm_options = '-Xloggc:gc.log',
+      parent_environment = 'staging'
+    )
+
+    # -- job instantiations --
+    jobs = [
+      job_template(cluster = 'cluster1', environment = 'prod')
+        .bind(profile = PRODUCTION),
+
+      job_template(cluster = 'cluster2', environment = 'prod')
+        .bind(profile = PRODUCTION),
+
+      job_template(cluster = 'cluster1',
+                   environment = 'staging',
+                   service = False,
+                   task = staging_task,
+                   instances = 2)
+        .bind(profile = STAGING),
+    ]
+
+## Defining Process Objects
+
+Processes are handled by the Thermos system. A process is a single
+executable step run as a part of an Aurora task, which consists of a
+bash-executable statement.
+
+The key (and required) `Process` attributes are:
+
+-   `name`: Any string which is a valid Unix filename (no slashes,
+    NULLs, or leading periods). The `name` value must be unique relative
+    to other Processes in a `Task`.
+-   `cmdline`: A command line run in a bash subshell, so you can use
+    bash scripts. Nothing is supplied for command-line arguments,
+    so `$*` is unspecified.
+
+Many tiny processes make managing configurations more difficult. For
+example, the following is a bad way to define processes.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip'
+    )
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+Since `cmdline` runs in a bash subshell, you can chain commands
+with `&&` or `||`.
+
+When defining a `Task` that is just a list of Processes run in a
+particular order, use `SequentialTask`, as described in the
+[*Defining Task Objects*](#defining-task-objects) section. The following simplifies and combines the
+above multiple `Process` definitions into just two.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip && '
+                'unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+`Process` also has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#process-objects).
+
+
+## Getting Your Code Into The Sandbox
+
+When using Aurora, you need to get your executable code into its "sandbox", specifically
+the Task sandbox where the code executes for the Processes that make up that Task.
+
+Each Task has a sandbox created when the Task starts and garbage
+collected when it finishes. All of a Task's processes run in its
+sandbox, so processes can share state by using a shared current
+working directory.
+
+Typically, you save this code somewhere. You then need to define a Process
+in your `.aurora` configuration file that fetches the code from that location
+to where the agent can see it. For a public cloud, that can be anywhere public on
+the Internet, such as S3. For private cloud internal storage, you need to put it
+on an accessible HDFS cluster or similar storage.
+
+The template for this Process is:
+
+    <name> = Process(
+      name = '<name>',
+      cmdline = '<command to copy and extract code archive into current working directory>'
+    )
+
+Note: Be sure the extracted code archive has an executable.
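+
+For example, a sketch of such a fetch-and-unpack Process, assuming the
+archive is served from an internal package server (the URL is
+illustrative):
+
+    fetch_app = Process(
+      name = 'fetch_app',
+      cmdline = 'curl -O https://packages.foocorp.com/myapp/app.zip && unzip app.zip && rm -f app.zip'
+    )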
+
+## Defining Task Objects
+
+Tasks are handled by Mesos. A task is a collection of processes that
+runs in a shared sandbox. It's the fundamental unit Aurora uses to
+schedule the datacenter; essentially what Aurora does is find places
+in the cluster to run tasks.
+
+The key (and required) parts of a Task are:
+
+-   `name`: A string giving the Task's name. By default, if a Task is
+    not given a name, it inherits the first name in its Process list.
+
+-   `processes`: An unordered list of Process objects bound to the Task.
+    The value of the optional `constraints` attribute affects the
+    contents as a whole. Currently, the only constraint, `order`, determines if
+    the processes run in parallel or sequentially.
+
+-   `resources`: A `Resource` object defining the Task's resource
+    footprint. A `Resource` object has three attributes:
+    -   `cpu`: A Float, the fractional number of cores the Task requires.
+    -   `ram`: An Integer, RAM bytes the Task requires.
+    -   `disk`: An Integer, disk bytes the Task requires.
+
+A basic Task definition looks like:
+
+    Task(
+        name="hello_world",
+        processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+        resources=Resources(cpu = 1.0,
+                            ram = 1*GB,
+                            disk = 1*GB))
+
+A Task has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#task-object)
+
+
+### SequentialTask: Running Processes in Parallel or Sequentially
+
+By default, a Task with several Processes runs them in parallel. There
+are two ways to run Processes sequentially:
+
+-   Include an `order` constraint in the Task definition's `constraints`
+    attribute whose arguments specify the processes' run order:
+
+        Task( ... processes=[process1, process2, process3],
+              constraints = order(process1, process2, process3), ...)
+
+-   Use `SequentialTask` instead of `Task`; it automatically runs
+    processes in the order specified in the `processes` attribute. No
+    `constraint` parameter is needed:
+
+        SequentialTask( ... processes=[process1, process2, process3] ...)
+
+### SimpleTask
+
+For quickly creating simple tasks, use the `SimpleTask` helper. It
+creates a basic task from a provided name and command line using a
+default set of resources. For example, in a `.aurora` configuration
+file:
+
+    SimpleTask(name="hello_world", command="echo hello world")
+
+is equivalent to
+
+    Task(name="hello_world",
+         processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+         resources=Resources(cpu = 1.0,
+                             ram = 1*GB,
+                             disk = 1*GB))
+
+The simplest idiomatic Job configuration thus becomes:
+
+    import os
+    hello_world_job = Job(
+      task=SimpleTask(name="hello_world", command="echo hello world"),
+      role=os.getenv('USER'),
+      cluster="cluster1")
+
+When written to `hello_world.aurora`, you invoke it with a simple
+`aurora job create cluster1/$USER/test/hello_world hello_world.aurora`.
+
+### Combining tasks
+
+`Tasks.concat` (synonym: `concat_tasks`) and
+`Tasks.combine` (synonym: `combine_tasks`) merge multiple Task definitions
+into a single Task. It may be easier to define complex Jobs
+as smaller constituent Tasks. But since a Job only includes a single
+Task, the subtasks must be combined before using them in a Job.
+Smaller Tasks can also be reused between Jobs, instead of having to
+repeat their definition for multiple Jobs.
+
+With both methods, the merged Task takes the first Task's name. The
+difference between the two is the result Task's process ordering.
+
+-   `Tasks.combine` runs its subtasks' processes in no particular order.
+    The new Task's resource consumption is the sum of all its subtasks'
+    consumption.
+
+-   `Tasks.concat` runs its subtasks in the order supplied, with each
+    subtask's processes run serially between tasks. It is analogous to
+    the `order` constraint helper, except at the Task level instead of
+    the Process level. The new Task's resource consumption is the
+    maximum value specified by any subtask for each Resource attribute
+    (cpu, ram and disk).
+
+For example, given the following:
+
+    setup_task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper],
+      # It is important to note that Tasks.concat has
+      # no effect on the ordering of the processes within a task;
+      # hence the necessity of the order() constraint below
+      # (otherwise, the order in which download_interpreter
+      # and update_zookeeper run will be non-deterministic)
+      constraints=order(download_interpreter, update_zookeeper),
+      ...
+    )
+
+    run_task = SequentialTask(
+      ...
+      processes=[download_application, start_application],
+      ...
+    )
+
+    combined_task = Tasks.concat(setup_task, run_task)
+
+The `Tasks.concat` command merges the two Tasks into a single Task and
+ensures all processes in `setup_task` run before the processes
+in `run_task`. Conceptually, the task is reduced to:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper,
+                        download_application, start_application),
+      ...
+    )
+
+In the case of `Tasks.combine`, the two schedules run in parallel:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper) +
+                        order(download_application, start_application),
+      ...
+    )
+
+In the latter case, each of the two sequences may operate in parallel.
+Of course, this may not be the intended behavior (for example, if
+the `start_application` Process implicitly relies
+upon `download_interpreter`). Make sure you understand the difference
+between using one or the other.
+
+## Defining Job Objects
+
+A job is a group of identical tasks that Aurora can run in a Mesos cluster.
+
+A `Job` object is defined by the values of several attributes, some
+required and some optional. The required attributes are:
+
+-   `task`: Task object to bind to this job. Note that a Job can
+    only take a single Task.
+
+-   `role`: Job's role account; in other words, the user account to run
+    the job as on a Mesos cluster machine. A common value is
+    `os.getenv('USER')`; using a Python command to get the user who
+    submits the job request. The other common value is the service
+    account that runs the job, e.g. `www-data`.
+
+-   `environment`: Job's environment, typical values
+    are `devel`, `test`, or `prod`.
+
+-   `cluster`: Aurora cluster to schedule the job in, defined in
+    `/etc/aurora/clusters.json` or `~/.clusters.json`. You can specify
+    jobs where the only difference is the `cluster`, then at run time
+    only run the Job whose job key includes your desired cluster's name.
+
+You usually see a `name` parameter. By default, `name` inherits its
+value from the Job's associated Task object, but you can override this
+default. Combining these parameters, a Job definition might look like:
+
+    foo_job = Job( name = 'foo', cluster = 'cluster1',
+              role = os.getenv('USER'), environment = 'prod',
+              task = foo_task)
+
+In addition to the required attributes, there are several optional
+attributes. Details can be found in the [Aurora Configuration Reference](../configuration/#job-objects).
+
+
+## The jobs List
+
+At the end of your `.aurora` file, you need to specify a list of the
+file's defined Jobs. For example, the following exports the jobs `job1`,
+`job2`, and `job3`.
+
+    jobs = [job1, job2, job3]
+
+This allows the aurora client to invoke commands on those jobs, such as
+starting, updating, or killing them.
+
+
+
+Basic Examples
+==============
+
+These are provided to give a basic understanding of simple Aurora jobs.
+
+### hello_world.aurora
+
+Put the following in a file named `hello_world.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    import os
+    hello_world_process = Process(name = 'hello_world', cmdline = 'echo hello world')
+
+    hello_world_task = Task(
+      resources = Resources(cpu = 0.1, ram = 16 * MB, disk = 16 * MB),
+      processes = [hello_world_process])
+
+    hello_world_job = Job(
+      cluster = 'cluster1',
+      role = os.getenv('USER'),
+      task = hello_world_task)
+
+    jobs = [hello_world_job]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world hello_world.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world
+
+### Environment Tailoring
+
+Put the following in a file named `hello_world_productionized.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    include('hello_world.aurora')
+
+    production_resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 2 * GB)
+    staging_resources = Resources(cpu = 0.1, ram = 32 * MB, disk = 512 * MB)
+    hello_world_template = hello_world_job(
+        name = "hello_world-{{cluster}}",
+        task = hello_world_task(resources = production_resources))
+
+    jobs = [
+      # production jobs
+      hello_world_template(cluster = 'cluster1', instances = 25),
+      hello_world_template(cluster = 'cluster2', instances = 15),
+
+      # staging jobs
+      hello_world_template(
+        cluster = 'local',
+        instances = 1,
+        task = hello_world_task(resources = staging_resources)),
+    ]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world-cluster1 hello_world_productionized.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world-cluster1
diff --git a/source/documentation/0.16.0/reference/configuration.md b/source/documentation/0.16.0/reference/configuration.md
new file mode 100644
index 0000000..087efd1
--- /dev/null
+++ b/source/documentation/0.16.0/reference/configuration.md
@@ -0,0 +1,604 @@
+Aurora Configuration Reference
+==============================
+
+Don't know where to start? The Aurora configuration schema is very
+powerful, and configurations can become quite complex for advanced use
+cases.
+
+For examples of simple configurations to get something up and running
+quickly, check out the [Tutorial](../../getting-started/tutorial/). When you feel comfortable with the basics, move
+on to the [Configuration Tutorial](../configuration-tutorial/) for more in-depth coverage of
+configuration design.
+
+- [Process Schema](#process-schema)
+    - [Process Objects](#process-objects)
+- [Task Schema](#task-schema)
+    - [Task Object](#task-object)
+    - [Constraint Object](#constraint-object)
+    - [Resource Object](#resource-object)
+- [Job Schema](#job-schema)
+    - [Job Objects](#job-objects)
+    - [UpdateConfig Objects](#updateconfig-objects)
+    - [HealthCheckConfig Objects](#healthcheckconfig-objects)
+    - [Announcer Objects](#announcer-objects)
+    - [Container Objects](#container)
+    - [LifecycleConfig Objects](#lifecycleconfig-objects)
+- [Specifying Scheduling Constraints](#specifying-scheduling-constraints)
+- [Template Namespaces](#template-namespaces)
+    - [mesos Namespace](#mesos-namespace)
+    - [thermos Namespace](#thermos-namespace)
+
+
+Process Schema
+==============
+
+Process objects consist of required `name` and `cmdline` attributes. You can customize Process
+behavior with its optional attributes. Remember, Processes are handled by Thermos.
+
+### Process Objects
+
+  **Attribute Name**  | **Type**    | **Description**
+  ------------------- | :---------: | ---------------------------------
+   **name**           | String      | Process name (Required)
+   **cmdline**        | String      | Command line (Required)
+   **max_failures**   | Integer     | Maximum process failures (Default: 1)
+   **daemon**         | Boolean     | When True, this is a daemon process. (Default: False)
+   **ephemeral**      | Boolean     | When True, this is an ephemeral process. (Default: False)
+   **min_duration**   | Integer     | Minimum duration between process restarts in seconds. (Default: 15)
+   **final**          | Boolean     | When True, this process is a finalizing one that should run last. (Default: False)
+   **logger**         | Logger      | Struct defining the log behavior for the process. (Default: Empty)
+
+#### name
+
+The name is any valid UNIX filename string (specifically no
+slashes, NULLs or leading periods). Within a Task object, each Process name
+must be unique.
+
+#### cmdline
+
+The command line run by the process. The command line is invoked in a bash
+subshell, so can involve fully-blown bash scripts. However, nothing is
+supplied for command-line arguments so `$*` is unspecified.
+
+#### max_failures
+
+The maximum number of failures (non-zero exit statuses) this process can
+have before being marked permanently failed and not retried. If a
+process permanently fails, Thermos looks at the failure limit of the task
+containing the process (usually 1) to determine if the task has
+failed as well.
+
+Setting `max_failures` to 0 makes the process retry
+indefinitely until it achieves a successful (zero) exit status.
+It retries at most once every `min_duration` seconds to prevent
+an effective denial of service attack on the coordinating Thermos scheduler.
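+
+For instance, a sketch of a process that retries indefinitely but waits
+at least 10 seconds between runs (the command and values are
+illustrative):
+
+    poller = Process(
+      name = 'poller',
+      cmdline = 'curl -sf http://localhost:8080/health',
+      max_failures = 0,
+      min_duration = 10)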
+
+#### daemon
+
+By default, Thermos processes are non-daemon. If `daemon` is set to True, a
+successful (zero) exit status does not prevent future process runs.
+Instead, the process reinvokes after `min_duration` seconds.
+However, the maximum failure limit still applies. A combination of
+`daemon=True` and `max_failures=0` causes a process to retry
+indefinitely regardless of exit status. This should be avoided
+for very short-lived processes because of the accumulation of
+checkpointed state for each process run. When running in Mesos
+specifically, `max_failures` is capped at 100.
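+
+A sketch of a long-lived helper that is re-run whenever it exits,
+regardless of exit status (the command is illustrative):
+
+    logrotate = Process(
+      name = 'logrotate',
+      cmdline = './logrotate conf/logrotate.conf',
+      daemon = True,
+      max_failures = 0)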
+
+#### ephemeral
+
+By default, Thermos processes are non-ephemeral. If `ephemeral` is set to
+True, the process' status is not used to determine if its containing task
+has completed. For example, consider a task with a non-ephemeral
+webserver process and an ephemeral logsaver process
+that periodically checkpoints its log files to a centralized data store.
+The task is considered finished once the webserver process has
+completed, regardless of the logsaver's current status.
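+
+A sketch of the webserver/logsaver pairing described above (the
+commands are illustrative):
+
+    webserver = Process(name = 'webserver', cmdline = './bin/run_server')
+
+    logsaver = Process(
+      name = 'logsaver',
+      cmdline = './bin/checkpoint_logs.sh',
+      daemon = True,
+      ephemeral = True)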
+
+#### min_duration
+
+Processes may succeed or fail multiple times during a single task's
+duration. Each of these is called a *process run*. `min_duration` is
+the minimum number of seconds the scheduler waits before running the
+same process.
+
+#### final
+
+Processes can be grouped into two classes: ordinary processes and
+finalizing processes. By default, Thermos processes are ordinary. They
+run as long as the task is considered healthy (i.e., no failure
+limits have been reached.) But once all regular Thermos processes
+finish or the task reaches a certain failure threshold, it
+moves into a "finalization" stage and runs all finalizing
+processes. These are typically processes necessary for cleaning up the
+task, such as log checkpointers, or perhaps e-mail notifications that
+the task completed.
+
+Finalizing processes may not depend upon ordinary processes or
+vice-versa; however, finalizing processes may depend upon other
+finalizing processes and otherwise follow a typical process
+schedule.
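+
+For example, a hypothetical notification process that only runs during
+finalization:
+
+    notify = Process(
+      name = 'notify',
+      cmdline = 'echo "task finished" | mail myteam@foocorp.com',
+      final = True)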
+
+#### logger
+
+The default behavior of Thermos is to store stderr/stdout logs in files which grow unbounded.
+In the event that you have large log volume, you may want to configure Thermos to automatically
+rotate logs after they grow to a certain size, which can prevent your job from using more than its
+allocated disk space.
+
+Logger objects specify a `destination` for Process logs which is, by default, `file` - a pair of
+`stdout` and `stderr` files. It's also possible to specify `console` to send logs to
+the Process stdout and stderr streams, `none` to suppress any log output, or `both` to send logs to
+files and console streams.
+
+The default Logger `mode` is `standard` which lets the stdout and stderr streams grow without bound.
+
+  **Attribute Name**  | **Type**          | **Description**
+  ------------------- | :---------------: | ---------------------------------
+   **destination**    | LoggerDestination | Destination of logs. (Default: `file`)
+   **mode**           | LoggerMode        | Mode of the logger. (Default: `standard`)
+   **rotate**         | RotatePolicy      | An optional rotation policy. (Default: `Empty`)
+
+A RotatePolicy describes log rotation behavior when `mode` is set to `rotate`; it is ignored
+otherwise. If `rotate` is `Empty` or `RotatePolicy()` while `mode` is set to `rotate`, the
+defaults below are used.
+
+  **Attribute Name**  | **Type**     | **Description**
+  ------------------- | :----------: | ---------------------------------
+   **log_size**       | Integer      | Maximum size (in bytes) of an individual log file. (Default: 100 MiB)
+   **backups**        | Integer      | The maximum number of backups to retain. (Default: 5)
+
+An example process configuration is as follows:
+
+        process = Process(
+          name='process',
+          logger=Logger(
+            destination=LoggerDestination('both'),
+            mode=LoggerMode('rotate'),
+            rotate=RotatePolicy(log_size=5*MB, backups=5)
+          )
+        )
+
+Task Schema
+===========
+
+Tasks fundamentally consist of a `name` and a list of Process objects stored as the
+value of the `processes` attribute. Processes can be further constrained with
+`constraints`. By default, `name`'s value inherits from the first Process in the
+`processes` list, so for simple `Task` objects with one Process, `name`
+can be omitted. In Mesos, `resources` is also required.
+
+### Task Object
+
+   **param**               | **type**                         | **description**
+   ---------               | :---------:                      | ---------------
+   ```name```              | String                           | Process name (Required) (Default: ```processes0.name```)
+   ```processes```         | List of ```Process``` objects    | List of ```Process``` objects bound to this task. (Required)
+   ```constraints```       | List of ```Constraint``` objects | List of ```Constraint``` objects constraining processes.
+   ```resources```         | ```Resource``` object            | Resource footprint. (Required)
+   ```max_failures```      | Integer                          | Maximum process failures before being considered failed (Default: 1)
+   ```max_concurrency```   | Integer                          | Maximum number of concurrent processes (Default: 0, unlimited concurrency.)
+   ```finalization_wait``` | Integer                          | Amount of time allocated for finalizing processes, in seconds. (Default: 30)
+
+#### name
+`name` is a string denoting the name of this task. It defaults to the name of the first Process in
+the list of Processes associated with the `processes` attribute.
+
+#### processes
+
+`processes` is an unordered list of `Process` objects. To constrain the order
+in which they run, use `constraints`.
+
+##### constraints
+
+A list of `Constraint` objects. Currently it supports only one type,
+the `order` constraint. `order` is a list of process names
+that should run in the order given. For example,
+
+        process = Process(cmdline = "echo hello {{name}}")
+        task = Task(name = "echoes",
+                    processes = [process(name = "jim"), process(name = "bob")],
+                    constraints = [Constraint(order = ["jim", "bob"])])
+
+Constraints can be supplied ad-hoc and in duplicate. Not all
+Processes need be constrained, however Tasks with cycles are
+rejected by the Thermos scheduler.
+
+Use the `order` function as shorthand to generate `Constraint` lists.
+The following:
+
+        order(process1, process2)
+
+is shorthand for
+
+        [Constraint(order = [process1.name(), process2.name()])]
+
+The `order` function accepts Process name strings `('foo', 'bar')` or the processes
+themselves, e.g. `foo=Process(name='foo', ...)`, `bar=Process(name='bar', ...)`,
+`constraints=order(foo, bar)`.
+
+#### resources
+
+Takes a `Resource` object, which specifies the amounts of CPU, memory, and disk space resources
+to allocate to the Task.
+
+#### max_failures
+
+`max_failures` is the number of failed processes needed for the `Task` to be
+marked as failed.
+
+For example, assume a Task has two Processes and a `max_failures` value of `2`:
+
+        template = Process(max_failures=10)
+        task = Task(
+          name = "fail",
+          processes = [
+             template(name = "failing", cmdline = "exit 1"),
+             template(name = "succeeding", cmdline = "exit 0")
+          ],
+          max_failures=2)
+
+The `failing` Process could fail 10 times before being marked as permanently
+failed, and the `succeeding` Process could succeed on the first run. The Task
+would still succeed, because only one Process failed permanently: there would be
+10 failed process *runs* but only 1 failed *Process*. With `max_failures=2`, both
+Processes would have to fail for the Task to fail.
+
+#### max_concurrency
+
+For Tasks with a number of expensive but otherwise independent
+processes, you may want to limit the amount of concurrency
+the Thermos scheduler provides rather than artificially constraining
+it via `order` constraints. For example, a test framework may
+generate a task with 100 test run processes, but wants to run it on
+a machine with only 4 cores. You can limit the amount of parallelism to
+4 by setting `max_concurrency=4` in your task configuration.
+
+For example, the following task spawns 180 Processes ("mappers")
+to compute individual elements of a 180 degree sine table, with one final
+Process ("reducer") that tabulates the results once all mappers finish:
+
+    def make_mapper(id):
+      return Process(
+        name = "mapper%03d" % id,
+        cmdline = "echo 'scale=50;s(%d*4*a(1)/180)' | bc -l > temp.sine_table.%03d" % (id, id))
+
+    def make_reducer():
+      return Process(
+        name = "reducer",
+        cmdline = "cat temp.* | nl > sine_table.txt && rm -f temp.*")
+
+    processes = map(make_mapper, range(180))
+
+    task = Task(
+      name = "mapreduce",
+      processes = processes + [make_reducer()],
+      constraints = [Constraint(order = [mapper.name(), 'reducer']) for mapper
+                     in processes],
+      max_concurrency = 8)
+
+#### finalization_wait
+
+Process execution is organized into three stages: `ACTIVE`,
+`CLEANING`, and `FINALIZING`. The `ACTIVE` stage is when ordinary processes run.
+This stage lasts as long as Processes are running and the Task is healthy.
+The moment either all Processes have finished successfully or the Task has reached a
+maximum Process failure limit, it goes into the `CLEANING` stage and sends
+SIGTERMs to all currently running Processes and their process trees.
+Once all Processes have terminated, the Task goes into the `FINALIZING` stage
+and invokes the schedule of all Processes with the "final" attribute set to True.
+
+This whole process from the end of `ACTIVE` stage to the end of `FINALIZING`
+must happen within `finalization_wait` seconds. If it does not
+finish during that time, all remaining Processes are sent SIGKILLs
+(or if they depend upon uncompleted Processes, are
+never invoked.)
+
+When running on Aurora, the `finalization_wait` is capped at 60 seconds.
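+
+For example, a sketch of a task that gives its finalizers a bit more
+time than the default (`main` and `log_checkpointer` are assumed to be
+defined elsewhere):
+
+    task = Task(
+      name = 'application',
+      processes = [main, log_checkpointer],
+      resources = Resources(cpu = 1.0, ram = 1*GB, disk = 1*GB),
+      finalization_wait = 45)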
+
+### Constraint Object
+
+Current constraint objects only support a single ordering constraint, `order`,
+which specifies its processes run sequentially in the order given. By
+default, all processes run in parallel when bound to a `Task` without
+ordering constraints.
+
+   param | type           | description
+   ----- | :----:         | -----------
+   order | List of String | List of processes by name (String) that should be run serially.
+
+### Resource Object
+
+Specifies the amount of CPU, RAM, and disk resources the task needs. See the
+[Resource Isolation document](../../features/resource-isolation/) for suggested values and to understand how
+resources are allocated.
+
+  param      | type    | description
+  -----      | :----:  | -----------
+  ```cpu```  | Float   | Fractional number of cores required by the task.
+  ```ram```  | Integer | Bytes of RAM required by the task.
+  ```disk``` | Integer | Bytes of disk required by the task.
+  ```gpu```  | Integer | Number of GPU cores required by the task.
+
+
+Job Schema
+==========
+
+### Job Objects
+
+*Note: Specifying a ```Container``` object as the value of the ```container``` property is
+  deprecated in favor of setting its value directly to the appropriate ```Docker``` or ```Mesos```
+  container type*
+
+*Note: Specifying preemption behavior of tasks through `production` flag is deprecated in favor of
+  electing appropriate task tier via `tier` attribute.*
+
+   name | type | description
+   ------ | :-------: | -------
+  ```task``` | Task | The Task object to bind to this job. Required.
+  ```name``` | String | Job name. (Default: inherited from the task attribute's name)
+  ```role``` | String | Job role account. Required.
+  ```cluster``` | String | Cluster in which this job is scheduled. Required.
+  ```environment``` | String | Job environment, default ```devel```. Must be one of ```prod```, ```devel```, ```test``` or ```staging<number>```.
+  ```contact``` | String | Best email address to reach the owner of the job. For production jobs, this is usually a team mailing list.
+  ```instances```| Integer | Number of instances (sometimes referred to as replicas or shards) of the task to create. (Default: 1)
+  ```cron_schedule``` | String | Cron schedule in cron format. May only be used with non-service jobs. See [Cron Jobs](../../features/cron-jobs/) for more information. Default: None (not a cron job.)
+  ```cron_collision_policy``` | String | Policy to use when a cron job is triggered while a previous run is still active. `KILL_EXISTING`: kill the previous run and schedule the new run. `CANCEL_NEW`: let the previous run continue and cancel the new run. (Default: KILL_EXISTING)
+  ```update_config``` | ```UpdateConfig``` object | Parameters for controlling the rate and policy of rolling updates.
+  ```constraints``` | dict | Scheduling constraints for the tasks. See the section on the [constraint specification language](#specifying-scheduling-constraints)
+  ```service``` | Boolean | If True, restart tasks regardless of success or failure. (Default: False)
+  ```max_task_failures``` | Integer | Maximum number of failures after which the task is considered to have failed (Default: 1) Set to -1 to allow for infinite failures
+  ```priority``` | Integer | Preemption priority to give the task (Default 0). Tasks with higher priorities may preempt tasks at lower priorities.
+  ```production``` | Boolean |  (Deprecated) Whether or not this is a production task that may [preempt](../../features/multitenancy/#preemption) other tasks (Default: False). Production job role must have the appropriate [quota](../../features/multitenancy/#preemption).
+  ```health_check_config``` | ```HealthCheckConfig``` object | Parameters for controlling a task's health checks. HTTP health check is only used if a  health port was assigned with a command line wildcard.
+  ```container``` | Choice of ```Container```, ```Docker``` or ```Mesos``` object | An optional container to run all processes inside of.
+  ```lifecycle``` | ```LifecycleConfig``` object | An optional task lifecycle configuration that dictates commands to be executed on startup/teardown.  HTTP lifecycle is enabled by default if the "health" port is requested.  See [LifecycleConfig Objects](#lifecycleconfig-objects) for more information.
+  ```tier``` | String | Task tier type. The default scheduler tier configuration allows for 3 tiers: `revocable`, `preemptible`, and `preferred`. If a tier is not elected, Aurora assigns the task to a tier based on its choice of `production` (that is `preferred` for production and `preemptible` for non-production jobs). See the section on [Configuration Tiers](../../features/multitenancy/#configuration-tiers) for more information.
+  ```announce``` | ```Announcer``` object | Optionally enable Zookeeper ServerSet announcements. See [Announcer Objects](#announcer-objects) for more information.
+  ```enable_hooks``` | Boolean | Whether to enable [Client Hooks](../client-hooks/) for this job. (Default: False)
+
+
+### UpdateConfig Objects
+
+Parameters for controlling the rate and policy of rolling updates.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+| ```watch_secs```             | Integer  | Minimum number of seconds a shard must remain in ```RUNNING``` state before considered a success (Default: 45)
+| ```max_per_shard_failures``` | Integer  | Maximum number of restarts per shard during update. Increments total failure count when this limit is exceeded. (Default: 0)
+| ```max_total_failures```     | Integer  | Maximum number of shard failures to be tolerated in total during an update. Cannot be greater than or equal to the total number of tasks in a job. (Default: 0)
+| ```rollback_on_failure```    | boolean  | When False, prevents auto rollback of a failed update (Default: True)
+| ```wait_for_batch_completion```| boolean | When True, all threads from a given batch will be blocked from picking up new instances until the entire batch is updated. This essentially simulates the legacy sequential updater algorithm. (Default: False)
+| ```pulse_interval_secs```    | Integer  |  Indicates a [coordinated update](../../features/job-updates/#coordinated-job-updates). If no pulses are received within the provided interval the update will be blocked. Beta-updater only. Will fail on submission when used with client updater. (Default: None)
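+
+For example, a sketch of an update policy that could be passed as a
+job's `update_config` attribute (values are illustrative):
+
+    update_config = UpdateConfig(
+      batch_size = 3,
+      watch_secs = 60,
+      max_per_shard_failures = 2,
+      max_total_failures = 5)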
+
+### HealthCheckConfig Objects
+
+Parameters for controlling a task's health checks via HTTP or a shell command.
+
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```health_checker```           | HealthCheckerConfig | Configure what kind of health check to use.
+| ```initial_interval_secs```    | Integer   | Initial delay for performing a health check. (Default: 15)
+| ```interval_secs```            | Integer   | Interval on which to check the task's health. (Default: 10)
+| ```max_consecutive_failures``` | Integer   | Maximum number of consecutive failures that will be tolerated before considering a task unhealthy (Default: 0)
+| ```timeout_secs```             | Integer   | Health check timeout. (Default: 1)
+
+### HealthCheckerConfig Objects
+| param                          | type                | description
+| -------                        | :-------:           | --------
+| ```http```                     | HttpHealthChecker  | Configure health check to use HTTP. (Default)
+| ```shell```                    | ShellHealthChecker | Configure health check via a shell command.
+
+### HttpHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```endpoint```                 | String    | HTTP endpoint to check (Default: /health)
+| ```expected_response```        | String    | If not empty, fail the HTTP health check if the response differs. Case insensitive. (Default: ok)
+| ```expected_response_code```   | Integer   | If not zero, fail the HTTP health check if the response code differs. (Default: 0)
+
+### ShellHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```shell_command```            | String    | An alternative to HTTP health checking. Specifies a shell command that will be executed. Any non-zero exit status will be interpreted as a health check failure.
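+
+For example, a sketch of a job's `health_check_config` using a shell
+command instead of HTTP (the command is illustrative):
+
+    health_check_config = HealthCheckConfig(
+      health_checker = HealthCheckerConfig(
+        shell = ShellHealthChecker(shell_command = 'check_app_health.sh')),
+      interval_secs = 30,
+      max_consecutive_failures = 3)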
+
+
+### Announcer Objects
+
+If the `announce` field in the Job configuration is set, each task will be
+registered in the ServerSet `/aurora/role/environment/jobname` in the
+zookeeper ensemble configured by the executor (which can be optionally overridden by specifying
+the `zk_path` parameter).  If no Announcer object is specified,
+no announcement will take place.  For more information about ServerSets, see the [Service Discovery](../../features/service-discovery/)
+documentation.
+
+By default, the hostname in the registered endpoints will be the `--hostname` parameter
+that is passed to the mesos agent. To override the hostname value, the executor can be started
+with `--announcer-hostname=<overridden_value>`. If you decide to use `--announcer-hostname` and if
+the overridden value needs to change for every executor, then the executor has to be started inside a wrapper, see [Executor Wrapper](../../operations/configuration/#thermos-executor-wrapper).
+
+For example, if you want the hostname in the endpoint to be an IP address instead of the hostname,
+the `--hostname` parameter to the mesos agent can be set to the machine IP or the executor can
+be started with `--announcer-hostname=<host_ip>` while wrapping the executor inside a script.
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```primary_port```             | String    | Which named port to register as the primary endpoint in the ServerSet (Default: `http`)
+| ```portmap```                  | dict      | A mapping of additional endpoints to be announced in the ServerSet (Default: `{ 'aurora': '{{primary_port}}' }`)
+| ```zk_path```                  | String    | Zookeeper serverset path override (executor must be started with the `--announcer-allow-custom-serverset-path` parameter)
+
+#### Port aliasing with the Announcer `portmap`
+
+The primary endpoint registered in the ServerSet is the one allocated to the port
+specified by the `primary_port` in the `Announcer` object, by default
+the `http` port.  This port can be referenced from anywhere within a configuration
+as `{{thermos.ports[http]}}`.
+
+Without the port map, each named port would be allocated a unique port number.
+The `portmap` allows two different named ports to be aliased together.  The default
+`portmap` aliases the `aurora` port (i.e. `{{thermos.ports[aurora]}}`) to
+the `http` port.  Even though the two ports can be referenced independently,
+only one port is allocated by Mesos.  Any port referenced in a `Process` object
+but which is not in the portmap will be allocated dynamically by Mesos and announced as well.
+
+It is possible to use the portmap to alias names to static port numbers, e.g.
+`{'http': 80, 'https': 443, 'aurora': 'http'}`.  In this case, referencing
+`{{thermos.ports[aurora]}}` would look up `{{thermos.ports[http]}}` then
+find a static port 80.  No port would be requested of or allocated by Mesos.
+
+Static ports should be used cautiously as Aurora does nothing to prevent two
+tasks with the same static port allocations from being co-scheduled.
+External constraints such as agent attributes should be used to enforce such
+guarantees should they be needed.
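+
+For illustration, a sketch of a Job's `announce` field that keeps the default primary port and
+the default alias (values here are only an example, not a required configuration):
+
+    announce = Announcer(
+      primary_port = 'http',
+      # The default portmap simply aliases the 'aurora' name to the dynamically
+      # allocated 'http' port; only one port is requested from Mesos.
+      portmap = {'aurora': 'http'})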
+
+
+### Container Objects
+
+Describes the container the job's processes will run inside. If not using Docker or the Mesos
+unified-container, the container can be omitted from your job config.
+
+  param          | type           | description
+  -----          | :----:         | -----------
+  ```mesos```    | Mesos          | A native Mesos container to use.
+  ```docker```   | Docker         | A Docker container to use (via Docker engine)
+
+### Mesos Object
+
+  param            | type                           | description
+  -----            | :----:                         | -----------
+  ```image```      | Choice(AppcImage, DockerImage) | An optional filesystem image to use within this container.
+
+### AppcImage
+
+Describes an AppC filesystem image.
+
+  param          | type   | description
+  -----          | :----: | -----------
+  ```name```     | String | The name of the appc image.
+  ```image_id``` | String | The [image id](https://github.com/appc/spec/blob/master/spec/aci.md#image-id) of the appc image.
+
+### DockerImage
+
+Describes a Docker filesystem image.
+
+  param      | type   | description
+  -----      | :----: | -----------
+  ```name``` | String | The name of the docker image.
+  ```tag```  | String | The tag that identifies the docker image.
+
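+For illustration, a sketch of a job's `container` value using the Mesos containerizer with a
+Docker filesystem image (the image name and tag are examples only):
+
+    container = Container(
+      mesos = Mesos(
+        image = DockerImage(name = 'ubuntu', tag = '16.04')))
+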
+
+### Docker Object
+
+*Note: In order to correctly execute processes inside a job, the Docker container must have Python 2.7 installed.*
+*Note: For a private Docker registry, Mesos mandates that the credential file be named `.dockercfg`, even though Docker may create a credential file with a different name on various platforms. Also, the `.dockercfg` file needs to be copied into the sandbox using the `-thermos_executor_resources` flag specified when starting Aurora.*
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```image```      | String          | The name of the docker image to execute.  If the image does not exist locally it will be pulled with ```docker pull```.
+  ```parameters``` | List(Parameter) | Additional parameters to pass to the Docker engine.
+
+### Docker Parameter Object
+
+Docker CLI parameters. This needs to be enabled by the scheduler `-allow_docker_parameters` option.
+See [Docker Command Line Reference](https://docs.docker.com/reference/commandline/run/) for valid parameters.
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```name```       | String          | The name of the docker parameter. E.g. volume
+  ```value```      | String          | The value of the parameter. E.g. /usr/local/bin:/usr/bin:rw
+
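+For illustration, a sketch of a `container` value that runs the task inside a Docker image and
+passes an extra parameter to the Docker engine (the values are examples only, and the scheduler
+must be started with `-allow_docker_parameters` for the parameter to be accepted):
+
+    container = Container(
+      docker = Docker(
+        image = 'python:2.7',
+        parameters = [Parameter(name = 'volume', value = '/usr/local/bin:/usr/bin:rw')]))
+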
+
+### LifecycleConfig Objects
+
+*Note: The only lifecycle configuration supported is the HTTP lifecycle via the HttpLifecycleConfig.*
+
+  param          | type                | description
+  -----          | :----:              | -----------
+  ```http```     | HttpLifecycleConfig | Configure the lifecycle manager to send lifecycle commands to the task via HTTP.
+
+### HttpLifecycleConfig Objects
+
+  param          | type            | description
+  -----          | :----:          | -----------
+  ```port```     | String          | The named port to which lifecycle POST commands are sent (Default: health)
+  ```graceful_shutdown_endpoint``` | String | Endpoint to hit to indicate that a task should gracefully shutdown. (Default: /quitquitquit)
+  ```shutdown_endpoint``` | String | Endpoint to hit to give a task its final warning before being killed. (Default: /abortabortabort)
+
+#### graceful_shutdown_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint to request that the task gracefully shut itself down.  This is a
+courtesy call before the `shutdown_endpoint` is invoked a fixed amount of
+time later.
+
+#### shutdown_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint as a final warning before the task is shut down.  If the task
+does not shut down on its own after this, it will be forcefully killed.
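+
+For illustration, a sketch of a lifecycle configuration that spells out the defaults listed above
+(attaching it to the task's `lifecycle` field is an assumption based on the object names in this
+reference):
+
+    lifecycle = LifecycleConfig(
+      http = HttpLifecycleConfig(
+        port = 'health',
+        graceful_shutdown_endpoint = '/quitquitquit',
+        shutdown_endpoint = '/abortabortabort'))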
+
+
+Specifying Scheduling Constraints
+=================================
+
+In the `Job` object there is a map `constraints` from String to String
+allowing the user to tailor the schedulability of tasks within the job.
+
+The constraint map's key is the attribute name on which we
+constrain Tasks within our Job. The value is how we constrain them.
+There are two types of constraints: *limit constraints* and *value
+constraints*.
+
+| constraint    | description
+| ------------- | --------------
+| Limit         | A string that specifies a limit for a constraint. Starts with <code>'limit:</code> followed by an Integer and closing single quote, such as ```'limit:1'```.
+| Value         | A string that specifies a value for a constraint. To include a list of values, separate the values using commas. To negate the values of a constraint, start with a ```!```.
+
+Further details can be found in the [Scheduling Constraints](../../features/constraints/) feature
+description.
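+
+For illustration, a sketch of a `constraints` map combining both kinds of constraint (the
+attribute names are examples and must exist on your agents):
+
+    constraints = {
+      # Limit constraint: at most one task from this job per value of the 'rack' attribute.
+      'rack': 'limit:1',
+      # Value constraint: only schedule on agents whose 'zone' attribute is 'east' or 'west'.
+      'zone': 'east,west',
+    }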
+
+
+Template Namespaces
+===================
+
+Currently, a few Pystachio namespaces have special semantics. Using them
+in your configuration allows you to tailor application behavior
+through environment introspection or to interact in special ways with the
+Aurora client or Aurora-provided services.
+
+### mesos Namespace
+
+The `mesos` namespace contains variables which relate to the `mesos` agent
+which launched the task. The `instance` variable can be used
+to distinguish between Task replicas.
+
+| variable name     | type       | description
+| --------------- | :--------: | -------------
+| ```instance```    | Integer    | The instance number of the created task. A job with 5 replicas has instance numbers 0, 1, 2, 3, and 4.
+| ```hostname``` | String | The instance hostname that the task was launched on.
+
+Please note, there is no uniqueness guarantee for `instance` in the presence of
+network partitions. If that is required, it should be baked in at the application
+level using a distributed coordination service such as Zookeeper.
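+
+For illustration, a sketch of a `Process` that uses these variables (the command itself is only
+an example):
+
+    hello = Process(
+      name = 'hello',
+      cmdline = 'echo "instance {{mesos.instance}} running on {{mesos.hostname}}"')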
+
+### thermos Namespace
+
+The `thermos` namespace contains variables that work directly on the
+Thermos platform in addition to Aurora. This namespace is fully
+compatible with Tasks invoked via the `thermos` CLI.
+
+| variable      | type                     | description                        |
+| :----------:  | ---------                | ------------                       |
+| ```ports```   | map of string to Integer | A map of names to port numbers     |
+| ```task_id``` | string                   | The task ID assigned to this task. |
+
+The `thermos.ports` namespace is automatically populated by Aurora when
+invoking tasks on Mesos. When running the `thermos` command directly,
+these ports must be explicitly mapped with the `-P` option.
+
+For example, if `{{thermos.ports[http]}}` is specified in a `Process`
+configuration, it is automatically extracted and auto-populated by
+Aurora, but must be specified with, for example, `thermos -P http:12345`
+to map `http` to port 12345 when running via the CLI.
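+
+For illustration, a sketch of a `Process` that binds to a named port (the server command is only
+an example). Under Aurora the port is allocated automatically, while the `thermos` CLI requires
+the explicit `-P http:12345`-style mapping described above:
+
+    http_server = Process(
+      name = 'http_server',
+      cmdline = 'python -m SimpleHTTPServer {{thermos.ports[http]}}')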
diff --git a/source/documentation/0.16.0/reference/scheduler-configuration.md b/source/documentation/0.16.0/reference/scheduler-configuration.md
new file mode 100644
index 0000000..00815f1
--- /dev/null
+++ b/source/documentation/0.16.0/reference/scheduler-configuration.md
@@ -0,0 +1,248 @@
+# Scheduler Configuration Reference
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+A list of the available options can be seen by running `aurora-scheduler -help`.
+
+Please refer to the [Operator Configuration Guide](../../operations/configuration/) for details on how
+to properly set the most important options.
+
+```
+$ aurora-scheduler -help
+-------------------------------------------------------------------------
+-h or -help to print this help message
+
+Required flags:
+-backup_dir [not null]
+	Directory to store backups under. Will be created if it does not exist.
+-cluster_name [not null]
+	Name to identify the cluster being served.
+-db_max_active_connection_count [must be > 0]
+	Max number of connections to use with database via MyBatis
+-db_max_idle_connection_count [must be > 0]
+	Max number of idle connections to the database via MyBatis
+-framework_authentication_file
+	Properties file which contains framework credentials to authenticate with Mesosmaster. Must contain the properties 'aurora_authentication_principal' and 'aurora_authentication_secret'.
+-ip
+	The ip address to listen. If not set, the scheduler will listen on all interfaces.
+-mesos_master_address [not null]
+	Address for the mesos master, can be a socket address or zookeeper path.
+-mesos_role
+	The Mesos role this framework will register as. The default is to left this empty, and the framework will register without any role and only receive unreserved resources in offer.
+-serverset_path [not null, must be non-empty]
+	ZooKeeper ServerSet path to register at.
+-shiro_after_auth_filter
+	Fully qualified class name of the servlet filter to be applied after the shiro auth filters are applied.
+-thermos_executor_path
+	Path to the thermos executor entry point.
+-tier_config [file must be readable]
+	Configuration file defining supported task tiers, task traits and behaviors.
+-webhook_config [file must exist, file must be readable]
+	Path to webhook configuration file.
+-zk_endpoints [must have at least 1 item]
+	Endpoint specification for the ZooKeeper servers.
+
+Optional flags:
+-allow_docker_parameters (default false)
+	Allow to pass docker container parameters in the job.
+-allow_gpu_resource (default false)
+	Allow jobs to request Mesos GPU resource.
+-allowed_container_types (default [MESOS])
+	Container types that are allowed to be used by jobs.
+-async_slot_stat_update_interval (default (1, mins))
+	Interval on which to try to update open slot stats.
+-async_task_stat_update_interval (default (1, hrs))
+	Interval on which to try to update resource consumption stats.
+-async_worker_threads (default 8)
+	The number of worker threads to process async task operations with.
+-backup_interval (default (1, hrs))
+	Minimum interval on which to write a storage backup.
+-cron_scheduler_num_threads (default 100)
+	Number of threads to use for the cron scheduler thread pool.
+-cron_start_initial_backoff (default (1, secs))
+	Initial backoff delay while waiting for a previous cron run to be killed.
+-cron_start_max_backoff (default (1, mins))
+	Max backoff delay while waiting for a previous cron run to be killed.
+-cron_timezone (default GMT)
+	TimeZone to use for cron predictions.
+-custom_executor_config [file must exist, file must be readable]
+	Path to custom executor settings configuration file.
+-db_lock_timeout (default (1, mins))
+	H2 table lock timeout
+-db_row_gc_interval (default (2, hrs))
+	Interval on which to scan the database for unused row references.
+-default_docker_parameters (default {})
+	Default docker parameters for any job that does not explicitly declare parameters.
+-dlog_max_entry_size (default (512, KB))
+	Specifies the maximum entry size to append to the log. Larger entries will be split across entry Frames.
+-dlog_shutdown_grace_period (default (2, secs))
+	Specifies the maximum time to wait for scheduled checkpoint and snapshot actions to complete before forcibly shutting down.
+-dlog_snapshot_interval (default (1, hrs))
+	Specifies the frequency at which snapshots of local storage are taken and written to the log.
+-enable_cors_for
+	List of domains for which CORS support should be enabled.
+-enable_h2_console (default false)
+	Enable H2 DB management console.
+-enable_mesos_fetcher (default false)
+	Allow jobs to pass URIs to the Mesos Fetcher. Note that enabling this feature could pose a privilege escalation threat.
+-enable_preemptor (default true)
+	Enable the preemptor and preemption
+-enable_revocable_cpus (default true)
+	Treat CPUs as a revocable resource.
+-enable_revocable_ram (default false)
+	Treat RAM as a revocable resource.
+-executor_user (default root)
+	User to start the executor. Defaults to "root". Set this to an unprivileged user if the mesos master was started with "--no-root_submissions". If set to anything other than "root", the executor will ignore the "role" setting for jobs since it can't use setuid() anymore. This means that all your jobs will run under the specified user and the user has to exist on the Mesos agents.
+-first_schedule_delay (default (1, ms))
+	Initial amount of time to wait before first attempting to schedule a PENDING task.
+-flapping_task_threshold (default (5, mins))
+	A task that repeatedly runs for less than this time is considered to be flapping.
+-framework_announce_principal (default false)
+	When 'framework_authentication_file' flag is set, the FrameworkInfo registered with the mesos master will also contain the principal. This is necessary if you intend to use mesos authorization via mesos ACLs. The default will change in a future release. Changing this value is backwards incompatible. For details, see MESOS-703.
+-framework_failover_timeout (default (21, days))
+	Time after which a framework is considered deleted.  SHOULD BE VERY HIGH.
+-framework_name (default TwitterScheduler)
+	Name used to register the Aurora framework with Mesos. Changing this value can be backwards incompatible. For details, see MESOS-703.
+-global_container_mounts (default [])
+	A comma separated list of mount points (in host:container form) to mount into all (non-mesos) containers.
+-history_max_per_job_threshold (default 100)
+	Maximum number of terminated tasks to retain in a job history.
+-history_min_retention_threshold (default (1, hrs))
+	Minimum guaranteed time for task history retention before any pruning is attempted.
+-history_prune_threshold (default (2, days))
+	Time after which the scheduler will prune terminated task history.
+-hostname
+	The hostname to advertise in ZooKeeper instead of the locally-resolved hostname.
+-http_authentication_mechanism (default NONE)
+	HTTP Authentication mechanism to use.
+-http_port (default 0)
+	The port to start an HTTP server on.  Default value will choose a random port.
+-initial_flapping_task_delay (default (30, secs))
+	Initial amount of time to wait before attempting to schedule a flapping task.
+-initial_schedule_penalty (default (1, secs))
+	Initial amount of time to wait before attempting to schedule a task that has failed to schedule.
+-initial_task_kill_retry_interval (default (5, secs))
+	When killing a task, retry after this delay if mesos has not responded, backing off up to transient_task_state_timeout
+-job_update_history_per_job_threshold (default 10)
+	Maximum number of completed job updates to retain in a job update history.
+-job_update_history_pruning_interval (default (15, mins))
+	Job update history pruning interval.
+-job_update_history_pruning_threshold (default (30, days))
+	Time after which the scheduler will prune completed job update history.
+-kerberos_debug (default false)
+	Produce additional Kerberos debugging output.
+-kerberos_server_keytab
+	Path to the server keytab.
+-kerberos_server_principal
+	Kerberos server principal to use, usually of the form HTTP/aurora.example.com@EXAMPLE.COM
+-max_flapping_task_delay (default (5, mins))
+	Maximum delay between attempts to schedule a flapping task.
+-max_leading_duration (default (1, days))
+	After leading for this duration, the scheduler should commit suicide.
+-max_registration_delay (default (1, mins))
+	Max allowable delay to allow the driver to register before aborting
+-max_reschedule_task_delay_on_startup (default (30, secs))
+	Upper bound of random delay for pending task rescheduling on scheduler startup.
+-max_saved_backups (default 48)
+	Maximum number of backups to retain before deleting the oldest backups.
+-max_schedule_attempts_per_sec (default 40.0)
+	Maximum number of scheduling attempts to make per second.
+-max_schedule_penalty (default (1, mins))
+	Maximum delay between attempts to schedule a PENDING tasks.
+-max_status_update_batch_size (default 1000) [must be > 0]
+	The maximum number of status updates that can be processed in a batch.
+-max_tasks_per_job (default 4000) [must be > 0]
+	Maximum number of allowed tasks in a single job.
+-max_update_instance_failures (default 20000) [must be > 0]
+	Upper limit on the number of failures allowed during a job update. This helps cap potentially unbounded entries into storage.
+-min_offer_hold_time (default (5, mins))
+	Minimum amount of time to hold a resource offer before declining.
+-native_log_election_retries (default 20)
+	The maximum number of attempts to obtain a new log writer.
+-native_log_election_timeout (default (15, secs))
+	The timeout for a single attempt to obtain a new log writer.
+-native_log_file_path
+	Path to a file to store the native log data in.  If the parent directory doesnot exist it will be created.
+-native_log_quorum_size (default 1)
+	The size of the quorum required for all log mutations.
+-native_log_read_timeout (default (5, secs))
+	The timeout for doing log reads.
+-native_log_write_timeout (default (3, secs))
+	The timeout for doing log appends and truncations.
+-native_log_zk_group_path
+	A zookeeper node for use by the native log to track the master coordinator.
+-offer_filter_duration (default (5, secs))
+	Duration after which we expect Mesos to re-offer unused resources. A short duration improves scheduling performance in smaller clusters, but might lead to resource starvation for other frameworks if you run many frameworks in your cluster.
+-offer_hold_jitter_window (default (1, mins))
+	Maximum amount of random jitter to add to the offer hold time window.
+-offer_reservation_duration (default (3, mins))
+	Time to reserve a agent's offers while trying to satisfy a task preempting another.
+-populate_discovery_info (default false)
+	If true, Aurora populates DiscoveryInfo field of Mesos TaskInfo.
+-preemption_delay (default (3, mins))
+	Time interval after which a pending task becomes eligible to preempt other tasks
+-preemption_slot_hold_time (default (5, mins))
+	Time to hold a preemption slot found before it is discarded.
+-preemption_slot_search_interval (default (1, mins))
+	Time interval between pending task preemption slot searches.
+-receive_revocable_resources (default false)
+	Allows receiving revocable resource offers from Mesos.
+-reconciliation_explicit_batch_interval (default (5, secs))
+	Interval between explicit batch reconciliation requests.
+-reconciliation_explicit_batch_size (default 1000) [must be > 0]
+	Number of tasks in a single batch request sent to Mesos for explicit reconciliation.
+-reconciliation_explicit_interval (default (60, mins))
+	Interval on which scheduler will ask Mesos for status updates of all non-terminal tasks known to scheduler.
+-reconciliation_implicit_interval (default (60, mins))
+	Interval on which scheduler will ask Mesos for status updates of all non-terminal tasks known to Mesos.
+-reconciliation_initial_delay (default (1, mins))
+	Initial amount of time to delay task reconciliation after scheduler start up.
+-reconciliation_schedule_spread (default (30, mins))
+	Difference between explicit and implicit reconciliation intervals intended to create a non-overlapping task reconciliation schedule.
+-require_docker_use_executor (default true)
+	If false, Docker tasks may run without an executor (EXPERIMENTAL)
+-shiro_ini_path
+	Path to shiro.ini for authentication and authorization configuration.
+-shiro_realm_modules (default [org.apache.aurora.scheduler.app.MoreModules$1@158a8276])
+	Guice modules for configuring Shiro Realms.
+-sla_non_prod_metrics (default [])
+	Metric categories collected for non production tasks.
+-sla_prod_metrics (default [JOB_UPTIMES, PLATFORM_UPTIME, MEDIANS])
+	Metric categories collected for production tasks.
+-sla_stat_refresh_interval (default (1, mins))
+	The SLA stat refresh interval.
+-slow_query_log_threshold (default (25, ms))
+	Log all queries that take at least this long to execute.
+-slow_query_log_threshold (default (25, ms))
+	Log all queries that take at least this long to execute.
+-stat_retention_period (default (1, hrs))
+	Time for a stat to be retained in memory before expiring.
+-stat_sampling_interval (default (1, secs))
+	Statistic value sampling interval.
+-thermos_executor_cpu (default 0.25)
+	The number of CPU cores to allocate for each instance of the executor.
+-thermos_executor_flags
+	Extra arguments to be passed to the thermos executor
+-thermos_executor_ram (default (128, MB))
+	The amount of RAM to allocate for each instance of the executor.
+-thermos_executor_resources (default [])
+	A comma separated list of additional resources to copy into the sandbox.Note: if thermos_executor_path is not the thermos_executor.pex file itself, this must include it.
+-thermos_home_in_sandbox (default false)
+	If true, changes HOME to the sandbox before running the executor. This primarily has the effect of causing the executor and runner to extract themselves into the sandbox.
+-transient_task_state_timeout (default (5, mins))
+	The amount of time after which to treat a task stuck in a transient state as LOST.
+-use_beta_db_task_store (default false)
+	Whether to use the experimental database-backed task store.
+-viz_job_url_prefix (default )
+	URL prefix for job container stats.
+-zk_chroot_path
+	chroot path to use for the ZooKeeper connections
+-zk_digest_credentials
+	user:password to use when authenticating with ZooKeeper.
+-zk_in_proc (default false)
+	Launches an embedded zookeeper server for local testing causing -zk_endpoints to be ignored if specified.
+-zk_session_timeout (default (4, secs))
+	The ZooKeeper session timeout.
+-zk_use_curator (default true)
+	DEPRECATED: Uses Apache Curator as the zookeeper client; otherwise a copy of Twitter commons/zookeeper (the legacy library) is used.
+-------------------------------------------------------------------------
+```
diff --git a/source/documentation/0.16.0/reference/scheduler-endpoints.md b/source/documentation/0.16.0/reference/scheduler-endpoints.md
new file mode 100644
index 0000000..d302e90
--- /dev/null
+++ b/source/documentation/0.16.0/reference/scheduler-endpoints.md
@@ -0,0 +1,19 @@
+# HTTP endpoints
+
+There are a number of HTTP endpoints that the Aurora scheduler exposes. These allow various
+operational tasks to be performed on the scheduler. Below is the list of all such endpoints
+and a brief explanation of what they do.
+
+## Leader health
+The `/leaderhealth` endpoint enables health checks on the scheduler instances in order
+to forward requests to the leading scheduler. This is typically used by a load balancer such as
+HAProxy or AWS ELB.
+
+When an HTTP GET request is issued on this endpoint, it responds as follows:
+
+- If the instance that received the GET request is the leading scheduler, an HTTP status code of
+  200 (OK) is returned.
+- If the instance that received the GET request is not the leading scheduler but a leader does
+  exist, an HTTP status code of 503 (SERVICE_UNAVAILABLE) is returned.
+- If no leader currently exists or the leader is unknown, an HTTP status code of 502
+  (BAD_GATEWAY) is returned.
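+
+For illustration only, a minimal probe in Python that mimics what a load balancer does with this
+endpoint (the scheduler addresses and port are hypothetical, and the `requests` library is an
+assumption rather than part of Aurora):
+
+    import requests
+
+    # Hypothetical scheduler instances; substitute your own hosts and http_port.
+    SCHEDULERS = ['scheduler1.example.com:8081', 'scheduler2.example.com:8081']
+
+    def find_leader():
+      """Return the address of the leading scheduler, or None if no leader is known."""
+      for host in SCHEDULERS:
+        try:
+          response = requests.get('http://%s/leaderhealth' % host, timeout=2)
+        except requests.RequestException:
+          continue
+        if response.status_code == 200:  # 200 (OK): this instance is the leader.
+          return host
+        # 503 (SERVICE_UNAVAILABLE): not the leader; 502 (BAD_GATEWAY): leader unknown.
+      return None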
\ No newline at end of file
diff --git a/source/documentation/0.16.0/reference/task-lifecycle.md b/source/documentation/0.16.0/reference/task-lifecycle.md
new file mode 100644
index 0000000..4dcb481
--- /dev/null
+++ b/source/documentation/0.16.0/reference/task-lifecycle.md
@@ -0,0 +1,146 @@
+# Task Lifecycle
+
+When Aurora reads a configuration file and finds a `Job` definition, it:
+
+1.  Evaluates the `Job` definition.
+2.  Splits the `Job` into its constituent `Task`s.
+3.  Sends those `Task`s to the scheduler.
+4.  The scheduler puts the `Task`s into `PENDING` state, starting each
+    `Task`'s life cycle.
+
+
+![Life of a task](../images/lifeofatask.png)
+
+Please note, a couple of task states described below are missing from
+this state diagram.
+
+
+## PENDING to RUNNING states
+
+When a `Task` is in the `PENDING` state, the scheduler constantly
+searches for machines satisfying that `Task`'s resource request
+requirements (RAM, disk space, CPU time) while maintaining configuration
+constraints such as "a `Task` must run on machines dedicated to a
+particular role" or attribute limit constraints such as "at most 2
+`Task`s from the same `Job` may run on each rack". When the scheduler
+finds a suitable match, it assigns the `Task` to a machine and puts the
+`Task` into the `ASSIGNED` state.
+
+From the `ASSIGNED` state, the scheduler sends an RPC to the agent
+machine containing `Task` configuration, which the agent uses to spawn
+an executor responsible for the `Task`'s lifecycle. When the scheduler
+receives an acknowledgment that the machine has accepted the `Task`,
+the `Task` goes into `STARTING` state.
+
+`STARTING` state initializes a `Task` sandbox. When the sandbox is fully
+initialized, Thermos begins to invoke `Process`es. Also, the agent
+machine sends an update to the scheduler that the `Task` is
+in `RUNNING` state.
+
+
+
+## RUNNING to terminal states
+
+There are various ways that an active `Task` can transition into a terminal
+state. By definition, it can never leave this state. However, depending on the
+nature of the termination and the originating `Job` definition
+(e.g. `service`, `max_task_failures`), a replacement `Task` might be
+scheduled.
+
+### Natural Termination: FINISHED, FAILED
+
+A `RUNNING` `Task` can terminate without direct user interaction. For
+example, it may be a finite computation that finishes, even something as
+simple as `echo hello world.`, or it could be an exceptional condition in
+a long-lived service. If the `Task` is successful (its underlying
+processes have succeeded with exit status `0` or finished without
+reaching failure limits) it moves into `FINISHED` state. If it finished
+after reaching a set of failure limits, it goes into `FAILED` state.
+
+A terminated `Task` which is subject to rescheduling will be temporarily
+`THROTTLED` if it is considered to be flapping. A task is flapping if its
+previous invocation was terminated after less than 5 minutes (scheduler
+default). The time penalty a task has to remain in the `THROTTLED` state,
+before it is eligible for rescheduling, increases with each consecutive
+failure.
+
+### Forceful Termination: KILLING, RESTARTING
+
+You can terminate a `Task` by issuing an `aurora job kill` command, which
+moves it into `KILLING` state. The scheduler then sends the agent a
+request to terminate the `Task`. If the scheduler receives a successful
+response, it moves the Task into `KILLED` state and never restarts it.
+
+If a `Task` is forced into the `RESTARTING` state via the `aurora job restart`
+command, the scheduler kills the underlying task but in parallel schedules
+an identical replacement for it.
+
+In any case, the responsible executor on the agent follows an escalation
+sequence when killing a running task:
+
+  1. If a `HttpLifecycleConfig` is not present, skip to (4).
+  2. Send a POST to the `graceful_shutdown_endpoint` and wait 5 seconds.
+  3. Send a POST to the `shutdown_endpoint` and wait 5 seconds.
+  4. Send SIGTERM (`kill`) and wait at most `finalization_wait` seconds.
+  5. Send SIGKILL (`kill -9`).
+
+If the executor notices that all `Process`es in a `Task` have aborted
+during this sequence, it will not proceed with subsequent steps.
+Note that graceful shutdown is best-effort, and due to the many
+inevitable realities of distributed systems, it may not be performed.
+
+### Unexpected Termination: LOST
+
+If a `Task` stays in a transient task state for too long (such as `ASSIGNED`
+or `STARTING`), the scheduler forces it into `LOST` state, creating a new
+`Task` in its place that's sent into `PENDING` state.
+
+In addition, if the Mesos core tells the scheduler that an agent has
+become unhealthy (or outright disappeared), the `Task`s assigned to that
+agent go into `LOST` state and new `Task`s are created in their place.
+From `PENDING` state, there is no guarantee a `Task` will be reassigned
+to the same machine unless job constraints explicitly force it there.
+
+### Giving Priority to Production Tasks: PREEMPTING
+
+Sometimes a Task needs to be interrupted, such as when a non-production
+Task's resources are needed by a higher priority production Task. This
+type of interruption is called a *pre-emption*. When this happens in
+Aurora, the non-production Task is killed and moved into
+the `PREEMPTING` state when both of the following are true:
+
+- The task being killed is a non-production task.
+- The other task is a `PENDING` production task that hasn't been
+  scheduled due to a lack of resources.
+
+The scheduler UI shows the non-production task was preempted in favor of
+the production task. At some point, tasks in `PREEMPTING` move to `KILLED`.
+
+Note that non-production tasks consuming many resources are likely to be
+preempted in favor of production tasks.
+
+### Making Room for Maintenance: DRAINING
+
+Cluster operators can set an agent into maintenance mode. This will transition
+all `Task`s running on this agent into `DRAINING` and eventually to `KILLED`.
+Drained `Task`s will be restarted on other agents for which no maintenance
+has been announced yet.
+
+
+
+## State Reconciliation
+
+Due to the many inevitable realities of distributed systems, there might
+be a mismatch of perceived and actual cluster state (e.g. a machine returns
+from a `netsplit` but the scheduler has already marked all its `Task`s as
+`LOST` and rescheduled them).
+
+Aurora regularly runs a state reconciliation process in order to detect
+and correct such issues (e.g. by killing the errant `RUNNING` tasks).
+By default, the proper detection of all failure scenarios and inconsistencies
+may take up to an hour.
+
+To emphasize this point: there is no uniqueness guarantee for a single
+instance of a job in the presence of network partitions. If the `Task`
+requires that, it should be baked in at the application level using a
+distributed coordination service such as Zookeeper.
diff --git a/source/documentation/0.17.0/additional-resources/presentations.md b/source/documentation/0.17.0/additional-resources/presentations.md
new file mode 100644
index 0000000..9957446
--- /dev/null
+++ b/source/documentation/0.17.0/additional-resources/presentations.md
@@ -0,0 +1,80 @@
+# Apache Aurora Presentations
+Video and slides from presentations and panel discussions about Apache Aurora.
+
+_(Listed in date descending order)_
+
+<table>
+
+	<tr>
+		<td><img src="/documentation/0.17.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png" alt="Mesos and Aurora on a Small Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=q5iIqhaCJ_o">Mesos &amp; Aurora on a Small Scale (Video)</a></strong>
+		<p>Presented by Florian Pfeiffer</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.17.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png" alt="SLA Aware Maintenance for Operators Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=tZ0-SISvCis">SLA Aware Maintenance for Operators (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.17.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png" alt="Shipping Code with Aurora Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=y1hi7K1lPkk">Shipping Code with Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.17.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png" alt="Twitter Production Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=nNrh-gdu9m4">Twitter’s Production Scale: Mesos and Aurora Operations (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.17.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png" alt="From Monolith to Microservices with Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=yXkOgnyK4Hw">From Monolith to Microservices w/ Aurora (Video)</a></strong>
+		<p>Presented by Thanos Baskous, Tony Dong, Dobromir Montauk</p>
+		<p>April 30, 2015 at <a href="http://www.meetup.com/Bay-Area-Apache-Aurora-Users-Group/events/221219480/">Bay Area Apache Aurora Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.17.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png" alt="Aurora + Mesos in Practice at Twitter Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=1XYJGX_qZVU">Aurora + Mesos in Practice at Twitter (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 07, 2015 at <a href="http://www.bigeng.io/aurora-mesos-in-practice-at-twitter">Bigcommerce TechTalk</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.17.0/images/presentations/02_28_2015_apache_aurora_thumb.png" alt="Apache Auroraの始めかた Slideshow Thumbnail" /></td>
+		<td><strong><a href="http://www.slideshare.net/zembutsu/apache-aurora-introduction-and-tutorial-osc15tk">Apache Auroraの始めかた (Slides)</a></strong>
+		<p>Presented by Masahito Zembutsu</p>
+		<p>February 28, 2015 at <a href="http://www.ospn.jp/osc2015-spring/">Open Source Conference 2015 Tokyo Spring</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.17.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png" alt="Apache Aurora Adopters Panel Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=2Jsj0zFdRlg">Apache Aurora Adopters Panel (Video)</a></strong>
+		<p>Panelists Ben Staffin, Josh Adams, Bill Farner, Berk Demir</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.17.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png" alt="Operating Apache Aurora and Mesos at Twitter Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=E4lxX6epM_U">Operating Apache Aurora and Mesos at Twitter (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.17.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png" alt="Apache Aurora and Mesos at TellApart" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=ZZXtXLvTXAE">Apache Aurora and Mesos at TellApart (Video)</a></strong>
+		<p>Presented by Steve Niemitz</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.17.0/images/presentations/08_21_2014_past_present_future_thumb.png" alt="Past, Present, and Future of the Aurora Scheduler Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=Dsc5CPhKs4o">Past, Present, and Future of the Aurora Scheduler (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 21, 2014 at <a href="http://events.linuxfoundation.org/events/archive/2014/mesoscon">#MesosCon 2014</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.17.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png" alt="Introduction to Apache Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=asd_h6VzaJc">Introduction to Apache Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 25, 2014 at <a href="https://www.eventbrite.com/e/aurora-and-mesosframeworksmeetup-tickets-10850994617">Aurora and Mesos Frameworks Meetup</a></p></td>
+	</tr>
+</table>
diff --git a/source/documentation/0.17.0/additional-resources/tools.md b/source/documentation/0.17.0/additional-resources/tools.md
new file mode 100644
index 0000000..7f3613d
--- /dev/null
+++ b/source/documentation/0.17.0/additional-resources/tools.md
@@ -0,0 +1,24 @@
+# Tools
+
+Various tools integrate with Aurora. Is there a tool missing? Let us know, or submit a patch to add it!
+
+* Load-balancing technology used to direct traffic to services running on Aurora:
+  - [synapse](https://github.com/airbnb/synapse) based on HAProxy
+  - [aurproxy](https://github.com/tellapart/aurproxy) based on nginx
+  - [jobhopper](https://github.com/benley/aurora-jobhopper) performs HTTP redirects for easy developer and administrator access
+
+* RPC libraries that integrate with Aurora's [service discovery mechanism](../../features/service-discovery/):
+  - [linkerd](https://linkerd.io/) RPC proxy
+  - [finagle](https://twitter.github.io/finagle) (Scala)
+  - [scales](https://github.com/steveniemitz/scales) (Python)
+
+* Monitoring:
+  - [collectd-aurora](https://github.com/zircote/collectd-aurora) for cluster monitoring using collectd
+  - [Prometheus Aurora exporter](https://github.com/tommyulfsparre/aurora_exporter) for cluster monitoring using Prometheus
+  - [Prometheus service discovery integration](http://prometheus.io/docs/operating/configuration/#zookeeper-serverset-sd-configurations-serverset_sd_config) for discovering and monitoring services running on Aurora
+
+* Packaging and deployment:
+  - [aurora-packaging](https://github.com/apache/aurora-packaging), the source of the official Aurora packages
+
+* Thrift Clients:
+  - [gorealis](https://github.com/rdelval/gorealis) for communicating with the scheduler using Go
diff --git a/source/documentation/0.17.0/contributing.md b/source/documentation/0.17.0/contributing.md
new file mode 100644
index 0000000..d3d1a1f
--- /dev/null
+++ b/source/documentation/0.17.0/contributing.md
@@ -0,0 +1,93 @@
+## Get the Source Code
+
+First things first, you'll need the source! The Aurora source is available from Apache git:
+
+    git clone https://gitbox.apache.org/repos/asf/aurora
+
+Read the Style Guides
+---------------------
+Aurora's codebase is primarily Java and Python and conforms to the Twitter Commons styleguides for
+both languages.
+
+- [Java Style Guide](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/styleguide.md)
+- [Python Style Guide](https://github.com/twitter/commons/blob/master/src/python/twitter/common/styleguide.md)
+
+## Find Something to Do
+
+There are issues in [Jira](https://issues.apache.org/jira/browse/AURORA) with the
+["newbie" label](https://issues.apache.org/jira/issues/?jql=project%20%3D%20AURORA%20AND%20labels%20%3D%20newbie%20and%20resolution%3Dunresolved)
+that are good starting places for new Aurora contributors; pick one of these and dive in! To assign
+a task to yourself, first ask for your JIRA id to be whitelisted by either asking in IRC/Slack or by
+emailing dev@aurora.apache.org. Once your JIRA account has been whitelisted you can assign tickets
+to yourself. The next step is to prepare your patch and finally post it for review.
+
+## Getting your ReviewBoard Account
+
+Go to https://reviews.apache.org and create an account.
+
+## Setting up your ReviewBoard Environment
+
+Run `./rbt status`. The first time this runs it will bootstrap and you will be asked to log in.
+Subsequent runs will cache your login credentials.
+
+## Submitting a Patch for Review
+
+Post a review with `rbt`, fill out the fields in your browser and hit Publish.
+
+    ./rbt post -o
+
+If you're unsure about who to add as a reviewer, you can default to adding Zameer Manji (zmanji) and
+Joshua Cohen (jcohen). They will take care of finding an appropriate reviewer for the patch.
+
+Once you've done this, you probably want to mark the associated Jira issue as Reviewable.
+
+## Updating an Existing Review
+
+Incorporate review feedback, make some more commits, update your existing review, fill out the
+fields in your browser and hit Publish.
+
+    ./rbt post -o -r <RB_ID>
+
+## Getting Your Review Merged
+
+If you're not an Aurora committer, one of the committers will merge your change in as described
+below. Generally, the last reviewer to give the review a 'Ship It!' will be responsible.
+
+### Merging Your Own Review (Committers)
+
+Once you have shipits from the right committers, merge your changes in a single commit and mark
+the review as submitted. The typical workflow is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>  # Verify the automatically-generated commit message looks sane,
+                            # editing if necessary.
+    git show master         # Verify everything looks sane
+    git push origin master
+    ./rbt close <RB_ID>
+
+Note that even if you're developing using feature branches you will not use `git merge` - each
+commit will be an atomic change accompanied by a ReviewBoard entry.
+
+### Merging Someone Else's Review
+
+Sometimes you'll need to merge someone else's RB. The typical workflow for this is
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>
+    git show master  # Verify everything looks sane, author is correct
+    git push origin master
+
+Note for committers: while we generally use the commit message generated by `./rbt patch` some
+changes are often required:
+
+1. Ensure that the commit message does not exceed 100 characters per line.
+2. Remove the "Testing Done" section. It's generally redundant (can be seen by checking the linked
+  review) or entirely irrelevant to the commit itself.
+
+## Cleaning Up
+
+Your patch has landed, congratulations! The last thing you'll want to do before moving on to your
+next fix is to clean up your Jira and ReviewBoard: the former should be marked as
+"Resolved" and the latter as "Submitted".
diff --git a/source/documentation/0.17.0/development/client.md b/source/documentation/0.17.0/development/client.md
new file mode 100644
index 0000000..c6bec5b
--- /dev/null
+++ b/source/documentation/0.17.0/development/client.md
@@ -0,0 +1,148 @@
+Developing the Aurora Client
+============================
+
+The client is written in Python, and uses the
+[Pants](http://pantsbuild.github.io/python-readme.html) build tool.
+
+
+Building and Testing
+--------------------
+
+Building and testing the client code are both done using Pants. The relevant targets to know about
+are:
+
+   * Build a client executable: `./pants binary src/main/python/apache/aurora/client:aurora`
+   * Test client code: `./pants test src/test/python/apache/aurora/client/cli:cli`
+
+If you want to build a source distribution of the client, you need to run `./build-support/release/make-python-sdists`.
+
+
+Creating Custom Builds
+----------------------
+
+There are situations where you may want to plug in custom logic to the Client that may not be
+applicable to the open source codebase. Rather than create a whole CLI from scratch, you can
+easily create your own custom, drop-in replacement aurora.pex using the pants build tool.
+
+First, create an AuroraCommandLine implementation as an entry-point for registering customizations:
+
+    from apache.aurora.client.cli.client import AuroraCommandLine
+
+    class CustomAuroraCommandLine(AuroraCommandLine):
+      """Custom AuroraCommandLine for your needs"""
+
+      @property
+      def name(self):
+        return "your-company-aurora"
+
+      @classmethod
+      def get_description(cls):
+        return 'Your Company internal Aurora client command line'
+
+      def __init__(self):
+        super(CustomAuroraCommandLine, self).__init__()
+        # Add custom plugins.
+        self.register_plugin(YourCustomPlugin())
+
+      def register_nouns(self):
+        super(CustomAuroraCommandLine, self).register_nouns()
+        # You can even add new commands / sub-commands!
+        self.register_noun(YourStartUpdateProxy())
+        self.register_noun(YourDeployWorkflowCommand())
+
+Secondly, create a main entry point:
+
+    import sys
+
+    def proxy_main():
+      client = CustomAuroraCommandLine()
+      if len(sys.argv) == 1:
+        sys.argv.append("-h")
+      sys.exit(client.execute(sys.argv[1:]))
+
+Finally, you can wire everything up with a pants BUILD file in your project directory:
+
+    python_binary(
+      name='aurora',
+      entry_point='your_company.aurora.client:proxy_main',
+      dependencies=[
+        ':client_lib'
+      ]
+    )
+
+    python_library(
+      name='client_lib',
+      sources = [
+        'client.py',
+        'custom_plugin.py',
+        'custom_command.py',
+      ],
+      dependencies = [
+        # The Apache Aurora client
+        # Any other dependencies for your custom code
+      ],
+    )
+
+Using the same commands to build the client as above (but obviously pointing to this BUILD file
+instead), you will have a drop-in replacement aurora.pex file with your customizations.
+
+Running/Debugging
+------------------
+
+For manually testing client changes against a cluster, we use [Vagrant](https://www.vagrantup.com/).
+To start a virtual cluster, you need to install Vagrant, and then run `vagrant up` from the root of
+the aurora workspace. This will create a vagrant host named "devcluster", with a Mesos master, a set
+of Mesos agents, and an Aurora scheduler.
+
+If you have a change you would like to test in your local cluster, you'll rebuild the client:
+
+    vagrant ssh -c 'aurorabuild client'
+
+Once this completes, the `aurora` command will reflect your changes.
+
+
+Running/Debugging in PyCharm
+-----------------------------
+
+It's possible to use PyCharm to run and debug both the client and client tests in an IDE. In order
+to do this, first run:
+
+    build-support/python/make-pycharm-virtualenv
+
+This script will configure a virtualenv with all of our Python requirements. Once the script
+completes it will emit instructions for configuring PyCharm:
+
+    Your PyCharm environment is now set up.  You can open the project root
+    directory with PyCharm.
+
+    Once the project is loaded:
+      - open project settings
+      - click 'Project Interpreter'
+      - click the cog in the upper-right corner
+      - click 'Add Local'
+      - select 'build-support/python/pycharm.venv/bin/python'
+      - click 'OK'
+
+### Running/Debugging Tests
+After following these instructions, you should now be able to run/debug tests directly from the IDE
+by right-clicking on a test (or test class) and choosing to run or debug:
+
+[![Debug Client Test](../images/debug-client-test.png)](../images/debug-client-test.png)
+
+If you've set a breakpoint, you can see the run will now stop and let you debug:
+
+[![Debugging Client Test](../images/debugging-client-test.png)](../images/debugging-client-test.png)
+
+### Running/Debugging the Client
+Actually running and debugging the client is unfortunately a bit more complex. You'll need to create
+a Run configuration:
+
+* Go to Run → Edit Configurations
+* Click the + icon to add a new configuration.
+* Choose python and name the configuration 'client'.
+* Set the script path to `/your/path/to/aurora/src/main/python/apache/aurora/client/cli/client.py`
+* Set the script parameters to the command you want to run (e.g. `job status <job key>`)
+* Expand the Environment section and click the ellipsis to add a new environment variable
+* Click the + at the bottom to add a new variable named AURORA_CONFIG_ROOT whose value is the
+  path where your cluster configuration can be found. For example, to talk to the scheduler
+  running in the vagrant image, it would be set to `/your/path/to/aurora/examples/vagrant` (this
+  is the directory where our example clusters.json is found).
+* You should now be able to run and debug this configuration!
diff --git a/source/documentation/0.17.0/development/committers-guide.md b/source/documentation/0.17.0/development/committers-guide.md
new file mode 100644
index 0000000..5c377bc
--- /dev/null
+++ b/source/documentation/0.17.0/development/committers-guide.md
@@ -0,0 +1,102 @@
+Committer's Guide
+=================
+
+Information for official Apache Aurora committers.
+
+Setting up your email account
+-----------------------------
+Once your Apache ID has been set up you can configure your account, add ssh keys, and set up an
+email forwarding address at
+
+    http://id.apache.org
+
+Additional instructions for setting up your new committer email can be found at
+
+    http://www.apache.org/dev/user-email.html
+
+The recommended setup is to configure all services (mailing lists, JIRA, ReviewBoard) to send
+emails to your @apache.org email address.
+
+
+Creating a gpg key for releases
+-------------------------------
+In order to create a release candidate you will need a gpg key published to an external key server
+and that key will need to be added to our KEYS file as well.
+
+1. Create a key:
+
+               gpg --gen-key
+
+2. Add your gpg key to the Apache Aurora KEYS file:
+
+               git clone https://gitbox.apache.org/repos/asf/aurora.git
+               (gpg --list-sigs <KEY ID> && gpg --armor --export <KEY ID>) >> KEYS
+               git add KEYS && git commit -m "Adding gpg key for <APACHE ID>"
+               ./rbt post -o -g
+
+3. Publish the key to an external key server:
+
+               gpg --keyserver pgp.mit.edu --send-keys <KEY ID>
+
+4. Update the KEYS file at the Apache Aurora svn dist locations listed below:
+
+               https://dist.apache.org/repos/dist/dev/aurora/KEYS
+               https://dist.apache.org/repos/dist/release/aurora/KEYS
+
+5. Add your key to git config for use with the release scripts:
+
+               git config --global user.signingkey <KEY ID>
+
+
+Creating a release
+------------------
+The following will guide you through the steps to create a release candidate, vote, and finally an
+official Apache Aurora release. Before starting, your gpg key should be in the KEYS file and you
+must have access to commit to the dist.a.o repositories.
+
+1. Ensure that all issues resolved for this release candidate are tagged with the correct Fix
+Version in JIRA; the changelog script will use this to generate the CHANGELOG in step #3.
+To assign the fix version:
+
+    * Look up the [previous release date](https://issues.apache.org/jira/browse/aurora/?selectedTab=com.atlassian.jira.jira-projects-plugin:versions-panel).
+    * Query all issues resolved after that release date: `project = AURORA AND status in (resolved, Closed) and fixVersion is empty and resolutiondate >= "YYYY/MM/DD"`
+    * In the upper right corner of the query result, select Tools > Bulk Edit.
+    * Select all issues > edit issue > set 'Change Fix Version/s' to the release version.
+    * Make sure to uncheck 'Send mail for this update' at the bottom.
+
+2. Prepare RELEASE-NOTES.md for the release. This just boils down to removing the "(Not yet
+released)" suffix from the impending release.
+
+3. Create a release candidate. This will automatically update the CHANGELOG and commit it, create a
+branch and update the current version within the trunk. To create a minor version update and publish
+it, run
+
+               ./build-support/release/release-candidate -l m -p
+
+4. Update, if necessary, the draft email created from the `release-candidate` script in step #3 and
+send the [VOTE] email to the dev@ mailing list. You can verify the release signature and checksums
+by running
+
+               ./build-support/release/verify-release-candidate
+
+5. Wait for the vote to complete. If the vote fails, close the vote by replying to the initial [VOTE]
+email sent in step #4, editing the subject to [RESULT][VOTE] ... and noting the failure reason
+(example [here](http://markmail.org/message/d4d6xtvj7vgwi76f)). You'll also need to manually revert
+the commits generated by the release candidate script that incremented the snapshot version and
+updated the changelog. Once that is done, address any issues and go back to step #1 and run
+again; this time use the -r flag to increment the release candidate version. This will
+automatically clean up the release candidate rc0 branch and source distribution.
+
+               ./build-support/release/release-candidate -l m -r 1 -p
+
+6. Once the vote has successfully passed, create the release
+
+**IMPORTANT: make sure to use the correct release at this final step (e.g.: `-r 1` if rc1 candidate
+has been voted for). Once the release tag is pushed it will be very hard to undo due to remote
+git pre-receive hook explicitly forbidding release tag manipulations.**
+
+               ./build-support/release/release
+
+7. Update the draft email created from the `release` script in step #6 to include the Apache IDs for
+all binding votes and send the [RESULT][VOTE] email to the dev@ mailing list.
+
diff --git a/source/documentation/0.17.0/development/db-migration.md b/source/documentation/0.17.0/development/db-migration.md
new file mode 100644
index 0000000..38f2ea5
--- /dev/null
+++ b/source/documentation/0.17.0/development/db-migration.md
@@ -0,0 +1,34 @@
+DB Migrations
+=============
+
+Changes to the DB schema should be made in the form of migrations. This ensures that all changes
+are applied correctly after a DB dump from a previous version is restored.
+
+DB migrations are managed through a system built on top of
+[MyBatis Migrations](http://www.mybatis.org/migrations/). The migrations are run automatically when
+a snapshot is restored, no manual interaction is required by cluster operators.
+
+Upgrades
+--------
+When adding or altering tables or changing data, in addition to making the change in
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql), a new
+migration class should be created under the org.apache.aurora.scheduler.storage.db.migration
+package. The class should implement the [MigrationScript](https://github.com/mybatis/migrations/blob/master/src/main/java/org/apache/ibatis/migration/MigrationScript.java)
+interface (see [V001_TestMigration](https://github.com/apache/aurora/blob/rel/0.17.0/src/test/java/org/apache/aurora/scheduler/storage/db/testmigration/V001_TestMigration.java)
+as an example). The upgrade and downgrade scripts are defined in this class. When restoring a
+snapshot the list of migrations on the classpath is compared to the list of applied changes in the
+DB. Any changes that have not yet been applied are executed and their downgrade script is stored
+alongside the changelog entry in the database to facilitate downgrades in the event of a rollback.
+
+Downgrades
+----------
+If, while running migrations, a rollback is detected, i.e. a change exists in the DB changelog that
+does not exist on the classpath, the downgrade script associated with each affected change is
+applied.
+
+Baselines
+---------
+After enough time has passed (at least 1 official release), it should be safe to baseline migrations
+if desired. This can be accomplished by ensuring the changes from migrations have been applied to
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql) and then
+removing the corresponding migration classes and adding a migration to remove the changelog entries.
\ No newline at end of file
diff --git a/source/documentation/0.17.0/development/design-documents.md b/source/documentation/0.17.0/development/design-documents.md
new file mode 100644
index 0000000..b335fa5
--- /dev/null
+++ b/source/documentation/0.17.0/development/design-documents.md
@@ -0,0 +1,22 @@
+Design Documents
+================
+
+Since its inception as an Apache project, larger feature additions to the
+Aurora code base have been discussed in the form of design documents. Design documents
+are living documents until a consensus has been reached to implement a feature
+in the proposed form.
+
+Current and past documents:
+
+* [Command Hooks for the Aurora Client](../design/command-hooks/)
+* [Dynamic Reservations](https://docs.google.com/document/d/19gV8Po6DIHO14tOC7Qouk8RnboY8UCfRTninwn_5-7c/edit)
+* [GPU Resources in Aurora](https://docs.google.com/document/d/1J9SIswRMpVKQpnlvJAMAJtKfPP7ZARFknuyXl-2aZ-M/edit)
+* [Health Checks for Updates](https://docs.google.com/document/d/1KOO0LC046k75TqQqJ4c0FQcVGbxvrn71E10wAjMorVY/edit)
+* [JobUpdateDiff thrift API](https://docs.google.com/document/d/1Fc_YhhV7fc4D9Xv6gJzpfooxbK4YWZcvzw6Bd3qVTL8/edit)
+* [REST API RFC](https://docs.google.com/document/d/11_lAsYIRlD5ETRzF2eSd3oa8LXAHYFD8rSetspYXaf4/edit)
+* [Revocable Mesos offers in Aurora](https://docs.google.com/document/d/1r1WCHgmPJp5wbrqSZLsgtxPNj3sULfHrSFmxp2GyPTo/edit)
+* [Supporting the Mesos Universal Containerizer](https://docs.google.com/document/d/111T09NBF2zjjl7HE95xglsDpRdKoZqhCRM5hHmOfTLA/edit?usp=sharing)
+* [Tier Management In Apache Aurora](https://docs.google.com/document/d/1erszT-HsWf1zCIfhbqHlsotHxWUvDyI2xUwNQQQxLgs/edit?usp=sharing)
+* [Ubiquitous Jobs](https://docs.google.com/document/d/12hr6GnUZU3mc7xsWRzMi3nQILGB-3vyUxvbG-6YmvdE/edit)
+
+Design documents can be found in the Aurora issue tracker via the query [`project = AURORA AND text ~ "docs.google.com" ORDER BY created`](https://issues.apache.org/jira/browse/AURORA-1528?jql=project%20%3D%20AURORA%20AND%20text%20~%20%22docs.google.com%22%20ORDER%20BY%20created).
diff --git a/source/documentation/0.17.0/development/design/command-hooks.md b/source/documentation/0.17.0/development/design/command-hooks.md
new file mode 100644
index 0000000..3f3f70f
--- /dev/null
+++ b/source/documentation/0.17.0/development/design/command-hooks.md
@@ -0,0 +1,102 @@
+# Command Hooks for the Aurora Client
+
+## Introduction/Motivation
+
+We've got hooks in the client that surround API calls. These are
+pretty awkward, because they don't correlate with user actions. For
+example, suppose we wanted a policy that said users weren't allowed to
+kill all instances of a production job at once.
+
+Right now, all that we could hook would be the "killJob" api call. But
+kill (at least in newer versions of the client) normally runs in
+batches. If a user called killall, what we would see on the API level
+is a series of "killJob" calls, each of which specified a batch of
+instances. We wouldn't be able to distinguish between really killing
+all instances of a job (which is forbidden under this policy), and
+carefully killing in batches (which is permitted). In each case, the
+hook would just see a series of API calls, and couldn't find out what
+the actual command being executed was!
+
+For most policy enforcement, what we really want to be able to do is
+look at and vet the commands that a user is performing, not the API
+calls that the client uses to implement those commands.
+
+So I propose that we add a new kind of hooks, which surround noun/verb
+commands. A hook will register itself to handle a collection of (noun,
+verb) pairs. Whenever any of those noun/verb commands are invoked, the
+hook's methods will be called around the execution of the verb. A
+pre-hook will have the ability to reject a command, preventing the
+verb from being executed.
+
+## Registering Hooks
+
+These hooks will be registered via configuration plugins. A configuration plugin
+can register hooks using an API. Hooks registered this way are, effectively,
+hardwired into the client executable.
+
+The order of execution of hooks is unspecified: they may be called in
+any order. There is no way to guarantee that one hook will execute
+before some other hook.
+
+
+### Global Hooks
+
+Commands registered by the python call are called _global_ hooks,
+because they will run for all configurations, whether or not they
+specify any hooks in the configuration file.
+
+In the implementation, hooks are registered in the module
+`apache.aurora.client.cli.command_hooks`, using the class
+`GlobalCommandHookRegistry`. A global hook can be registered by calling
+`GlobalCommandHookRegistry.register_command_hook` in a configuration plugin.
+
+### The API
+
+    class CommandHook(object):
+      @property
+      def name(self):
+        """Returns a name for the hook."""
+
+      def get_nouns(self):
+        """Return the nouns that have verbs that should invoke this hook."""
+
+      def get_verbs(self, noun):
+        """Return the verbs for a particular noun that should invoke this hook."""
+
+      @abstractmethod
+      def pre_command(self, noun, verb, context, commandline):
+        """Execute a hook before invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        Returns: True if the command should be allowed to proceed; False if the command
+        should be rejected.
+        """
+
+      def post_command(self, noun, verb, context, commandline, result):
+        """Execute a hook after invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        * result: the result code returned by the verb.
+        Returns: nothing
+        """
+
+    class GlobalCommandHookRegistry(object):
+      @classmethod
+      def register_command_hook(cls, hook):
+        pass
+
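+For illustration, a hook that rejects `job killall` against production jobs might look like the
+following sketch. It assumes `CommandHook` lives alongside `GlobalCommandHookRegistry` in
+`apache.aurora.client.cli.command_hooks`; everything else (the class name, the production check) is
+hypothetical.
+
+    from apache.aurora.client.cli.command_hooks import (
+        CommandHook,
+        GlobalCommandHookRegistry
+    )
+
+    class ProdKillallHook(CommandHook):
+      @property
+      def name(self):
+        return 'prod_killall'
+
+      def get_nouns(self):
+        # Only commands under the "job" noun are hooked.
+        return ['job']
+
+      def get_verbs(self, noun):
+        # Within "job", only "killall" is hooked.
+        return ['killall'] if noun == 'job' else []
+
+      def pre_command(self, noun, verb, context, commandline):
+        # Allow the command only if no argument refers to a production environment.
+        return not any('/production/' in arg for arg in commandline)
+
+      def post_command(self, noun, verb, context, commandline, result):
+        pass
+
+    # Called from a configuration plugin; this makes the hook a global hook.
+    GlobalCommandHookRegistry.register_command_hook(ProdKillallHook())
+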
+### Skipping Hooks
+
+To skip a hook, a user uses a command-line option, `--skip-hooks`. The option can either
+specify specific hooks to skip, or "all":
+
+* `aurora --skip-hooks=all job create east/bozo/devel/myjob` will create a job
+  without running any hooks.
+* `aurora --skip-hooks=test,iq create east/bozo/devel/myjob` will create a job,
+  and will skip only the hooks named "test" and "iq".
diff --git a/source/documentation/0.17.0/development/scheduler.md b/source/documentation/0.17.0/development/scheduler.md
new file mode 100644
index 0000000..fc0836a
--- /dev/null
+++ b/source/documentation/0.17.0/development/scheduler.md
@@ -0,0 +1,118 @@
+Developing the Aurora Scheduler
+===============================
+
+The Aurora scheduler is written in Java code and built with [Gradle](http://gradle.org).
+
+
+Prerequisite
+============
+
+When using Apache Aurora checked out from the source repository or the binary
+distribution, the Gradle wrapper and JavaScript dependencies are provided.
+However, you need to manually install them when using the source release
+downloads:
+
+1. Install Gradle following the instructions on the [Gradle web site](http://gradle.org)
+2. From the root directory of the Apache Aurora project generate the Gradle
+wrapper by running:
+
+    gradle wrapper
+
+
+Getting Started
+===============
+
+You will need Java 8 installed and on your `PATH` or unzipped somewhere with `JAVA_HOME` set. Then
+
+    ./gradlew tasks
+
+will bootstrap the build system and show available tasks. This can take a while the first time you
+run it but subsequent runs will be much faster due to cached artifacts.
+
+Running the Tests
+-----------------
+Aurora has a comprehensive unit test suite. To run the tests use
+
+    ./gradlew build
+
+Gradle will only re-run tests when dependencies of them have changed. To force a re-run of all
+tests use
+
+    ./gradlew clean build
+
+Running the build with code quality checks
+------------------------------------------
+To speed up development iteration, the plain gradle commands will not run static analysis tools.
+However, you should run these before posting a review diff, and **always** run this before pushing a
+commit to origin/master.
+
+    ./gradlew build -Pq
+
+Running integration tests
+-------------------------
+To run the same tests that are run in the Apache Aurora continuous integration
+environment:
+
+    ./build-support/jenkins/build.sh
+
+In addition, there is an end-to-end test that runs a suite of aurora commands
+using a virtual cluster:
+
+    ./src/test/sh/org/apache/aurora/e2e/test_end_to_end.sh
+
+Creating a bundle for deployment
+--------------------------------
+Gradle can create a zip file containing Aurora, all of its dependencies, and a launch script with
+
+    ./gradlew distZip
+
+or a tar file containing the same files with
+
+    ./gradlew distTar
+
+The output file will be written to `dist/distributions/aurora-scheduler.zip` or
+`dist/distributions/aurora-scheduler.tar`.
+
+
+
+Developing Aurora Java code
+===========================
+
+Setting up an IDE
+-----------------
+Gradle can generate project files for your IDE. To generate an IntelliJ IDEA project run
+
+    ./gradlew idea
+
+and import the generated `aurora.ipr` file.
+
+Adding or Upgrading a Dependency
+--------------------------------
+New dependencies can be added from Maven central by adding a `compile` dependency to `build.gradle`.
+For example, to add a dependency on `com.example`'s `example-lib` 1.0 add this block:
+
+    compile 'com.example:example-lib:1.0'
+
+NOTE: Anyone thinking about adding a new dependency should first familiarize themselves with the
+Apache Foundation's third-party licensing
+[policy](http://www.apache.org/legal/resolved.html#category-x).
+
+
+
+Developing the Aurora Build System
+==================================
+
+Bootstrapping Gradle
+--------------------
+The following files were autogenerated by `gradle wrapper` using gradle's
+[Wrapper](http://www.gradle.org/docs/current/dsl/org.gradle.api.tasks.wrapper.Wrapper.html) plugin and
+should not be modified directly:
+
+    ./gradlew
+    ./gradlew.bat
+    ./gradle/wrapper/gradle-wrapper.jar
+    ./gradle/wrapper/gradle-wrapper.properties
+
+To upgrade Gradle unpack the new version somewhere, run `/path/to/new/gradle wrapper` in the
+repository root and commit the changed files.
+
diff --git a/source/documentation/0.17.0/development/thermos.md b/source/documentation/0.17.0/development/thermos.md
new file mode 100644
index 0000000..0efe130
--- /dev/null
+++ b/source/documentation/0.17.0/development/thermos.md
@@ -0,0 +1,126 @@
+The Python components of Aurora are built using [Pants](https://pantsbuild.github.io).
+
+
+Python Build Conventions
+========================
+The Python code is laid out according to the following conventions:
+
+1. 1 `BUILD` per 3rd level directory. For a list of current top-level packages run:
+
+        % find src/main/python -maxdepth 3 -mindepth 3 -type d |\
+        while read dname; do echo $dname |\
+            sed 's@src/main/python/\(.*\)/\(.*\)/\(.*\).*@\1.\2.\3@'; done
+
+2.  Each `BUILD` file exports 1
+    [`python_library`](https://pantsbuild.github.io/build_dictionary.html#bdict_python_library)
+    that provides a
+    [`setup_py`](https://pantsbuild.github.io/build_dictionary.html#setup_py)
+    containing each
+    [`python_binary`](https://pantsbuild.github.io/build_dictionary.html#python_binary)
+    in the `BUILD` file, named the same as the directory it's in so that it can be referenced
+    without a ':' character. The `sources` field in the `python_library` will almost always be
+    `rglobs('*.py')`.
+
+3.  Other BUILD files may only depend on this single public `python_library`
+    target. Any other target is considered a private implementation detail and
+    should be prefixed with an `_`.
+
+4.  `python_binary` targets are always named the same as the exported console script.
+
+5.  `python_binary` targets must have identical `dependencies` to the `python_library` exported
+    by the package and must use `entry_point`.
+
+    This means a PEX file generated by pants will contain exactly the same files that will be
+    available on the `PYTHONPATH` in the case of `pip install` of the corresponding library
+    target. This will help our migration off of Pants in the future.
+
+Annotated example - apache.thermos.runner
+-----------------------------------------
+
+    % find src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner/__init__.py
+    src/main/python/apache/thermos/runner/thermos_runner.py
+    src/main/python/apache/thermos/runner/BUILD
+    % cat src/main/python/apache/thermos/runner/BUILD
+    # License boilerplate omitted
+    import os
+
+
+    # Private target so that a setup_py can exist without a circular dependency. Only targets within
+    # this file should depend on this.
+    python_library(
+      name = '_runner',
+      # The target covers every python file under this directory and subdirectories.
+      sources = rglobs('*.py'),
+      dependencies = [
+        '3rdparty/python:twitter.common.app',
+        '3rdparty/python:twitter.common.log',
+        # Source dependencies are always referenced without a ':'.
+        'src/main/python/apache/thermos/common',
+        'src/main/python/apache/thermos/config',
+        'src/main/python/apache/thermos/core',
+      ],
+    )
+
+    # Binary target for thermos_runner.pex. Nothing should depend on this - it's only used as an
+    # argument to ./pants binary.
+    python_binary(
+      name = 'thermos_runner',
+      # Use entry_point, not source so the files used here are the same ones tests see.
+      entry_point = 'apache.thermos.bin.thermos_runner',
+      dependencies = [
+        # Notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+    )
+
+    # The public library that everyone importing the runner symbols uses.
+    # The test targets and any other dependent source code should depend on this.
+    python_library(
+      name = 'runner',
+      dependencies = [
+        # Again, notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+      # We always provide a setup_py. This will cause any dependee libraries to automatically
+      # reference this library in their requirements.txt rather than copy the source files into their
+      # sdist.
+      provides = setup_py(
+        # Conventionally named and versioned.
+        name = 'apache.thermos.runner',
+        version = open(os.path.join(get_buildroot(), '.auroraversion')).read().strip().upper(),
+      ).with_binaries({
+        # Every binary in this file should also be repeated here.
+        # Always use the dict-form of .with_binaries so that commands with dashes in their names are
+        # supported.
+        # The console script name is always the same as the PEX with .pex stripped.
+        'thermos_runner': ':thermos_runner',
+      }),
+    )
+
+
+
+Thermos Test resources
+======================
+
+The Aurora source repository and distributions contain several
+[binary files](../../src/test/resources/org/apache/thermos/root/checkpoints) to
+qualify the backwards-compatibility of thermos with checkpoint data. Since
+thermos persists state to disk (to be read by the thermos observer), it is important that we have
+tests that prevent regressions affecting the ability to parse previously-written data.
+
+The files included represent persisted checkpoints that exercise different
+features of thermos. The existing files should not be modified unless
+we are accepting backwards incompatibility, such as with a major release.
+
+It is not practical to write source code to generate these files on the fly,
+as source would be vulnerable to drift (e.g. due to refactoring) in ways
+that would undermine the goal of ensuring backwards compatibility.
+
+The most common reason to add a new checkpoint file would be to provide
+coverage for new thermos features that alter the data format. This is
+accomplished by writing and running a
+[job configuration](../../reference/configuration/) that exercises the feature, and
+copying the checkpoint file from the sandbox directory; by default this is
+`/var/run/thermos/checkpoints/<aurora task id>`.
diff --git a/source/documentation/0.17.0/development/thrift.md b/source/documentation/0.17.0/development/thrift.md
new file mode 100644
index 0000000..e5a470b
--- /dev/null
+++ b/source/documentation/0.17.0/development/thrift.md
@@ -0,0 +1,54 @@
+Thrift
+======
+
+Aurora uses [Apache Thrift](https://thrift.apache.org/) for representing structured data in
+client/server RPC protocol as well as for internal data storage. While Thrift is capable of
+correctly handling additions and renames of the existing members, field removals must be done
+carefully to ensure backwards compatibility and provide a predictable deprecation cycle. This
+document describes general guidelines for making Thrift schema changes to the existing fields in
+[api.thrift](https://github.com/apache/aurora/blob/rel/0.17.0/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+It is highly recommended to go through the
+[Thrift: The Missing Guide](http://diwakergupta.github.io/thrift-missing-guide/) first to refresh on
+basic Thrift schema concepts.
+
+Checklist
+---------
+Every existing Thrift schema modification is unique in its requirements and must be analyzed
+carefully to identify its scope and expected consequences. The following checklist may help in that
+analysis:
+* Is this a new field/struct? If yes, go ahead
+* Is this a pure field/struct rename without any type/structure change? If yes, go ahead and rename
+* Anything else, read further to make sure your change is properly planned
+
+Deprecation cycle
+-----------------
+Any time a breaking change (e.g.: field replacement or removal) is required, the following cycle
+must be followed:
+
+### vCurrent
+Change is applied in a way that does not break scheduler/client with this version to
+communicate with scheduler/client from vCurrent-1.
+* Do not remove or rename the old field
+* Add a new field as an eventual replacement of the old one and implement a dual read/write
+anywhere the old field is used. If a thrift struct is mapped in the DB store make sure both columns
+are marked as `NOT NULL`
+* Check [storage.thrift](https://github.com/apache/aurora/blob/rel/0.17.0/api/src/main/thrift/org/apache/aurora/gen/storage.thrift) to see if
+the affected struct is stored in Aurora scheduler storage. If so, it's almost certainly also
+necessary to perform a [DB migration](../db-migration/).
+* Add a deprecation jira ticket into the vCurrent+1 release candidate
+* Add a TODO for the deprecated field mentioning the jira ticket
+
+### vCurrent+1
+Finalize the change by removing the deprecated fields from the Thrift schema.
+* Drop any dual read/write routines added in the previous version
+* Remove thrift backfilling in scheduler
+* Remove the deprecated Thrift field
+
+Testing
+-------
+It's always advisable to test your changes in the local vagrant environment to build more
+confidence that your change is backwards compatible. It's easy to simulate different
+client/scheduler versions by playing with `aurorabuild` command. See [this document](../../getting-started/vagrant/)
+for more.
+
diff --git a/source/documentation/0.17.0/development/ui.md b/source/documentation/0.17.0/development/ui.md
new file mode 100644
index 0000000..f41614e
--- /dev/null
+++ b/source/documentation/0.17.0/development/ui.md
@@ -0,0 +1,46 @@
+Developing the Aurora Scheduler UI
+==================================
+
+Installing bower (optional)
+----------------------------
+Third party JS libraries used in Aurora (located at 3rdparty/javascript/bower_components) are
+managed by bower, a JS dependency manager. Bower is only required if you plan to add, remove or
+update JS libraries. Bower can be installed using the following command:
+
+    npm install -g bower
+
+Bower depends on node.js and npm. The easiest way to install node on a mac is via brew:
+
+    brew install node
+
+For more node.js installation options refer to https://github.com/joyent/node/wiki/Installation.
+
+More info on installing and using bower can be found at: http://bower.io/. Once installed, you can
+use the following commands to view and modify the bower repo at
+3rdparty/javascript/bower_components
+
+    bower list
+    bower install <library name>
+    bower remove <library name>
+    bower update <library name>
+    bower help
+
+
+Faster Iteration in Vagrant
+---------------------------
+The scheduler serves UI assets from the classpath. For production deployments this means the assets
+are served from within a jar. However, for faster development iteration, the vagrant image is
+configured to add the `scheduler` subtree of `/vagrant/dist/resources/main` to the head of
+`CLASSPATH`. This path is configured as a shared filesystem to the path on the host system where
+your Aurora repository lives. This means that any updates under `dist/resources/main/scheduler` in
+your checkout will be reflected immediately in the UI served from within the vagrant image.
+
+The one caveat to this is that this path is under `dist` not `src`. This is because the assets must
+be processed by gradle before they can be served. So, unfortunately, you cannot just save your local
+changes and see them reflected in the UI; you must first run `./gradlew processResources`. This is
+less than ideal, but better than having to restart the scheduler after every change. Additionally,
+gradle makes this process somewhat easier with the use of the `--continuous` flag. If you run:
+`./gradlew processResources --continuous` gradle will monitor the filesystem for changes and run the
+task automatically as necessary. This doesn't quite provide hot-reload capabilities, but it does
+allow for <5s from save to changes being visible in the UI with no further action required on the
+part of the developer.
diff --git a/source/documentation/0.17.0/features/constraints.md b/source/documentation/0.17.0/features/constraints.md
new file mode 100644
index 0000000..4ade848
--- /dev/null
+++ b/source/documentation/0.17.0/features/constraints.md
@@ -0,0 +1,126 @@
+Scheduling Constraints
+======================
+
+By default, Aurora will pick any random agent with sufficient resources
+in order to schedule a task. This scheduling choice can be further
+restricted with the help of constraints.
+
+
+Mesos Attributes
+----------------
+
+Data centers are often organized with hierarchical failure domains.  Common failure domains
+include hosts, racks, rows, and PDUs.  If you have this information available, it is wise to tag
+the Mesos agent with them as
+[attributes](https://mesos.apache.org/documentation/attributes-resources/).
+
+The Mesos agent `--attributes` command line argument can be used to mark agents with
+static key/value pairs, so called attributes (not to be confused with `--resources`, which are
+dynamic and accounted).
+
+For example, consider the host `cluster1-aaa-03-sr2` and its following attributes (given in
+key:value format): `host:cluster1-aaa-03-sr2` and `rack:aaa`.
+
+Aurora makes these attributes available for matching with scheduling constraints.
+
+
+Limit Constraints
+-----------------
+
+Limit constraints allow you to control machine diversity. The below
+constraint ensures that no more than two instances of your job may run on a single host.
+Think of this as a "group by" limit.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'host': 'limit:2',
+      }
+      ...
+    )
+
+
+Likewise, you can use constraints to control rack diversity, e.g. at
+most one task per rack:
+
+    constraints = {
+      'rack': 'limit:1',
+    }
+
+Use these constraints sparingly as they can dramatically reduce Tasks' schedulability.
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+
+Value Constraints
+-----------------
+
+Value constraints can be used to express that a certain attribute with a certain value
+should be present on a Mesos agent. For example, the following job would only be
+scheduled on nodes that claim to have an `SSD` as their disk.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'disk': 'SSD',
+      }
+      ...
+    )
+
+
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+Running stateful services
+-------------------------
+
+Aurora is best suited to run stateless applications, but it also accommodates for stateful services
+like databases, or services that otherwise need to always run on the same machines.
+
+### Dedicated attribute
+
+Most Mesos attributes are arbitrary and available for custom use.  There is one exception,
+though: the `dedicated` attribute.  Aurora treats this attribute specially: only jobs with a matching
+constraint may run on these machines, and such jobs will only be scheduled onto these machines.
+
+
+#### Syntax
+The dedicated attribute has semantic meaning. The format is `$role(/.*)?`. When a job is created,
+the scheduler requires that the `$role` component matches the `role` field in the job
+configuration, and will reject the job creation otherwise.  The remainder of the attribute is
+free-form. We've developed the idiom of formatting this attribute as `$role/$job`, but do not
+enforce this. For example: a job `devcluster/www-data/prod/hello` with a dedicated constraint set as
+`www-data/web.multi` will have its tasks scheduled only on Mesos agents configured with:
+`--attributes=dedicated:www-data/web.multi`.
+
+A wildcard (`*`) may be used for the role portion of the dedicated attribute, which will allow any
+owner to elect for a job to run on the host(s). For example: tasks from both
+`devcluster/www-data/prod/hello` and `devcluster/vagrant/test/hello` with a dedicated constraint
+formatted as `*/web.multi` will be scheduled only on Mesos agents configured with
+`--attributes=dedicated:*/web.multi`. This may be useful when assembling a virtual cluster of
+machines sharing the same set of traits or requirements.
+
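+For example, a job owned by the `vagrant` role could opt into such a shared pool with a
+constraint like the following sketch (the job and pool names are illustrative):
+
+    Service(
+      name = 'hello',
+      role = 'vagrant',
+      constraints = {
+        'dedicated': '*/web.multi',
+      }
+      ...
+    )
+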
+##### Example
+Consider the following agent command line:
+
+    mesos-slave --attributes="dedicated:db_team/redis" ...
+
+And this job configuration:
+
+    Service(
+      name = 'redis',
+      role = 'db_team',
+      constraints = {
+        'dedicated': 'db_team/redis'
+      }
+      ...
+    )
+
+The job configuration is indicating that it should only be scheduled on agents with the attribute
+`dedicated:db_team/redis`.  Additionally, Aurora will prevent any tasks that do _not_ have that
+constraint from running on those agents.
+
diff --git a/source/documentation/0.17.0/features/containers.md b/source/documentation/0.17.0/features/containers.md
new file mode 100644
index 0000000..5bf1601
--- /dev/null
+++ b/source/documentation/0.17.0/features/containers.md
@@ -0,0 +1,130 @@
+Containers
+==========
+
+Aurora supports several containerizers, notably the Mesos containerizer and the Docker
+containerizer. The Mesos containerizer uses native OS features directly to provide isolation between
+containers, while the Docker containerizer delegates container management to the Docker engine.
+
+The support for launching container images via both containerizers has to be
+[enabled by a cluster operator](../../operations/configuration/#containers).
+
+Mesos Containerizer
+-------------------
+
+The Mesos containerizer is the native Mesos containerizer solution. It allows tasks to be
+run with an array of [pluggable isolators](../resource-isolation/) and can launch tasks using
+[Docker](https://github.com/docker/docker/blob/master/image/spec/v1.md) images,
+[AppC](https://github.com/appc/spec/blob/master/SPEC.md) images, or directly on the agent host
+filesystem.
+
+The following example (available in our [Vagrant environment](../../getting-started/vagrant/))
+launches a hello world example within a `debian/jessie` Docker image:
+
+    $ cat /vagrant/examples/jobs/hello_docker_image.aurora
+    hello_loop = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = Task(
+      processes = [hello_loop],
+      resources = Resources(cpu=1, ram=1*MB, disk=8*MB)
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker_image',
+        task = task,
+        container = Mesos(image=DockerImage(name='debian', tag='jessie'))
+      )
+    ]
+
+Docker and Appc images are designated using an appropriate `image` property of the `Mesos`
+configuration object. If either `container` or `image` is left unspecified, the host filesystem
+will be used. Further details of how to specify images can be found in the
+[Reference Documentation](../../reference/configuration/#mesos-object).
+
+By default, Aurora launches processes as the Linux user with the same name as the job's role (e.g. `www-data`
+in the example above). This user has to exist on the host filesystem. If it does not exist within
+the container image, it will be created automatically. Otherwise, this user and its primary group
+have to exist in the image with matching uid/gid.
+
+For more information on the Mesos containerizer filesystem, namespace, and isolator features, visit
+[Mesos Containerizer](http://mesos.apache.org/documentation/latest/mesos-containerizer/) and
+[Mesos Container Images](http://mesos.apache.org/documentation/latest/container-image/).
+
+
+Docker Containerizer
+--------------------
+
+The Docker containerizer launches container images using the Docker engine. It may often provide
+more advanced features than the native Mesos containerizer, but has to be installed separately from
+Mesos on each agent host.
+
+Starting with the 0.17.0 release, `image` can be specified with a `{{docker.image[name][tag]}}` binder so that
+the tag can be resolved to a concrete image digest. This ensures that the job always uses the same image
+across restarts, even if the version identified by the tag has been updated, guaranteeing that only job
+updates can mutate configuration.
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/hello_docker_engine.aurora
+    hello_loop = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = Task(
+      processes = [hello_loop],
+      resources = Resources(cpu=1, ram=1*MB, disk=8*MB)
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker',
+        task = task,
+        container = Docker(image = 'python:2.7')
+      ), Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker_engine_binding',
+        task = task,
+        container = Docker(image = '{{docker.image[library/python][2.7]}}')
+      )
+    ]
+
+Note that this feature requires a v2 Docker registry. If using a private Docker registry, its URL
+must be specified in the `clusters.json` configuration file under the key `docker_registry`.
+If not specified, `docker_registry` defaults to `https://registry-1.docker.io` (Docker Hub).
+
+Example:
+
+    # clusters.json
+    [{
+      "name": "devcluster",
+      ...
+      "docker_registry": "https://registry.example.com"
+    }]
+
+Details of how to use Docker via the Docker engine can be found in the
+[Reference Documentation](../../reference/configuration/#docker-object). Please note that in order to
+correctly execute processes inside a job, the Docker container must have Python 2.7 and potentially
+further Mesos dependencies installed. This limitation does not hold for Docker containers used via
+the Mesos containerizer.
+
+For more information on launching Docker containers through the Docker containerizer, visit
+[Docker Containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/).
diff --git a/source/documentation/0.17.0/features/cron-jobs.md b/source/documentation/0.17.0/features/cron-jobs.md
new file mode 100644
index 0000000..bcc94dc
--- /dev/null
+++ b/source/documentation/0.17.0/features/cron-jobs.md
@@ -0,0 +1,124 @@
+# Cron Jobs
+
+Aurora supports execution of scheduled jobs on a Mesos cluster using cron-style syntax.
+
+- [Overview](#overview)
+- [Collision Policies](#collision-policies)
+- [Failure recovery](#failure-recovery)
+- [Interacting with cron jobs via the Aurora CLI](#interacting-with-cron-jobs-via-the-aurora-cli)
+	- [cron schedule](#cron-schedule)
+	- [cron deschedule](#cron-deschedule)
+	- [cron start](#cron-start)
+	- [job killall, job restart, job kill](#job-killall-job-restart-job-kill)
+- [Technical Note About Syntax](#technical-note-about-syntax)
+- [Caveats](#caveats)
+	- [Failovers](#failovers)
+	- [Collision policy is best-effort](#collision-policy-is-best-effort)
+	- [Timezone Configuration](#timezone-configuration)
+
+## Overview
+
+A job is identified as a cron job by the presence of a
+`cron_schedule` attribute containing a cron-style schedule in the
+[`Job`](../../reference/configuration/#job-objects) object. Examples of cron schedules
+include "every 5 minutes" (`*/5 * * * *`), "Fridays at 17:00" (`0 17 * * FRI`), and
+"the 1st and 15th day of the month at 03:00" (`0 3 1,15 * *`).
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/cron_hello_world.aurora
+    # A cron job that runs every 5 minutes.
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]
+
+## Collision Policies
+
+The `cron_collision_policy` field specifies the scheduler's behavior when a new cron job is
+triggered while an older run hasn't finished. The scheduler has two policies available:
+
+* `KILL_EXISTING`: The default policy - on a collision the old instances are killed and instances with the current
+configuration are started.
+* `CANCEL_NEW`: On a collision the new run is cancelled.
+
+Note that the use of `CANCEL_NEW` is likely a code smell - interrupted cron jobs should be able
+to recover their progress on a subsequent invocation; otherwise they risk having their work queue
+grow faster than they can process it.
+
+## Failure recovery
+
+Unlike with services, which Aurora will always re-execute regardless of exit status, instances of
+cron jobs retry according to the `max_task_failures` attribute of the
+[Job](../../reference/configuration/#job-objects) object. To get "run-until-success" semantics,
+set `max_task_failures` to `-1`.
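+
+For example, extending the cron job above to retry until success might look like this sketch:
+
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        # Retry failed task instances indefinitely.
+        max_task_failures = -1,
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]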
+
+## Interacting with cron jobs via the Aurora CLI
+
+Most interaction with cron jobs takes place using the `cron` subcommand. See `aurora cron -h`
+for up-to-date usage instructions.
+
+### cron schedule
+Schedules a new cron job on the Aurora cluster for later runs or replaces the existing cron template
+with a new one. Only future runs will be affected; any existing active tasks are left intact.
+
+    $ aurora cron schedule devcluster/www-data/test/cron_hello_world /vagrant/examples/jobs/cron_hello_world.aurora
+
+### cron deschedule
+Deschedules a cron job, preventing future runs but allowing current runs to complete.
+
+    $ aurora cron deschedule devcluster/www-data/test/cron_hello_world
+
+### cron start
+Start a cron job immediately, outside of its normal cron schedule.
+
+    $ aurora cron start devcluster/www-data/test/cron_hello_world
+
+### job killall, job restart, job kill
+Cron jobs create instances running on the cluster that you can interact with like normal Aurora
+tasks with `job kill` and `job restart`.
+
+
+## Technical Note About Syntax
+
+`cron_schedule` uses a restricted subset of BSD crontab syntax. While the
+execution engine currently uses Quartz, the schedule parsing is custom, a subset of FreeBSD
+[crontab(5)](http://www.freebsd.org/cgi/man.cgi?crontab(5)) syntax. See
+[the source](https://github.com/apache/aurora/blob/master/src/main/java/org/apache/aurora/scheduler/cron/CrontabEntry.java#L106-L124)
+for details.
+
+
+## Caveats
+
+### Failovers
+No failover recovery. Aurora does not record the latest minute it fired
+triggers for across failovers. Therefore it's possible to miss triggers
+on failover. Note that this behavior may change in the future.
+
+It's necessary to sync time between schedulers with something like `ntpd`.
+Clock skew could cause double or missed triggers in the case of a failover.
+
+### Collision policy is best-effort
+Aurora aims to always have *at least one copy* of a given instance running at a time - it's
+an AP system, meaning it chooses Availability and Partition Tolerance at the expense of
+Consistency.
+
+If your collision policy was `CANCEL_NEW` and a task has terminated but
+Aurora has not noticed this, Aurora will go ahead and create your new
+task.
+
+If your collision policy was `KILL_EXISTING` and a task was marked `LOST`
+but not yet GCed, Aurora will go ahead and create your new task without
+attempting to kill the old one (outside the GC interval).
+
+### Timezone Configuration
+Cron timezone is configured independently of JVM timezone with the `-cron_timezone` flag and
+defaults to UTC.
diff --git a/source/documentation/0.17.0/features/custom-executors.md b/source/documentation/0.17.0/features/custom-executors.md
new file mode 100644
index 0000000..40fc118
--- /dev/null
+++ b/source/documentation/0.17.0/features/custom-executors.md
@@ -0,0 +1,152 @@
+Custom Executors
+================
+
+If the need arises to use a Mesos executor other than the Thermos executor, the scheduler can be
+configured to utilize a custom executor by specifying the `-custom_executor_config` flag.
+The flag must be set to the path of a valid executor configuration file.
+
+The configuration file must be a valid **JSON array** and contain, at minimum,
+one executor configuration including the name, command and resources fields and
+must be pointed to by the `-custom_executor_config` flag when the scheduler is
+started.
+
+### Array Entry
+
+Property                 | Description
+-----------------------  | ---------------------------------
+executor (required)      | Description of executor.
+task_prefix (required)   | Prefix given to tasks launched with this executor's configuration.
+volume_mounts (optional) | Volumes to be mounted in container running executor.
+
+#### executor
+
+Property                 | Description
+-----------------------  | ---------------------------------
+name (required)          | Name of the executor.
+command (required)       | How to run the executor.
+resources (required)     | Overhead to use for each executor instance.
+
+#### command
+
+Property                 | Description
+-----------------------  | ---------------------------------
+value (required)         | The command to execute.
+arguments (optional)     | A list of arguments to pass to the command.
+uris (optional)          | List of resources to download into the task sandbox.
+shell (optional)         | Run executor via shell.
+
+A note on the command property (from [mesos.proto](https://github.com/apache/mesos/blob/master/include/mesos/mesos.proto)):
+```
+1) If 'shell == true', the command will be launched via shell
+   (i.e., /bin/sh -c 'value'). The 'value' specified will be
+   treated as the shell command. The 'arguments' will be ignored.
+2) If 'shell == false', the command will be launched by passing
+   arguments to an executable. The 'value' specified will be
+   treated as the filename of the executable. The 'arguments'
+   will be treated as the arguments to the executable. This is
+   similar to how POSIX exec families launch processes (i.e.,
+   execlp(value, arguments(0), arguments(1), ...)).
+```
+
+##### uris (list)
+* Follows the [Mesos Fetcher schema](http://mesos.apache.org/documentation/latest/fetcher/)
+
+Property                 | Description
+-----------------------  | ---------------------------------
+value (required)         | Path to the resource needed in the sandbox.
+executable (optional)    | Change resource to be executable via chmod.
+extract (optional)       | Extract files from packed or compressed archives into the sandbox.
+cache (optional)         | Use caching mechanism provided by Mesos for resources.
+
+#### resources (list)
+
+Property             | Description
+-------------------  | ---------------------------------
+name (required)      | Name of the resource: cpus or mem.
+type (required)      | Type of resource. Should always be SCALAR.
+scalar (required)    | Value in float for cpus or int for mem (in MBs)
+
+### volume_mounts (list)
+
+Property                  | Description
+------------------------  | ---------------------------------
+host_path (required)      | Host path to mount inside the container.
+container_path (required) | Path inside the container where `host_path` will be mounted.
+mode (required)           | Mode in which to mount the volume, Read-Write (RW) or Read-Only (RO).
+
+A sample configuration is as follows:
+```
+[
+    {
+      "executor": {
+        "name": "myExecutor",
+        "command": {
+          "value": "myExecutor.a",
+          "shell": "false",
+          "arguments": [
+            "localhost:2181",
+            "-verbose",
+            "-config myConfiguration.config"
+          ],
+          "uris": [
+            {
+              "value": "/dist/myExecutor.a",
+              "executable": true,
+              "extract": false,
+              "cache": true
+            },
+            {
+              "value": "/home/user/myConfiguration.config",
+              "executable": false,
+              "extract": false,
+              "cache": false
+            }
+          ]
+        },
+        "resources": [
+          {
+            "name": "cpus",
+            "type": "SCALAR",
+            "scalar": {
+              "value": 1.00
+            }
+          },
+          {
+            "name": "mem",
+            "type": "SCALAR",
+            "scalar": {
+              "value": 512
+            }
+          }
+        ]
+      },
+      "volume_mounts": [
+        {
+          "mode": "RO",
+          "container_path": "/path/on/container",
+          "host_path": "/path/to/host/directory"
+        },
+        {
+          "mode": "RW",
+          "container_path": "/container",
+          "host_path": "/host"
+        }
+      ],
+      "task_prefix": "my-executor-"
+    }
+]
+
+```
+
+It should be noted that if you do not use Thermos or a Thermos based executor, links in the scheduler's
+Web UI for tasks will not work (at least for the time being).
+Some information about launched tasks can still be accessed via the Mesos Web UI or via the Aurora Client.
+
+### Using a custom executor
+
+It is not currently possible to create a job that runs on a custom executor using the default
+Aurora client. To allow the scheduler to pick the correct executor, the `JobConfiguration.TaskConfig.ExecutorConfig.name`
+field must be set to match the name used in the custom executor configuration blob (e.g. to run a job using myExecutor,
+`JobConfiguration.TaskConfig.ExecutorConfig.name` must be set to `myExecutor`). Until support for modifying
+this field is added to Pystachio, the easiest way to launch jobs with custom executors is to use
+an existing custom client such as [gorealis](https://github.com/rdelval/gorealis).
diff --git a/source/documentation/0.17.0/features/job-updates.md b/source/documentation/0.17.0/features/job-updates.md
new file mode 100644
index 0000000..18800d4
--- /dev/null
+++ b/source/documentation/0.17.0/features/job-updates.md
@@ -0,0 +1,110 @@
+Aurora Job Updates
+==================
+
+`Job` configurations can be updated at any point in their lifecycle.
+Usually updates are done incrementally using a process called a *rolling
+upgrade*, in which Tasks are upgraded in small groups, one group at a
+time.  Updates are done using various Aurora Client commands.
+
+
+Rolling Job Updates
+-------------------
+
+There are several sub-commands to manage job updates:
+
+    aurora update start <job key> <configuration file>
+    aurora update info <job key>
+    aurora update pause <job key>
+    aurora update resume <job key>
+    aurora update abort <job key>
+    aurora update list <cluster>
+
+When you `start` a job update, the command will return once it has sent the
+instructions to the scheduler.  At that point, you may view detailed
+progress for the update with the `info` subcommand, in addition to viewing
+graphical progress in the web browser.  You may also get a full listing of
+in-progress updates in a cluster with `list`.
+
+Once an update has been started, you can `pause` to keep the update but halt
+progress.  This can be useful for doing things like debugging a partially-updated
+job to determine whether you would like to proceed.  You can `resume` to
+proceed.
+
+You may `abort` a job update regardless of the state it is in. This will
+instruct the scheduler to completely abandon the job update and leave the job
+in the current (possibly partially-updated) state.
+
+For a configuration update, the Aurora Scheduler calculates required changes
+by examining the current job config state and the new desired job config.
+It then starts a *rolling batched update process* by going through every batch
+and performing these operations:
+
+- If an instance is present in the scheduler but isn't in the new config,
+  then that instance is killed.
+- If an instance is not present in the scheduler but is present in
+  the new config, then the instance is created.
+- If an instance is present in both the scheduler and the new config, then
+  the scheduler diffs both task configs. If it detects any changes, it
+  performs an instance update by killing the old config instance and adding
+  the new config instance.
+
+The Aurora Scheduler continues through the instance list until all tasks are
+updated and in `RUNNING`. If the scheduler determines the update is not going
+well (based on the criteria specified in the UpdateConfig), it cancels the update.
+
+Update cancellation runs a procedure similar to the update sequence described
+above, but in reverse order. New instance configs are swapped
+with old instance configs and batch updates proceed backwards
+from the point where the update failed. E.g. (0,1,2) (3,4,5) (6,7,
+8-FAIL) results in a rollback in order (8,7,6) (5,4,3) (2,1,0).
+
+For details on how to control a job update, please see the
+[UpdateConfig](../../reference/configuration/#updateconfig-objects) configuration object.
+
+
+Coordinated Job Updates
+------------------------
+
+Some Aurora services may benefit from having more control over updates by explicitly
+acknowledging ("heartbeating") job update progress. This may be helpful for mission-critical
+service updates where explicit job health monitoring is vital during the entire job update
+lifecycle. Such job updates would rely on an external service (or a custom client) periodically
+pulsing an active coordinated job update via a
+[pulseJobUpdate RPC](https://github.com/apache/aurora/blob/rel/0.17.0/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+A coordinated update is defined by setting a positive
+[pulse_interval_secs](../../reference/configuration/#updateconfig-objects) value in the job configuration
+file. If no pulses are received within the specified interval, the update will be blocked. A blocked
+update is unable to continue rolling forward (or rolling back) but retains its active status.
+It may only be unblocked by a fresh `pulseJobUpdate` call.
+
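+A minimal sketch of the relevant configuration (values are illustrative):
+
+    update_config = UpdateConfig(
+      batch_size = 1,
+      # A coordinated update: block if no pulseJobUpdate call arrives within 60 seconds.
+      pulse_interval_secs = 60
+    )
+
+    jobs = [
+      Service(
+        # ... cluster, role, environment, name and task as usual ...
+        update_config = update_config
+      )
+    ]
+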
+NOTE: A coordinated update starts in `ROLL_FORWARD_AWAITING_PULSE` state and will not make any
+progress until the first pulse arrives. However, a paused update (`ROLL_FORWARD_PAUSED` or
+`ROLL_BACK_PAUSED`) is still considered active and upon resuming will immediately make progress
+provided the pulse interval has not expired.
+
+
+Canary Deployments
+------------------
+
+Canary deployments are a pattern for rolling out updates to a subset of job instances,
+in order to test different code versions alongside the actual production job.
+It is a risk-mitigation strategy for job owners and commonly used in a form where
+job instance 0 runs with a different configuration than the instances 1-N.
+
+For example, consider a job with 4 instances that each
+request 1 core of cpu, 1 GB of RAM, and 1 GB of disk space as specified
+in the configuration file `hello_world.aurora`. If you want to
+update it so it requests 2 GB of RAM instead of 1, you can create a new
+configuration file called `new_hello_world.aurora` and
+issue
+
+    aurora update start <job_key_value>/0-1 new_hello_world.aurora
+
+This results in instances 0 and 1 having 1 cpu, 2 GB of RAM, and 1 GB of disk space,
+while instances 2 and 3 have 1 cpu, 1 GB of RAM, and 1 GB of disk space. If instance 3
+dies and restarts, it restarts with 1 cpu, 1 GB RAM, and 1 GB disk space.
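+
+For illustration, `new_hello_world.aurora` could differ from the original configuration only in the
+requested RAM; a minimal sketch (the `hello` process name is hypothetical):
+
+    task = Task(
+      processes = [hello],
+      # Previously: ram = 1*GB
+      resources = Resources(cpu = 1, ram = 2*GB, disk = 1*GB)
+    )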
+
+So that means there are two simultaneous task configurations for the same job
+at the same time, just valid for different ranges of instances. While this isn't a recommended
+pattern, it is valid and supported by the Aurora scheduler.
diff --git a/source/documentation/0.17.0/features/mesos-fetcher.md b/source/documentation/0.17.0/features/mesos-fetcher.md
new file mode 100644
index 0000000..967a905
--- /dev/null
+++ b/source/documentation/0.17.0/features/mesos-fetcher.md
@@ -0,0 +1,46 @@
+Mesos Fetcher
+=============
+
+Mesos has support for downloading resources into the sandbox through the
+use of the [Mesos Fetcher](http://mesos.apache.org/documentation/latest/fetcher/)
+
+Aurora supports passing URIs to the Mesos Fetcher dynamically by including
+a list of URIs in job submissions.
+
+How to use
+----------
+The scheduler flag `-enable_mesos_fetcher` must be set to true.
+
+Currently only the scheduler side of this feature has been implemented
+so a modification to the existing client, or a custom Thrift client are required
+to make use of this feature.
+
+If using a custom Thrift client, the list of URIs must be included in TaskConfig
+as the `mesosFetcherUris` field.
+
+Each Mesos Fetcher URI has the following data members:
+
+|Property | Description|
+|---------|------|
+|value (required)  |Path to the resource needed in the sandbox.|
+|extract (optional)|Extract files from packed or compressed archives into the sandbox.|
+|cache (optional) | Use caching mechanism provided by Mesos for resources.|
+
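+As a rough sketch, a custom Python Thrift client might populate the field as follows. The generated
+module path and constructor style are assumptions about the Thrift bindings, not part of this
+document:
+
+    from gen.apache.aurora.api.ttypes import MesosFetcherURI, TaskConfig
+
+    task_config = TaskConfig(
+      # ... all other required TaskConfig fields ...
+      mesosFetcherUris = {
+        MesosFetcherURI(value = 'https://example.com/data.tar.gz', extract = True, cache = False)
+      }
+    )
+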
+Note that this structure is very similar to the one provided for downloading
+resources needed for a [custom executor](../../operations/configuration/).
+
+This is because both features use the Mesos fetcher to retrieve resources into
+the sandbox. However, the custom executor feature uses a static set of URIs
+configured on the server side, while the Mesos Fetcher feature uses a dynamic set
+of URIs provided at the time of job submission.
+
+Security Implications
+---------------------
+There are security implications that must be taken into account when enabling this feature.
+**Enabling this feature may potentially enable any job submitting user to perform a privilege escalation.**
+
+Until a more thorough solution is created, one step that has been taken to mitigate this issue
+is to statically mark every user submitted URI as non-executable. This is in contrast to the set of URIs
+set in the custom executor feature which may mark any URI as executable.
+
+If the need arises to mark a downloaded URI as executable, please consider using the custom executor feature.
\ No newline at end of file
diff --git a/source/documentation/0.17.0/features/multitenancy.md b/source/documentation/0.17.0/features/multitenancy.md
new file mode 100644
index 0000000..c2ed26b
--- /dev/null
+++ b/source/documentation/0.17.0/features/multitenancy.md
@@ -0,0 +1,81 @@
+Multitenancy
+============
+
+Aurora is a multi-tenant system that can run jobs of multiple clients/tenants.
+Going beyond the [resource isolation on an individual host](../resource-isolation/), it is
+crucial to prevent those jobs from stepping on each other's toes.
+
+
+Job Namespaces
+--------------
+
+The namespace for jobs in Aurora follows a hierarchical structure. This is meant to make it easier
+to differentiate between different jobs. A job key consists of four parts. The four parts are
+`<cluster>/<role>/<environment>/<jobname>` in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts.
+* Environment names are namespaces.
+* Jobname is the custom name of your job.
+
+Role names correspond to user accounts. They are used for
+[authentication](../../operations/security/), as the linux user used to run jobs, and for the
+assignment of [quota](#preemption). If you don't know what accounts are available, contact your
+sysadmin.
+
+The environment component in the job key serves as a namespace. The values for
+environment are validated in the client and the scheduler so as to allow any of `devel`, `test`,
+`production`, and any value matching the regular expression `staging[0-9]*`.
+
+None of the values imply any difference in the scheduling behavior. Conventionally, the
+"environment" is set so as to indicate a certain level of stability in the behavior of the job
+by ensuring that an appropriate level of testing has been performed on the application code. e.g.
+in the case of a typical Job, releases may progress through the following phases in order of
+increasing level of stability: `devel`, `test`, `staging`, `production`.
+
+
+Configuration Tiers
+-------------------
+
+Tier is a predefined bundle of task configuration options. Aurora schedules tasks and assigns them
+resources based on their tier assignment. The default scheduler tier configuration allows for
+3 tiers:
+
+ - `revocable`: The `revocable` tier requires the task to run with [revocable](../resource-isolation/#oversubscription)
+ resources.
+ - `preemptible`: Setting the task’s tier to `preemptible` allows for the possibility of that task
+ being [preempted](#preemption) by other tasks when cluster is running low on resources.
+ - `preferred`: The `preferred` tier prevents the task from using [revocable](../resource-isolation/#oversubscription)
+ resources and from being [preempted](#preemption).
+
+Since it is possible that a cluster is configured with a custom tier configuration, users should
+consult their cluster administrator to be informed of the tiers supported by the cluster. Attempts
+to schedule jobs with an unsupported tier will be rejected by the scheduler.
+
+
+Preemption
+----------
+
+In order to guarantee that important production jobs are always running, Aurora supports
+preemption.
+
+Let's consider a pending job that is a candidate for scheduling but a resource shortage
+prevents this. Active tasks can become victims of preemption if:
+
+ - both candidate and victim are owned by the same role and the
+   [priority](../../reference/configuration/#job-objects) of a victim is lower than the
+   [priority](../../reference/configuration/#job-objects) of the candidate.
+ - OR a victim is a `preemptible` or `revocable` [tier](#configuration-tiers) task and the candidate
+   is a `preferred` [tier](#configuration-tiers) task.
+
+In other words, tasks from `preferred` [tier](../../reference/configuration/#job-objects) jobs may
+preempt tasks from any `preemptible` or `revocable` job. However, a `preferred` task may only be
+preempted by tasks from `preferred` jobs in the same role with higher [priority](../../reference/configuration/#job-objects).
+
+Aurora requires resource quotas for [production non-dedicated jobs](../../reference/configuration/#job-objects).
+Quota is enforced at the job role level and, when set, defines a non-preemptible pool of compute resources within
+that role. All job types (service, adhoc or cron) require role resource quota unless a job has a
+[dedicated constraint](../constraints/#dedicated-attribute) set.
+
+To grant quota to a particular role in production, an operator can use the command
+`aurora_admin set_quota`.
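+
+A hypothetical invocation might look like the following (the argument order shown is an assumption
+for illustration; consult `aurora_admin set_quota -h` on your cluster for the exact syntax):
+
+    aurora_admin set_quota devcluster www-data 100.0 256GB 1024GB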
diff --git a/source/documentation/0.17.0/features/resource-isolation.md b/source/documentation/0.17.0/features/resource-isolation.md
new file mode 100644
index 0000000..a5294f6
--- /dev/null
+++ b/source/documentation/0.17.0/features/resource-isolation.md
@@ -0,0 +1,181 @@
+Resource Isolation and Sizing
+==============================
+
+This document assumes Aurora and Mesos have been configured
+using our [recommended resource isolation settings](../../operations/configuration/#resource-isolation).
+
+- [Isolation](#isolation)
+- [Sizing](#sizing)
+- [Oversubscription](#oversubscription)
+
+
+Isolation
+---------
+
+Aurora is a multi-tenant system; a single software instance runs on a
+server, serving multiple clients/tenants. To share resources among
+tenants, it leverages Mesos for isolation of:
+
+* CPU
+* GPU
+* memory
+* disk space
+* ports
+
+CPU is a soft limit, and handled differently from memory and disk space.
+Too low a CPU value results in throttling your application and
+slowing it down. Memory and disk space are both hard limits; when your
+application goes over these values, it's killed.
+
+### CPU Isolation
+
+Mesos can be configured to use a quota-based CPU scheduler (the *Completely
+Fair Scheduler*) to provide consistent and predictable performance.
+This is effectively a guarantee of resources -- you receive at least what
+you requested, but also no more than you've requested.
+
+The scheduler gives applications a CPU quota for every 100 ms interval.
+When an application uses its quota for an interval, it is throttled for
+the rest of the 100 ms. Usage resets for each interval and unused
+quota does not carry over.
+
+For example, an application specifying 4.0 CPU has access to 400 ms of
+CPU time every 100 ms. This CPU quota can be used in different ways,
+depending on the application and available resources. Consider the
+scenarios shown in this diagram.
+
+![CPU Availability](../images/CPUavailability.png)
+
+* *Scenario A*: the application can use up to 4 cores continuously for
+every 100 ms interval. It is never throttled and starts processing
+new requests immediately.
+
+* *Scenario B*: the application uses up to 8 cores (depending on
+availability) but is throttled after 50 ms. The CPU quota resets at the
+start of each new 100 ms interval.
+
+* *Scenario C*: like Scenario A, but there is a garbage collection
+event in the second interval that consumes all CPU quota. The
+application throttles for the remaining 75 ms of that interval and
+cannot service requests until the next interval. In this example, the
+garbage collection finished in one interval but, depending on how much
+garbage needs collecting, it may take more than one interval and further
+delay service of requests.
+
+*Technical Note*: Mesos considers logical cores, also known as
+hyperthreading or SMT cores, as the unit of CPU.
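+
+For reference, the CPU value discussed above is simply the `cpu` field of a task's `Resources`
+object; e.g. the 4-core application from the scenarios would be declared roughly as follows
+(the ram/disk values are placeholders):
+
+```python
+# Sketch: request 4 logical cores, i.e. 400 ms of CPU time per 100 ms interval.
+resources = Resources(cpu = 4.0, ram = 4096*MB, disk = 8192*MB)
+```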
+
+### Memory Isolation
+
+Mesos uses dedicated memory allocation. Your application always has
+access to the amount of memory specified in your configuration. The
+application's memory use is defined as the sum of the resident set size
+(RSS) of all processes in a shard. Each shard is considered
+independently.
+
+In other words, say you specified a memory size of 10GB. Each shard
+would receive 10GB of memory. If an individual shard's memory demands
+exceed 10GB, that shard is killed, but the other shards continue
+working.
+
+*Technical note*: Total memory size is not enforced at allocation time,
+so your application can request more than its allocation without getting
+an ENOMEM. However, it will be killed shortly after.
+
+### Disk Space
+
+Disk space used by your application is defined as the sum of the files'
+disk space in your application's directory, including the `stdout` and
+`stderr` logged from your application. Each shard is considered
+independently. You should use off-node storage for your application's
+data whenever possible.
+
+In other words, say you specified disk space size of 100MB. Each shard
+would receive 100MB of disk space. If an individual shard's disk space
+demands exceed 100MB, that shard is killed, but the other shards
+continue working.
+
+After your application finishes running, its allocated disk space is
+reclaimed. Thus, your job's final action should move any disk content
+that you want to keep, such as logs, to your home file system or other
+less transitory storage. Disk reclamation takes place an undefined
+period after the application finish time; until then, the disk contents
+are still available but you shouldn't count on them being so.
+
+*Technical note*: Disk space is not enforced at write time, so your
+application can write above its quota without getting an ENOSPC, but it
+will be killed shortly after. This is subject to change.
+
+### GPU Isolation
+
+GPU isolation will be supported for Nvidia devices starting from Mesos 1.0.
+Access to the allocated units will be exclusive with no sharing between tasks
+allowed (e.g. no fractional GPU allocation). For more details, see the
+[Mesos design document](https://docs.google.com/document/d/10GJ1A80x4nIEo8kfdeo9B11PIbS1xJrrB4Z373Ifkpo/edit#heading=h.w84lz7p4eexl)
+and the [Mesos agent configuration](http://mesos.apache.org/documentation/latest/configuration/).
+
+### Other Resources
+
+Other resources, such as network bandwidth, do not have any performance
+guarantees. For some resources, such as memory bandwidth, there are no
+practical sharing methods, so some application combinations collocated on
+the same host may cause contention.
+
+
+Sizing
+-------
+
+### CPU Sizing
+
+To correctly size Aurora-run Mesos tasks, specify a per-shard CPU value
+that lets the task run at its desired performance when at peak load
+distributed across all shards. Include reserve capacity of at least 50%,
+possibly more, depending on how critical your service is (or how
+confident you are about your original estimate :-)), ideally by
+increasing the number of shards to also improve resiliency. When running
+your application, observe its CPU stats over time. If consistently at or
+near your quota during peak load, you should consider increasing either
+per-shard CPU or the number of shards.
+
+### Memory Sizing
+
+Size for your application's peak requirement. Observe the per-instance
+memory statistics over time, as memory requirements can vary over
+different periods. Remember that if your application exceeds its memory
+value, it will be killed, so you should also add a safety margin of
+around 10-20%. If you have the ability to do so, you may also want to
+put alerts on the per-instance memory.
+
+### Disk Space Sizing
+
+Size for your application's peak requirement. Rotate and discard log
+files as needed to stay within your quota. When running a Java process,
+add the maximum size of the Java heap to your disk space requirement, in
+order to account for an out of memory error dumping the heap
+into the application's sandbox space.
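+
+As a sketch of how the CPU, memory, and disk guidelines above might combine in practice (all
+numbers are made-up placeholders), a Java service instance could be sized like this:
+
+```python
+# Hypothetical sizing for a Java service instance:
+#  - CPU: measured peak ~2 cores, plus ~50% reserve capacity
+#  - RAM: measured peak ~3 GB, plus a ~15% safety margin (exceeding it kills the shard)
+#  - Disk: ~1 GB of rotated logs, plus the ~4 GB maximum Java heap for a potential heap dump
+resources = Resources(cpu = 3.0, ram = 3584*MB, disk = 5120*MB)
+```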
+
+### GPU Sizing
+
+GPU sizing is highly dependent on your application requirements and is only limited
+by the number of physical GPU units available on a target box.
+
+
+Oversubscription
+----------------
+
+Mesos supports [oversubscription of machine resources](http://mesos.apache.org/documentation/latest/oversubscription/)
+via the concept of revocable tasks. In contrast to non-revocable tasks, revocable tasks are best-effort.
+Mesos reserves the right to throttle or even kill them if they might affect existing high-priority
+user-facing services.
+
+As of today, the only revocable resources supported by Aurora are CPU and RAM. A job can
+opt in to using those by specifying the `revocable` [Configuration Tier](../../features/multitenancy/#configuration-tiers).
+A revocable job will only be scheduled using revocable resources, even if there are plenty of
+non-revocable resources available.
+
+The Aurora scheduler must be [configured to receive revocable offers](../../operations/configuration/#resource-isolation)
+from Mesos and accept revocable jobs. If not configured properly, revocable tasks will never get
+assigned to hosts and will stay in `PENDING`.
+
+For details on how to mark a job as being revocable, see the
+[Configuration Reference](../../reference/configuration/).
diff --git a/source/documentation/0.17.0/features/service-discovery.md b/source/documentation/0.17.0/features/service-discovery.md
new file mode 100644
index 0000000..5a97eaf
--- /dev/null
+++ b/source/documentation/0.17.0/features/service-discovery.md
@@ -0,0 +1,42 @@
+Service Discovery
+=================
+
+It is possible for the Aurora executor to announce tasks into ServerSets for
+the purpose of service discovery.  ServerSets use the Zookeeper [group membership pattern](http://zookeeper.apache.org/doc/trunk/recipes.html#sc_outOfTheBox)
+of which there are several reference implementations:
+
+  - [C++](https://github.com/apache/mesos/blob/master/src/zookeeper/group.cpp)
+  - [Java](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/zookeeper/ServerSetImpl.java#L221)
+  - [Python](https://github.com/twitter/commons/blob/master/src/python/twitter/common/zookeeper/serverset/serverset.py#L51)
+
+These can also be used natively in Finagle using the [ZookeeperServerSetCluster](https://github.com/twitter/finagle/blob/master/finagle-serversets/src/main/scala/com/twitter/finagle/zookeeper/ZookeeperServerSetCluster.scala).
+
+For more information about how to configure announcing, see the [Configuration Reference](../../reference/configuration/).
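+
+As a quick sketch (port names and other values are assumptions for the example), a job opts into
+announcing by adding an `Announcer` object to its configuration:
+
+```python
+# Sketch: announce this service into a ZooKeeper ServerSet, using the 'http' port as
+# the primary port and exposing an 'alias' name that maps to the same port.
+job = Service(
+  cluster = 'devcluster',
+  role = 'www-data',
+  environment = 'devel',
+  name = 'http_example',
+  task = http_task,   # task template defined elsewhere in the config
+  announce = Announcer(primary_port = 'http', portmap = {'alias': 'http'}),
+)
+```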
+
+Using Mesos DiscoveryInfo
+-------------------------
+Aurora includes experimental support for populating DiscoveryInfo in Mesos. This can be used to build a
+custom service discovery system that does not rely on ZooKeeper. Please see the `Service Discovery` section in the
+[Mesos Framework Development guide](http://mesos.apache.org/documentation/latest/app-framework-development-guide/) for an
+explanation of the protobuf message in Mesos.
+
+To use this feature, please enable the `--populate_discovery_info` flag on the scheduler. All jobs started by the scheduler
+afterwards will have their portmap populated to Mesos and discoverable via the `/state` endpoint on the Mesos master and agent.
+
+### Using Mesos DNS
+An example is using [Mesos-DNS](https://github.com/mesosphere/mesos-dns), which is able to generate multiple DNS
+records. With the current implementation, the example job with key `devcluster/vagrant/test/http-example` generates at
+least the following:
+
+1. An A record for `http_example.test.vagrant.aurora.mesos` (which only includes IP address);
+2. A [SRV record](https://en.wikipedia.org/wiki/SRV_record) for
+ `_http_example.test.vagrant._tcp.aurora.mesos`, which includes IP address and every port. This should only
+  be used if the service has one port.
+3. A SRV record `_{port-name}._http_example.test.vagrant._tcp.aurora.mesos` for each port name
+  defined. This should be used when the service has multiple ports.
+
+Things to note:
+
+1. The domain part (".mesos" in the above example) can be configured in [Mesos DNS](http://mesosphere.github.io/mesos-dns/docs/configuration-parameters.html);
+2. Right now, the portmap and port aliases in the announcer object are not reflected in DiscoveryInfo, and are therefore not visible in
+   Mesos DNS records either. This is because they are only resolved in the Thermos executor.
diff --git a/source/documentation/0.17.0/features/services.md b/source/documentation/0.17.0/features/services.md
new file mode 100644
index 0000000..91889d2
--- /dev/null
+++ b/source/documentation/0.17.0/features/services.md
@@ -0,0 +1,116 @@
+Long-running Services
+=====================
+
+Jobs that are always restarted on completion, whether successful or unsuccessful,
+are called services. This is useful for long-running processes
+such as webservices that should always be running, unless stopped explicitly.
+
+
+Service Specification
+---------------------
+
+A job is identified as a service by the presence of the flag
+`service=True` in the [`Job`](../../reference/configuration/#job-objects) object.
+The `Service` alias can be used as shorthand for `Job` with `service=True`.
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/hello_world.aurora
+    hello = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = SequentialTask(
+      processes = [hello],
+      resources = Resources(cpu = 1.0, ram = 128*MB, disk = 128*MB)
+    )
+
+    jobs = [
+      Service(
+        task = task,
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'prod',
+        name = 'hello'
+      )
+    ]
+
+
+Jobs without the service bit set only restart up to `max_task_failures` times and only if they
+terminated unsuccessfully either due to human error or machine failure (see the
+[`Job`](../../reference/configuration/#job-objects) object for details).
+
+
+Ports
+-----
+
+In order to be useful, most services have to bind to one or more ports. Aurora enables this
+use case via the [`thermos.ports` namespace](../../reference/configuration/#thermos-namespace), which
+allows you to request arbitrarily named ports:
+
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[http]}}'
+    )
+
+
+When this process is included in a job, the job will be allocated a port, and the command line
+will be replaced with something like:
+
+    ./run_nginx.sh -port 42816
+
+Where 42816 happens to be the allocated port.
+
+For details on how to enable clients to discover this dynamically assigned port, see our
+[Service Discovery](../service-discovery/) documentation.
+
+
+Health Checking
+---------------
+
+Typically, the Thermos executor monitors processes within a task only by liveness of the forked
+process. In addition to that, Aurora has support for rudimentary health checking: either via HTTP
+or via custom shell scripts.
+
+For example, simply by requesting a `health` port, a process can request to be health checked
+via repeated calls to the `/health` endpoint:
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[health]}}'
+    )
+
+Please see the
+[configuration reference](../../reference/configuration/#healthcheckconfig-objects)
+for configuration options for this feature.
+
+Starting with the 0.17.0 release, job updates can rely solely on task health checks via the new
+`min_consecutive_successes` parameter on the HealthCheckConfig object. This parameter represents
+the number of successful health checks needed before a task is moved into the `RUNNING` state. Tasks
+that do not have enough successful health checks within the first `n` attempts are moved to the
+`FAILED` state, where `n = ceil(initial_interval_secs/interval_secs) + max_consecutive_failures +
+min_consecutive_successes`. In order to accommodate variability during task warm-up, `initial_interval_secs`
+acts as a grace period: any health-check failures during the first `m` attempts are ignored and
+do not count towards `max_consecutive_failures`, where `m = ceil(initial_interval_secs/interval_secs)`.
+
+As [job updates](../job-updates/) are based only on health checks, it is not necessary to set
+`watch_secs` to the worst-case update time; it can instead be set to 0. The scheduler considers a
+task that is in the `RUNNING` state to be healthy and proceeds to update the next batch of instances.
+For details on how to control health checks, please see the
+[HealthCheckConfig](../../reference/configuration/#healthcheckconfig-objects) configuration object.
+Existing jobs that do not configure a health check can fall back to using `watch_secs` to
+monitor a task before considering it healthy.
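+
+A sketch of a health-check driven update configuration (all parameter values are illustrative only):
+
+```python
+# Sketch: health-check driven updates. With these values a task needs 2 consecutive
+# successful checks to reach RUNNING; failures during the first
+# ceil(initial_interval_secs / interval_secs) = 3 attempts are ignored; and the update
+# relies on health checks rather than a watch_secs timer.
+job = Service(
+  cluster = 'devcluster',
+  role = 'www-data',
+  environment = 'devel',
+  name = 'http_example',
+  task = http_task,   # must request a 'health' port, as shown above
+  health_check_config = HealthCheckConfig(
+    initial_interval_secs = 15,
+    interval_secs = 5,
+    min_consecutive_successes = 2,
+    max_consecutive_failures = 2,
+  ),
+  update_config = UpdateConfig(watch_secs = 0),
+)
+```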
+
+You can pause health checking by touching a file inside of your sandbox, named `.healthchecksnooze`.
+As long as that file is present, health checks will be disabled, enabling users to gather core
+dumps or other performance measurements without worrying about Aurora's health check killing
+their process.
+
+WARNING: Remember to remove this when you are done, otherwise your instance will have permanently
+disabled health checks.
diff --git a/source/documentation/0.17.0/features/sla-metrics.md b/source/documentation/0.17.0/features/sla-metrics.md
new file mode 100644
index 0000000..4804c96
--- /dev/null
+++ b/source/documentation/0.17.0/features/sla-metrics.md
@@ -0,0 +1,215 @@
+Aurora SLA Measurement
+======================
+
+- [Overview](#overview)
+- [Metric Details](#metric-details)
+  - [Platform Uptime](#platform-uptime)
+  - [Job Uptime](#job-uptime)
+  - [Median Time To Assigned (MTTA)](#median-time-to-assigned-\(mtta\))
+  - [Median Time To Starting (MTTS)](#median-time-to-starting-\(mtts\))
+  - [Median Time To Running (MTTR)](#median-time-to-running-\(mttr\))
+- [Limitations](#limitations)
+
+## Overview
+
+The primary goal of the feature is collection and monitoring of Aurora job SLA (Service Level
+Agreement) metrics that define a contractual relationship between the Aurora/Mesos platform
+and hosted services.
+
+The Aurora SLA feature is by default only enabled for service (non-cron)
+production jobs (`"production=True"` in your `.aurora` config). It can be enabled for
+non-production services by an operator via the scheduler command line flag `-sla_non_prod_metrics`.
+
+Counters that track SLA measurements are computed periodically within the scheduler.
+The individual instance metrics are refreshed every minute (configurable via
+`sla_stat_refresh_interval`). The instance counters are subsequently aggregated by
+relevant grouping types before being exported to the scheduler `/vars` endpoint (when using `vagrant`
+that would be `http://192.168.33.7:8081/vars`).
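+
+For example, when using the Vagrant devcluster you could spot-check an exported counter along
+these lines (which metrics appear depends on the jobs running in the cluster):
+
+    curl -s http://192.168.33.7:8081/vars | grep sla_cluster_platform_uptime_percent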
+
+
+## Metric Details
+
+### Platform Uptime
+
+*Aggregate amount of time a job spends in a non-runnable state due to platform unavailability
+or scheduling delays. This metric tracks Aurora/Mesos uptime performance and reflects on any
+system-caused downtime events (tasks LOST or DRAINED). Any user-initiated task kills/restarts
+will not degrade this metric.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_platform_uptime_percent`
+* Per cluster - `sla_cluster_platform_uptime_percent`
+
+**Units:** percent
+
+A fault in the task environment may cause Aurora and Mesos to have different views of the task state
+or to lose track of the task's existence. In such cases, the service task is marked as LOST and
+rescheduled by Aurora. For example, this may happen when the task stays in ASSIGNED or STARTING
+for too long or the Mesos agent becomes unhealthy (or disappears completely). The time between
+task entering LOST and its replacement reaching RUNNING state is counted towards platform downtime.
+
+Another example of a platform downtime event is the administrator-requested task rescheduling. This
+happens during planned Mesos agent maintenance when all agent tasks are marked as DRAINED and
+rescheduled elsewhere.
+
+To accurately calculate Platform Uptime, we must separate platform incurred downtime from user
+actions that put a service instance in a non-operational state. It is simpler to isolate
+user-incurred downtime and treat all other downtime as platform incurred.
+
+Currently, a user can cause a healthy service (task) downtime in only two ways: via `killTasks`
+or `restartShards` RPCs. For both, their affected tasks leave an audit state transition trail
+relevant to uptime calculations. By applying a special "SLA meaning" to exposed task state
+transition records, we can build a deterministic downtime trace for every given service instance.
+
+A task going through a state transition carries one of three possible SLA meanings
+(see [SlaAlgorithm.java](https://github.com/apache/aurora/blob/rel/0.17.0/src/main/java/org/apache/aurora/scheduler/sla/SlaAlgorithm.java) for
+sla-to-task-state mapping):
+
+* Task is UP: starts a period where the task is considered to be up and running from the Aurora
+  platform standpoint.
+
+* Task is DOWN: starts a period where the task cannot reach the UP state for some
+  non-user-related reason. Counts towards instance downtime.
+
+* Task is REMOVED from SLA: starts a period where the task is not expected to be UP due to
+  user initiated action or failure. We ignore this period for the uptime calculation purposes.
+
+This metric is recalculated over the last sampling period (last minute) to account for
+any UP/DOWN/REMOVED events. It ignores any UP/DOWN events not immediately adjacent to the
+sampling interval as well as adjacent REMOVED events.
+
+### Job Uptime
+
+*Percentage of the job instances considered to be in the RUNNING state for the specified duration
+relative to request time. This is a purely application-side metric that considers the aggregate
+uptime of all RUNNING instances. Any user- or platform-initiated restarts directly affect
+this metric.*
+
+**Collection scope:** We currently expose job uptime values at 5 pre-defined
+percentiles (50th, 75th, 90th, 95th, and 99th):
+
+* `sla_<job_key>_job_uptime_50_00_sec`
+* `sla_<job_key>_job_uptime_75_00_sec`
+* `sla_<job_key>_job_uptime_90_00_sec`
+* `sla_<job_key>_job_uptime_95_00_sec`
+* `sla_<job_key>_job_uptime_99_00_sec`
+
+**Units:** seconds
+
+You can also get customized real-time stats from the Aurora client. See `aurora sla -h` for
+more details.
+
+### Median Time To Assigned (MTTA)
+
+*Median time a job spends waiting for its tasks to be assigned to a host. This is a combined
+metric that helps track the dependency of scheduling performance on the requested resources
+(user scope) as well as the internal scheduler bin-packing algorithm efficiency (platform scope).*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtta_ms`
+* Per cluster - `sla_cluster_mtta_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.17.0/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mtta_ms`
+    * `sla_cpu_medium_mtta_ms`
+    * `sla_cpu_large_mtta_ms`
+    * `sla_cpu_xlarge_mtta_ms`
+    * `sla_cpu_xxlarge_mtta_ms`
+  * By RAM:
+    * `sla_ram_small_mtta_ms`
+    * `sla_ram_medium_mtta_ms`
+    * `sla_ram_large_mtta_ms`
+    * `sla_ram_xlarge_mtta_ms`
+    * `sla_ram_xxlarge_mtta_ms`
+  * By DISK:
+    * `sla_disk_small_mtta_ms`
+    * `sla_disk_medium_mtta_ms`
+    * `sla_disk_large_mtta_ms`
+    * `sla_disk_xlarge_mtta_ms`
+    * `sla_disk_xxlarge_mtta_ms`
+
+**Units:** milliseconds
+
+MTTA only considers instances that have already reached ASSIGNED state and ignores those
+that are still PENDING. This ensures straggler instances (e.g. with unreasonable resource
+constraints) do not affect metric curves.
+
+### Median Time To Starting (MTTS)
+
+*Median time a job waits for its tasks to reach the STARTING state. This is a comprehensive metric
+reflecting the overall time it takes for Aurora/Mesos to start initializing the sandbox
+for a task.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtts_ms`
+* Per cluster - `sla_cluster_mtts_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.17.0/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mtts_ms`
+    * `sla_cpu_medium_mtts_ms`
+    * `sla_cpu_large_mtts_ms`
+    * `sla_cpu_xlarge_mtts_ms`
+    * `sla_cpu_xxlarge_mtts_ms`
+  * By RAM:
+    * `sla_ram_small_mtts_ms`
+    * `sla_ram_medium_mtts_ms`
+    * `sla_ram_large_mtts_ms`
+    * `sla_ram_xlarge_mtts_ms`
+    * `sla_ram_xxlarge_mtts_ms`
+  * By DISK:
+    * `sla_disk_small_mtts_ms`
+    * `sla_disk_medium_mtts_ms`
+    * `sla_disk_large_mtts_ms`
+    * `sla_disk_xlarge_mtts_ms`
+    * `sla_disk_xxlarge_mtts_ms`
+
+**Units:** milliseconds
+
+MTTS only considers instances in STARTING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+### Median Time To Running (MTTR)
+
+*Median time a job waits for its tasks to reach the RUNNING state. This is a comprehensive metric
+reflecting the overall time it takes for Aurora/Mesos to start executing user content.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mttr_ms`
+* Per cluster - `sla_cluster_mttr_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.17.0/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mttr_ms`
+    * `sla_cpu_medium_mttr_ms`
+    * `sla_cpu_large_mttr_ms`
+    * `sla_cpu_xlarge_mttr_ms`
+    * `sla_cpu_xxlarge_mttr_ms`
+  * By RAM:
+    * `sla_ram_small_mttr_ms`
+    * `sla_ram_medium_mttr_ms`
+    * `sla_ram_large_mttr_ms`
+    * `sla_ram_xlarge_mttr_ms`
+    * `sla_ram_xxlarge_mttr_ms`
+  * By DISK:
+    * `sla_disk_small_mttr_ms`
+    * `sla_disk_medium_mttr_ms`
+    * `sla_disk_large_mttr_ms`
+    * `sla_disk_xlarge_mttr_ms`
+    * `sla_disk_xxlarge_mttr_ms`
+
+**Units:** milliseconds
+
+MTTR only considers instances in RUNNING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+## Limitations
+
+* The availability of Aurora SLA metrics is bound by the scheduler availability.
+
+* All metrics are calculated at a pre-defined interval (currently set at 1 minute).
+  Scheduler restarts may result in missed collections.
diff --git a/source/documentation/0.17.0/features/webhooks.md b/source/documentation/0.17.0/features/webhooks.md
new file mode 100644
index 0000000..075aeec
--- /dev/null
+++ b/source/documentation/0.17.0/features/webhooks.md
@@ -0,0 +1,80 @@
+Webhooks
+========
+
+Aurora has an optional feature that allows an operator to specify a file to configure an HTTP webhook
+to receive task state change events. It can be enabled with a scheduler flag, e.g.
+`-webhook_config=/path/to/webhook.json`. At this point, webhooks are still considered *experimental*.
+
+Below is a sample configuration:
+
+```json
+{
+  "headers": {
+    "Content-Type": "application/vnd.kafka.json.v1+json",
+    "Producer-Type": "reliable"
+  },
+  "targetURL": "http://localhost:5000/",
+  "timeoutMsec": 5
+}
+```
+
+And an example of a task state change event that your webhook endpoint will receive:
+
+```json
+{
+    "task":
+    {
+        "cachedHashCode":0,
+        "assignedTask": {
+            "cachedHashCode":0,
+            "taskId":"vagrant-test-http_example-8-a6cf7ec5-d793-49c7-b10f-0e14ab80bfff",
+            "task": {
+                "cachedHashCode":-1819348376,
+                "job": {
+                    "cachedHashCode":803049425,
+                    "role":"vagrant",
+                    "environment":"test",
+                    "name":"http_example"
+                    },
+                "owner": {
+                    "cachedHashCode":226895216,
+                    "user":"vagrant"
+                    },
+                "isService":true,
+                "numCpus":0.1,
+                "ramMb":16,
+                "diskMb":8,
+                "priority":0,
+                "maxTaskFailures":1,
+                "production":false,
+                "resources":[
+                    {"cachedHashCode":729800451,"setField":"NUM_CPUS","value":0.1},
+                    {"cachedHashCode":552899914,"setField":"RAM_MB","value":16},
+                    {"cachedHashCode":-1547868317,"setField":"DISK_MB","value":8},
+                    {"cachedHashCode":1957328227,"setField":"NAMED_PORT","value":"http"},
+                    {"cachedHashCode":1954229436,"setField":"NAMED_PORT","value":"tcp"}
+                    ],
+                "constraints":[],
+                "requestedPorts":["http","tcp"],
+                "taskLinks":{"http":"http://%host%:%port:http%"},
+                "contactEmail":"vagrant@localhost",
+                "executorConfig": {
+                    "cachedHashCode":-1194797325,
+                    "name":"AuroraExecutor",
+                    "data": "{\"environment\": \"test\", \"health_check_config\": {\"initial_interval_secs\": 5.0, \"health_checker\": { \"http\": {\"expected_response_code\": 0, \"endpoint\": \"/health\", \"expected_response\": \"ok\"}}, \"max_consecutive_failures\": 0, \"timeout_secs\": 1.0, \"interval_secs\": 1.0}, \"name\": \"http_example\", \"service\": true, \"max_task_failures\": 1, \"cron_collision_policy\": \"KILL_EXISTING\", \"enable_hooks\": false, \"cluster\": \"devcluster\", \"task\": {\"processes\": [{\"daemon\": false, \"name\": \"echo_ports\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"echo \\\"tcp port: {{thermos.ports[tcp]}}; http port: {{thermos.ports[http]}}; alias: {{thermos.ports[alias]}}\\\"\", \"final\": false}, {\"daemon\": false, \"name\": \"stage_server\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"cp /vagrant/src/test/sh/org/apache/aurora/e2e/http_example.py .\", \"final\": false}, {\"daemon\": false, \"name\": \"run_server\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"python http_example.py {{thermos.ports[http]}}\", \"final\": false}], \"name\": \"http_example\", \"finalization_wait\": 30, \"max_failures\": 1, \"max_concurrency\": 0, \"resources\": {\"disk\": 8388608, \"ram\": 16777216, \"cpu\": 0.1}, \"constraints\": [{\"order\": [\"echo_ports\", \"stage_server\", \"run_server\"]}]}, \"production\": false, \"role\": \"vagrant\", \"contact\": \"vagrant@localhost\", \"announce\": {\"primary_port\": \"http\", \"portmap\": {\"alias\": \"http\"}}, \"lifecycle\": {\"http\": {\"graceful_shutdown_endpoint\": \"/quitquitquit\", \"port\": \"health\", \"shutdown_endpoint\": \"/abortabortabort\"}}, \"priority\": 0}"},
+                    "metadata":[],
+                    "container":{
+                        "cachedHashCode":-1955376216,
+                        "setField":"MESOS",
+                        "value":{"cachedHashCode":31}}
+                    },
+                    "assignedPorts":{},
+                    "instanceId":8
+        },
+        "status":"PENDING",
+        "failureCount":0,
+        "taskEvents":[
+            {"cachedHashCode":0,"timestamp":1464992060258,"status":"PENDING","scheduler":"aurora"}]
+        },
+        "oldState":{}}
+```
+
diff --git a/source/documentation/0.17.0/getting-started/overview.md b/source/documentation/0.17.0/getting-started/overview.md
new file mode 100644
index 0000000..84c5916
--- /dev/null
+++ b/source/documentation/0.17.0/getting-started/overview.md
@@ -0,0 +1,112 @@
+Aurora System Overview
+======================
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+
+Components
+----------
+
+It is important to have an understanding of the components that make up
+a functioning Aurora cluster.
+
+![Aurora Components](../images/components.png)
+
+* **Aurora scheduler**
+  The scheduler is your primary interface to the work you run in your cluster.  You will
+  instruct it to run jobs, and it will manage them in Mesos for you.  You will also frequently use
+  the scheduler's read-only web interface as a heads-up display for what's running in your cluster.
+
+* **Aurora client**
+  The client (`aurora` command) is a command line tool that exposes primitives that you can use to
+  interact with the scheduler. The client operates on jobs.
+
+  Aurora also provides an admin client (`aurora_admin` command) that contains commands built for
+  cluster administrators.  You can use this tool to do things like manage user quotas and manage
+  graceful maintenance on machines in the cluster.
+
+* **Aurora executor**
+  The executor (a.k.a. Thermos executor) is responsible for carrying out the workloads described in
+  the Aurora DSL (`.aurora` files).  The executor is what actually executes user processes.  It will
+  also perform health checking of tasks and register tasks in ZooKeeper for the purposes of dynamic
+  service discovery.
+
+* **Aurora observer**
+  The observer provides browser-based access to the status of individual tasks executing on worker
+  machines.  It gives insight into the processes executing, and facilitates browsing of task sandbox
+  directories.
+
+* **ZooKeeper**
+  [ZooKeeper](http://zookeeper.apache.org) is a distributed consensus system.  In an Aurora cluster
+  it is used for reliable election of the leading Aurora scheduler and Mesos master.  It is also
+  used as a vehicle for service discovery, see [Service Discovery](../../features/service-discovery/)
+
+* **Mesos master**
+  The master is responsible for tracking worker machines and performing accounting of their
+  resources.  The scheduler interfaces with the master to control the cluster.
+
+* **Mesos agent**
+  The agent receives work assigned by the scheduler and executes it.  It interfaces with Linux
+  isolation systems like cgroups, namespaces and Docker to manage the resource consumption of tasks.
+  When a user task is launched, the agent will launch the executor (in the context of a Linux cgroup
+  or Docker container depending upon the environment), which will in turn fork user processes.
+
+  In earlier versions of Mesos and Aurora, the Mesos agent was known as the Mesos slave.
+
+
+Jobs, Tasks and Processes
+--------------------------
+
+Aurora is a Mesos framework used to schedule *jobs* onto Mesos. Mesos
+cares about individual *tasks*, but typical jobs consist of dozens or
+hundreds of task replicas. Aurora provides a layer on top of Mesos with
+its `Job` abstraction. An Aurora `Job` consists of a task template and
+instructions for creating near-identical replicas of that task (modulo
+things like "instance id" or specific port numbers which may differ from
+machine to machine).
+
+How many tasks make up a Job can vary. At a basic level, a Job consists of
+one task template and instructions for creating near-identical replicas of that task
+(otherwise referred to as "instances" or "shards").
+
+A task can merely be a single *process* corresponding to a single
+command line, such as `python2.7 my_script.py`. However, a task can also
+consist of many separate processes, which all run within a single
+sandbox. For example, running multiple cooperating agents together,
+such as `logrotate`, `installer`, master, or agent processes. This is
+where Thermos comes in. While Aurora provides a `Job` abstraction on
+top of Mesos `Tasks`, Thermos provides a `Process` abstraction
+underneath Mesos `Task`s and serves as part of the Aurora framework's
+executor.
+
+You define `Job`s, `Task`s, and `Process`es in a configuration file.
+Configuration files are written in Python, and make use of the
+[Pystachio](https://github.com/wickman/pystachio) templating language,
+along with specific Aurora, Mesos, and Thermos commands and methods.
+The configuration files typically end with a `.aurora` extension.
+
+Summary:
+
+* Aurora manages jobs made of tasks.
+* Mesos manages tasks made of processes.
+* Thermos manages processes.
+* All of the above is defined in `.aurora` configuration files.
+
+![Aurora hierarchy](../images/aurora_hierarchy.png)
+
+Each `Task` has a *sandbox* created when the `Task` starts and garbage
+collected when it finishes. All of a `Task`'s processes run in its
+sandbox, so processes can share state by using a shared current working
+directory.
+
+The sandbox garbage collection policy considers many factors, most
+importantly age and size. It makes a best-effort attempt to keep
+sandboxes around as long as possible post-task in order for service
+owners to inspect data and logs, should the `Task` have completed
+abnormally. But you can't design your applications assuming sandboxes
+will be around forever; instead, build log saving or other
+checkpointing mechanisms directly into your application or into your
+`Job` description.
+
diff --git a/source/documentation/0.17.0/getting-started/tutorial.md b/source/documentation/0.17.0/getting-started/tutorial.md
new file mode 100644
index 0000000..1f037fc
--- /dev/null
+++ b/source/documentation/0.17.0/getting-started/tutorial.md
@@ -0,0 +1,258 @@
+# Aurora Tutorial
+
+This tutorial shows how to use the Aurora scheduler to run (and "`printf-debug`")
+a hello world program on Mesos. This is the recommended document for new Aurora users
+to start getting up to speed on the system.
+
+- [Prerequisite](#prerequisite)
+- [The Script](#the-script)
+- [Aurora Configuration](#aurora-configuration)
+- [Creating the Job](#creating-the-job)
+- [Watching the Job Run](#watching-the-job-run)
+- [Cleanup](#cleanup)
+- [Next Steps](#next-steps)
+
+
+## Prerequisite
+
+This tutorial assumes you are running [Aurora locally using Vagrant](../vagrant/).
+However, in general the instructions are also applicable to any other
+[Aurora installation](../../operations/installation/).
+
+Unless otherwise stated, all commands are to be run from the root of the aurora
+repository clone.
+
+
+## The Script
+
+Our "hello world" application is a simple Python script that loops
+forever, displaying the time every few seconds. Copy the code below and
+put it in a file named `hello_world.py` in the root of your Aurora repository clone
+(Note: this directory is the same as `/vagrant` inside the Vagrant VMs).
+
+The script has an intentional bug, which we will explain later on.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+import time
+
+def main():
+  SLEEP_DELAY = 10
+  # Python experts - ignore this blatant bug.
+  for i in xrang(100):
+    print("Hello world! The time is now: %s. Sleeping for %d secs" % (
+      time.asctime(), SLEEP_DELAY))
+    time.sleep(SLEEP_DELAY)
+
+if __name__ == "__main__":
+  main()
+```
+
+## Aurora Configuration
+
+Once we have our script/program, we need to create a *configuration
+file* that tells Aurora how to manage and launch our Job. Save the below
+code in the file `hello_world.aurora`.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+pkg_path = '/vagrant/hello_world.py'
+
+# we use a trick here to make the configuration change with
+# the contents of the file, for simplicity.  in a normal setting, packages would be
+# versioned, and the version number would be changed in the configuration.
+import hashlib
+with open(pkg_path, 'rb') as f:
+  pkg_checksum = hashlib.md5(f.read()).hexdigest()
+
+# copy hello_world.py into the local sandbox
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s . && echo %s && chmod +x hello_world.py' % (pkg_path, pkg_checksum))
+
+# run the script
+hello_world = Process(
+  name = 'hello_world',
+  cmdline = 'python -u hello_world.py')
+
+# describe the task
+hello_world_task = SequentialTask(
+  processes = [install, hello_world],
+  resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB))
+
+jobs = [
+  Service(cluster = 'devcluster',
+          environment = 'devel',
+          role = 'www-data',
+          name = 'hello_world',
+          task = hello_world_task)
+]
+```
+
+There is a lot going on in that configuration file:
+
+1. From a "big picture" viewpoint, it first defines two
+Processes. Then it defines a Task that runs the two Processes in the
+order specified in the Task definition, as well as specifying what
+computational and memory resources are available for them.  Finally,
+it defines a Job that will schedule the Task on available and suitable
+machines. This Job is the sole member of a list of Jobs; you can
+specify more than one Job in a config file.
+
+2. At the Process level, it specifies how to get your code into the
+local sandbox in which it will run. It then specifies how the code is
+actually run once the second Process starts.
+
+For more about Aurora configuration files, see the [Configuration
+Tutorial](../../reference/configuration-tutorial/) and the [Configuration
+Reference](../../reference/configuration/) (preferably after finishing this
+tutorial).
+
+
+## Creating the Job
+
+We're ready to launch our job! To do so, we use the Aurora Client to
+issue a Job creation request to the Aurora scheduler.
+
+Many Aurora Client commands take a *job key* argument, which uniquely
+identifies a Job. A job key consists of four parts, each separated by a
+"/". The four parts are  `<cluster>/<role>/<environment>/<jobname>`
+in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts existing on the agent machines. If you
+don't know what accounts are available, contact your sysadmin.
+* Environment names are namespaces; you can count on `test`, `devel`,
+`staging` and `prod` existing.
+* Jobname is the custom name of your job.
+
+When comparing two job keys, if any of the four parts is different from
+its counterpart in the other key, then the two job keys identify two separate
+jobs. If all four values are identical, the job keys identify the same job.
+
+The `clusters.json` [client configuration](../../reference/client-cluster-configuration/)
+for the Aurora scheduler defines the available cluster names.
+For Vagrant, from the top-level of your Aurora repository clone, do:
+
+    $ vagrant ssh
+
+Followed by:
+
+    vagrant@aurora:~$ cat /etc/aurora/clusters.json
+
+You'll see something like the following. The `name` value shown here corresponds to a job key's cluster value.
+
+```javascript
+[{
+  "name": "devcluster",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler",
+  "auth_mechanism": "UNAUTHENTICATED",
+  "slave_run_directory": "latest",
+  "slave_root": "/var/lib/mesos"
+}]
+```
+
+The Aurora Client command that actually runs our Job is `aurora job create`. It creates a Job as
+specified by its job key and configuration file arguments and runs it.
+
+    aurora job create <cluster>/<role>/<environment>/<jobname> <config_file>
+
+Or for our example:
+
+    aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+After entering our virtual machine using `vagrant ssh`, running this command returns:
+
+    vagrant@aurora:~$ aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Creating job hello_world
+     INFO] Checking status of devcluster/www-data/devel/hello_world
+    Job create succeeded: job url=http://aurora.local:8081/scheduler/www-data/devel/hello_world
+
+
+## Watching the Job Run
+
+Now that our job is running, let's see what it's doing. Access the
+scheduler web interface at `http://$scheduler_hostname:$scheduler_port/scheduler`,
+or when using `vagrant`, `http://192.168.33.7:8081/scheduler`.
+First we see what Jobs are scheduled:
+
+![Scheduled Jobs](../images/ScheduledJobs.png)
+
+Click on your user name, which in this case was `www-data`, and we see the Jobs associated
+with that role:
+
+![Role Jobs](../images/RoleJobs.png)
+
+If you click on your `hello_world` Job, you'll see:
+
+![hello_world Job](../images/HelloWorldJob.png)
+
+Oops, looks like our first job didn't quite work! The task is temporarily throttled for
+having failed on every attempt of the Aurora scheduler to run it. We have to figure out
+what is going wrong.
+
+On the Completed tasks tab, we see all past attempts of the Aurora scheduler to run our job.
+
+![Completed tasks tab](../images/CompletedTasks.png)
+
+We can navigate to the Task page of a failed run by clicking on the host link.
+
+![Task page](../images/TaskBreakdown.png)
+
+Once there, we see that the `hello_world` process failed. The Task page
+captures the standard error and standard output streams and makes them available.
+Clicking through to `stderr` on the failed `hello_world` process, we see what happened.
+
+![stderr page](../images/stderr.png)
+
+It looks like we made a typo in our Python script. We wanted `xrange`,
+not `xrang`. Edit the `hello_world.py` script to use the correct function
+and save it as `hello_world_v2.py`. Then update the `hello_world.aurora`
+configuration to the newest version.
+
+In order to try again, we can now instruct the scheduler to update our job:
+
+    vagrant@aurora:~$ aurora update start devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Starting update for: hello_world
+    Job update has started. View your update progress at http://aurora.local:8081/scheduler/www-data/devel/hello_world/update/8ef38017-e60f-400d-a2f2-b5a8b724e95b
+
+This time, the task comes up.
+
+![Running Job](../images/RunningJob.png)
+
+By again clicking on the host, we inspect the Task page, and see that the
+`hello_world` process is running.
+
+![Running Task page](../images/runningtask.png)
+
+We then inspect the output by clicking on `stdout` and see our process'
+output:
+
+![stdout page](../images/stdout.png)
+
+## Cleanup
+
+Now that we're done, we kill the job using the Aurora client:
+
+    vagrant@aurora:~$ aurora job killall devcluster/www-data/devel/hello_world
+     INFO] Killing tasks for job: devcluster/www-data/devel/hello_world
+     INFO] Instances to be killed: [0]
+    Successfully killed instances [0]
+    Job killall succeeded
+
+The job page now shows the `hello_world` tasks as completed.
+
+![Killed Task page](../images/killedtask.png)
+
+## Next Steps
+
+Now that you've finished this Tutorial, you should read or do the following:
+
+- [The Aurora Configuration Tutorial](../../reference/configuration-tutorial/), which provides more examples
+  and best practices for writing Aurora configurations. You should also look at
+  the [Aurora Configuration Reference](../../reference/configuration/).
+- Explore the Aurora Client - use `aurora -h`, and read the
+  [Aurora Client Commands](../../reference/client-commands/) document.
diff --git a/source/documentation/0.17.0/getting-started/vagrant.md b/source/documentation/0.17.0/getting-started/vagrant.md
new file mode 100644
index 0000000..26b9a80
--- /dev/null
+++ b/source/documentation/0.17.0/getting-started/vagrant.md
@@ -0,0 +1,154 @@
+A local Cluster with Vagrant
+============================
+
+This document shows you how to configure a complete cluster using a virtual machine. This setup
+replicates a real cluster in your development machine as closely as possible. After you complete
+the steps outlined here, you will be ready to create and run your first Aurora job.
+
+The following sections describe these steps in detail:
+
+1. [Overview](#overview)
+1. [Install VirtualBox and Vagrant](#install-virtualbox-and-vagrant)
+1. [Clone the Aurora repository](#clone-the-aurora-repository)
+1. [Start the local cluster](#start-the-local-cluster)
+1. [Log onto the VM](#log-onto-the-vm)
+1. [Run your first job](#run-your-first-job)
+1. [Rebuild components](#rebuild-components)
+1. [Shut down or delete your local cluster](#shut-down-or-delete-your-local-cluster)
+1. [Troubleshooting](#troubleshooting)
+
+
+Overview
+--------
+
+The Aurora distribution includes a set of scripts that enable you to create a local cluster in
+your development machine. These scripts use [Vagrant](https://www.vagrantup.com/) and
+[VirtualBox](https://www.virtualbox.org/) to run and configure a virtual machine. Once the
+virtual machine is running, the scripts install and initialize Aurora and any required components
+to create the local cluster.
+
+
+Install VirtualBox and Vagrant
+------------------------------
+
+First, download and install [VirtualBox](https://www.virtualbox.org/) on your development machine.
+
+Then download and install [Vagrant](https://www.vagrantup.com/). To verify that the installation
+was successful, open a terminal window and type the `vagrant` command. You should see a list of
+common commands for this tool.
+
+
+Clone the Aurora repository
+---------------------------
+
+To obtain the Aurora source distribution, clone its Git repository using the following command:
+
+     git clone git://git.apache.org/aurora.git
+
+
+Start the local cluster
+-----------------------
+
+Now change into the `aurora/` directory, which contains the Aurora source code and
+other scripts and tools:
+
+     cd aurora/
+
+To start the local cluster, type the following command:
+
+     vagrant up
+
+This command uses the configuration scripts in the Aurora distribution to:
+
+* Download a Linux system image.
+* Start a virtual machine (VM) and configure it.
+* Install the required build tools on the VM.
+* Install Aurora's requirements (like [Mesos](http://mesos.apache.org/) and
+[Zookeeper](http://zookeeper.apache.org/)) on the VM.
+* Build and install Aurora from source on the VM.
+* Start Aurora's services on the VM.
+
+This process takes several minutes to complete.
+
+You may notice a warning that guest additions in the VM don't match your version of VirtualBox.
+This should generally be harmless, but you may wish to install a vagrant plugin to take care of
+mismatches like this for you:
+
+     vagrant plugin install vagrant-vbguest
+
+With this plugin installed, whenever you `vagrant up` the plugin will upgrade the guest additions
+for you when a version mis-match is detected. You can read more about the plugin
+[here](https://github.com/dotless-de/vagrant-vbguest).
+
+To verify that Aurora is running on the cluster, visit the following URLs:
+
+* Scheduler - http://192.168.33.7:8081
+* Observer - http://192.168.33.7:1338
+* Mesos Master - http://192.168.33.7:5050
+* Mesos Agent - http://192.168.33.7:5051
+
+
+Log onto the VM
+---------------
+
+To SSH into the VM, run the following command in your development machine:
+
+     vagrant ssh
+
+To verify that Aurora is installed in the VM, type the `aurora` command. You should see a list
+of arguments and possible commands.
+
+The `/vagrant` directory on the VM is mapped to the `aurora/` local directory
+from which you started the cluster. You can edit files inside this directory in your development
+machine and access them from the VM under `/vagrant`.
+
+A pre-installed `clusters.json` file refers to your local cluster as `devcluster`, which you
+will use in client commands.
+
+
+Run your first job
+------------------
+
+Now that your cluster is up and running, you are ready to define and run your first job in Aurora.
+For more information, see the [Aurora Tutorial](../tutorial/).
+
+
+Rebuild components
+------------------
+
+If you are changing Aurora code and would like to rebuild a component, you can use the `aurorabuild`
+command on the VM to build and restart a component.  This is considerably faster than destroying
+and rebuilding your VM.
+
+`aurorabuild` accepts a list of components to build and update. To get a list of supported
+components, invoke the `aurorabuild` command with no arguments. For example, to rebuild and
+restart the client:
+
+     vagrant ssh -c 'aurorabuild client'
+
+
+Shut down or delete your local cluster
+--------------------------------------
+
+To shut down your local cluster, run the `vagrant halt` command in your development machine. To
+start it again, run the `vagrant up` command.
+
+Once you are finished with your local cluster, or if you would otherwise like to start from scratch,
+you can use the command `vagrant destroy` to turn off and delete the virtual file system.
+
+
+Troubleshooting
+---------------
+
+Most Vagrant-related problems can be fixed with the following steps:
+
+* Destroying the vagrant environment with `vagrant destroy`
+* Killing any orphaned VMs (see AURORA-499) with `virtualbox` UI or `VBoxManage` command line tool
+* Cleaning the repository of build artifacts and other intermediate output with `git clean -fdx`
+* Bringing up the vagrant environment with `vagrant up`
+
+If that still doesn't solve your problem, make sure to inspect the log files:
+
+* Scheduler: `/var/log/upstart/aurora-scheduler.log`
+* Observer: `/var/log/upstart/aurora-thermos-observer.log`
+* Mesos Master: `/var/log/mesos/mesos-master.INFO` (also see `.WARNING` and `.ERROR`)
+* Mesos Agent: `/var/log/mesos/mesos-slave.INFO` (also see `.WARNING` and `.ERROR`)
diff --git a/publish/documentation/latest/images/CPUavailability.png b/source/documentation/0.17.0/images/CPUavailability.png
similarity index 100%
copy from publish/documentation/latest/images/CPUavailability.png
copy to source/documentation/0.17.0/images/CPUavailability.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/CompletedTasks.png b/source/documentation/0.17.0/images/CompletedTasks.png
new file mode 100644
index 0000000..343e111
--- /dev/null
+++ b/source/documentation/0.17.0/images/CompletedTasks.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/HelloWorldJob.png b/source/documentation/0.17.0/images/HelloWorldJob.png
new file mode 100644
index 0000000..4977227
--- /dev/null
+++ b/source/documentation/0.17.0/images/HelloWorldJob.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/RoleJobs.png b/source/documentation/0.17.0/images/RoleJobs.png
new file mode 100644
index 0000000..95ffa49
--- /dev/null
+++ b/source/documentation/0.17.0/images/RoleJobs.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/RunningJob.png b/source/documentation/0.17.0/images/RunningJob.png
new file mode 100644
index 0000000..4e7a045
--- /dev/null
+++ b/source/documentation/0.17.0/images/RunningJob.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/ScheduledJobs.png b/source/documentation/0.17.0/images/ScheduledJobs.png
new file mode 100644
index 0000000..0be4731
--- /dev/null
+++ b/source/documentation/0.17.0/images/ScheduledJobs.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/TaskBreakdown.png b/source/documentation/0.17.0/images/TaskBreakdown.png
new file mode 100644
index 0000000..fab702c
--- /dev/null
+++ b/source/documentation/0.17.0/images/TaskBreakdown.png
Binary files differ
diff --git a/publish/documentation/latest/images/aurora_hierarchy.png b/source/documentation/0.17.0/images/aurora_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/aurora_hierarchy.png
copy to source/documentation/0.17.0/images/aurora_hierarchy.png
Binary files differ
diff --git a/publish/assets/img/aurora_logo.png b/source/documentation/0.17.0/images/aurora_logo.png
similarity index 100%
copy from publish/assets/img/aurora_logo.png
copy to source/documentation/0.17.0/images/aurora_logo.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/components.odg b/source/documentation/0.17.0/images/components.odg
new file mode 100644
index 0000000..a0f3b28
--- /dev/null
+++ b/source/documentation/0.17.0/images/components.odg
Binary files differ
diff --git a/source/documentation/0.17.0/images/components.png b/source/documentation/0.17.0/images/components.png
new file mode 100644
index 0000000..e8228c0
--- /dev/null
+++ b/source/documentation/0.17.0/images/components.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/debug-client-test.png b/source/documentation/0.17.0/images/debug-client-test.png
new file mode 100644
index 0000000..8b67a10
--- /dev/null
+++ b/source/documentation/0.17.0/images/debug-client-test.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/debugging-client-test.png b/source/documentation/0.17.0/images/debugging-client-test.png
new file mode 100644
index 0000000..df7b7e3
--- /dev/null
+++ b/source/documentation/0.17.0/images/debugging-client-test.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/killedtask.png b/source/documentation/0.17.0/images/killedtask.png
new file mode 100644
index 0000000..bb1bc92
--- /dev/null
+++ b/source/documentation/0.17.0/images/killedtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/lifeofatask.png b/source/documentation/0.17.0/images/lifeofatask.png
similarity index 100%
copy from publish/documentation/latest/images/lifeofatask.png
copy to source/documentation/0.17.0/images/lifeofatask.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png b/source/documentation/0.17.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
new file mode 100644
index 0000000..ea9866e
--- /dev/null
+++ b/source/documentation/0.17.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png b/source/documentation/0.17.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
new file mode 100644
index 0000000..c9539df
--- /dev/null
+++ b/source/documentation/0.17.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png b/source/documentation/0.17.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
new file mode 100644
index 0000000..99674c7
--- /dev/null
+++ b/source/documentation/0.17.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/presentations/02_28_2015_apache_aurora_thumb.png b/source/documentation/0.17.0/images/presentations/02_28_2015_apache_aurora_thumb.png
new file mode 100644
index 0000000..6e0e1fb
--- /dev/null
+++ b/source/documentation/0.17.0/images/presentations/02_28_2015_apache_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png b/source/documentation/0.17.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
new file mode 100644
index 0000000..f81a128
--- /dev/null
+++ b/source/documentation/0.17.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png b/source/documentation/0.17.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
new file mode 100644
index 0000000..6ebce80
--- /dev/null
+++ b/source/documentation/0.17.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png b/source/documentation/0.17.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
new file mode 100644
index 0000000..f8a3db1
--- /dev/null
+++ b/source/documentation/0.17.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/presentations/08_21_2014_past_present_future_thumb.png b/source/documentation/0.17.0/images/presentations/08_21_2014_past_present_future_thumb.png
new file mode 100644
index 0000000..deaebe7
--- /dev/null
+++ b/source/documentation/0.17.0/images/presentations/08_21_2014_past_present_future_thumb.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png b/source/documentation/0.17.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
new file mode 100644
index 0000000..61ea6d4
--- /dev/null
+++ b/source/documentation/0.17.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png b/source/documentation/0.17.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png
new file mode 100644
index 0000000..9f1f4af
--- /dev/null
+++ b/source/documentation/0.17.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png b/source/documentation/0.17.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
new file mode 100644
index 0000000..a8bb199
--- /dev/null
+++ b/source/documentation/0.17.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png b/source/documentation/0.17.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
new file mode 100644
index 0000000..ac5c6dc
--- /dev/null
+++ b/source/documentation/0.17.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/runningtask.png b/source/documentation/0.17.0/images/runningtask.png
new file mode 100644
index 0000000..9e7e31a
--- /dev/null
+++ b/source/documentation/0.17.0/images/runningtask.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/stderr.png b/source/documentation/0.17.0/images/stderr.png
new file mode 100644
index 0000000..6076a84
--- /dev/null
+++ b/source/documentation/0.17.0/images/stderr.png
Binary files differ
diff --git a/source/documentation/0.17.0/images/stdout.png b/source/documentation/0.17.0/images/stdout.png
new file mode 100644
index 0000000..d614097
--- /dev/null
+++ b/source/documentation/0.17.0/images/stdout.png
Binary files differ
diff --git a/publish/documentation/latest/images/storage_hierarchy.png b/source/documentation/0.17.0/images/storage_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/storage_hierarchy.png
copy to source/documentation/0.17.0/images/storage_hierarchy.png
Binary files differ
diff --git a/source/documentation/0.17.0/index.html.md b/source/documentation/0.17.0/index.html.md
new file mode 100644
index 0000000..08accc5
--- /dev/null
+++ b/source/documentation/0.17.0/index.html.md
@@ -0,0 +1,75 @@
+## Introduction
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+We encourage you to ask questions on the [Aurora user list](http://aurora.apache.org/community/) or
+the `#aurora` IRC channel on `irc.freenode.net`.
+
+
+## Getting Started
+Information for everyone new to Apache Aurora.
+
+ * [Aurora System Overview](getting-started/overview/)
+ * [Hello World Tutorial](getting-started/tutorial/)
+ * [Local cluster with Vagrant](getting-started/vagrant/)
+
+## Features
+Description of important Aurora features.
+
+ * [Containers](features/containers/)
+ * [Cron Jobs](features/cron-jobs/)
+ * [Custom Executors](features/custom-executors/)
+ * [Job Updates](features/job-updates/)
+ * [Multitenancy](features/multitenancy/)
+ * [Resource Isolation](features/resource-isolation/)
+ * [Scheduling Constraints](features/constraints/)
+ * [Services](features/services/)
+ * [Service Discovery](features/service-discovery/)
+ * [SLA Metrics](features/sla-metrics/)
+ * [Webhooks](features/webhooks/)
+
+## Operators
+For those that wish to manage and fine-tune an Aurora cluster.
+
+ * [Installation](operations/installation/)
+ * [Configuration](operations/configuration/)
+ * [Monitoring](operations/monitoring/)
+ * [Security](operations/security/)
+ * [Storage](operations/storage/)
+ * [Backup](operations/backup-restore/)
+
+## Reference
+The complete reference of commands, configuration options, and scheduler internals.
+
+ * [Task lifecycle](reference/task-lifecycle/)
+ * Configuration (`.aurora` files)
+    - [Configuration Reference](reference/configuration/)
+    - [Configuration Tutorial](reference/configuration-tutorial/)
+    - [Configuration Best Practices](reference/configuration-best-practices/)
+    - [Configuration Templating](reference/configuration-templating/)
+ * Aurora Client
+    - [Client Commands](reference/client-commands/)
+    - [Client Hooks](reference/client-hooks/)
+    - [Client Cluster Configuration](reference/client-cluster-configuration/)
+ * [Scheduler Configuration](reference/scheduler-configuration/)
+
+## Additional Resources
+ * [Tools integrating with Aurora](additional-resources/tools/)
+ * [Presentation videos and slides](additional-resources/presentations/)
+
+## Developers
+All the information you need to start modifying Aurora and contributing back to the project.
+
+ * [Contributing to the project](contributing/)
+ * [Committer's Guide](development/committers-guide/)
+ * [Design Documents](development/design-documents/)
+ * Developing the Aurora components:
+     - [Client](development/client/)
+     - [Scheduler](development/scheduler/)
+     - [Scheduler UI](development/ui/)
+     - [Thermos](development/thermos/)
+     - [Thrift structures](development/thrift/)
+
+
diff --git a/source/documentation/0.17.0/operations/backup-restore.md b/source/documentation/0.17.0/operations/backup-restore.md
new file mode 100644
index 0000000..8dac40a
--- /dev/null
+++ b/source/documentation/0.17.0/operations/backup-restore.md
@@ -0,0 +1,91 @@
+# Recovering from a Scheduler Backup
+
+**Be sure to read the entire page before attempting to restore from a backup, as it may have
+unintended consequences.**
+
+# Summary
+
+The restoration procedure replaces the existing (possibly corrupted) Mesos replicated log with an
+earlier, backed up, version and requires all schedulers to be taken down temporarily while
+restoring. Once completed, the scheduler state resets to what it was when the backup was created.
+This means any jobs/tasks created or updated after the backup are unknown to the scheduler and will
+be killed shortly after the cluster restarts. All other tasks continue operating as normal.
+
+Usually, it is a bad idea to restore a backup that is not extremely recent (i.e. older than a few
+hours). This is because the scheduler will expect the cluster to look exactly as the backup does,
+so any tasks that have been rescheduled since the backup was taken will be killed.
+
+The instructions below have been verified in the [Vagrant environment](../../getting-started/vagrant/) and, with
+minor syntax/path changes, should be applicable to any Aurora cluster.
+
+# Preparation
+
+Follow these steps to prepare the cluster for restoring from a backup:
+
+* Stop all scheduler instances
+
+* Consider blocking external traffic on the port defined in `-http_port` for all schedulers to
+prevent users from interacting with the scheduler during the restoration process. This helps
+troubleshooting by reducing scheduler log noise and prevents users from making changes that will
+be erased after the backup snapshot is restored.
+
+* Configure `aurora_admin` access to run all commands listed in the
+  [Restore from backup](#restore-from-backup) section locally on the leading scheduler:
+  * Make sure the [clusters.json](../../reference/client-cluster-configuration/) file is configured to
+    access the scheduler directly: set the `scheduler_uri` setting and remove `zk`. Since the leader
+    can get re-elected during the restore steps, consider doing this on all scheduler replicas.
+  * Depending on your particular security approach, you will need to either turn off scheduler
+    authorization by removing the scheduler `-http_authentication_mechanism` flag, or make sure that
+    direct scheduler access is properly authorized. E.g.: in the case of Kerberos you will need to
+    add an `/etc/hosts` entry mapping your local IP to the scheduler hostname configured in the keytabs:
+
+        <local_ip> <scheduler_domain_in_keytabs>
+
+* The next steps are required to put the scheduler into a partially disabled state where it can still
+accept storage recovery requests but cannot schedule tasks or change task states. This may be
+accomplished by updating the following scheduler configuration options (a combined sketch follows
+this list):
+  * Set `-mesos_master_address` to a non-existent ZooKeeper address. This prevents the scheduler from
+    registering with Mesos. E.g.: `-mesos_master_address=zk://localhost:1111/mesos/master`
+  * Set `-max_registration_delay` to a sufficiently long interval to prevent a registration timeout
+    and, as a result, scheduler suicide. E.g.: `-max_registration_delay=360mins`
+  * Make sure the `-reconciliation_initial_delay` option is set high enough (e.g.: `365days`) to
+    prevent accidental task GC. This is important because the scheduler will attempt to reconcile the
+    cluster state and will kill all tasks when restarted with an empty Mesos replicated log.
+
+* Restart all schedulers
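+
+As a combined sketch, the temporary flag overrides described above could look as follows on each
+scheduler (values are taken from the examples in this list; how the flags are supplied depends on
+your deployment):
+
+    -mesos_master_address=zk://localhost:1111/mesos/master
+    -max_registration_delay=360mins
+    -reconciliation_initial_delay=365days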
+
+# Cleanup and re-initialize Mesos replicated log
+
+Get rid of the corrupted files and re-initialize Mesos replicated log:
+
+* Stop schedulers
+* Delete all files under `-native_log_file_path` on all schedulers
+* Initialize Mesos replica's log file: `sudo mesos-log initialize --path=<-native_log_file_path>`
+* Start schedulers
+
+# Restore from backup
+
+At this point the scheduler is ready to rehydrate from the backup:
+
+* Identify the leading scheduler by:
+  * examining the `scheduler_lifecycle_LEADER_AWAITING_REGISTRATION` metric at the scheduler
+    `/vars` endpoint. The leader will report `1`; all other replicas report `0` (see the sketch
+    after this list).
+  * examining scheduler logs
+  * or examining the ZooKeeper registration under the path defined by `-zk_endpoints`
+    and `-serverset_path`
+
+* Locate the desired backup file, copy it to the leading scheduler's `-backup_dir` folder and stage
+recovery by running the following command on the leader:
+`aurora_admin scheduler_stage_recovery --bypass-leader-redirect <cluster> scheduler-backup-<yyyy-MM-dd-HH-mm>`
+
+* At this point, the recovery snapshot is staged and available for manual verification/modification
+via `aurora_admin scheduler_print_recovery_tasks --bypass-leader-redirect` and
+`scheduler_delete_recovery_tasks --bypass-leader-redirect` commands.
+See `aurora_admin help <command>` for usage details.
+
+* Commit the recovery. This instructs the scheduler to overwrite the existing Mesos replicated log with
+the provided backup snapshot and initiate a mandatory failover:
+`aurora_admin scheduler_commit_recovery --bypass-leader-redirect <cluster>`
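+
+As referenced in the leader identification step above, a minimal sketch of the `/vars` check
+(hostnames and the `8081` port are placeholders for your scheduler endpoints):
+
+    # The leading scheduler reports 1 for this metric; all other replicas report 0.
+    for host in scheduler1 scheduler2 scheduler3; do
+      echo -n "$host: "
+      curl -s "http://$host:8081/vars" | grep "^scheduler_lifecycle_LEADER_AWAITING_REGISTRATION"
+    done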
+
+# Cleanup
+Undo any modifications made during the [Preparation](#preparation) sequence.
diff --git a/source/documentation/0.17.0/operations/configuration.md b/source/documentation/0.17.0/operations/configuration.md
new file mode 100644
index 0000000..92807a5
--- /dev/null
+++ b/source/documentation/0.17.0/operations/configuration.md
@@ -0,0 +1,251 @@
+# Scheduler Configuration
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+Examples are available under `examples/scheduler/`. For a list of available Aurora flags and their
+documentation, see [Scheduler Configuration Reference](../../reference/scheduler-configuration/).
+
+
+## A Note on Configuration
+Like Mesos, Aurora uses command-line flags for runtime configuration. As such, the Aurora
+"configuration file" is typically a `scheduler.sh` shell script of the following form:
+
+    #!/bin/bash
+    AURORA_HOME=/usr/local/aurora-scheduler
+
+    # Flags controlling the JVM.
+    JAVA_OPTS=(
+      -Xmx2g
+      -Xms2g
+      # GC tuning, etc.
+    )
+
+    # Flags controlling the scheduler.
+    AURORA_FLAGS=(
+      # Port for client RPCs and the web UI
+      -http_port=8081
+      # Log configuration, etc.
+    )
+
+    # Environment variables controlling libmesos
+    export JAVA_HOME=...
+    export GLOG_v=1
+    # Port and public ip used to communicate with the Mesos master and for the replicated log
+    export LIBPROCESS_PORT=8083
+    export LIBPROCESS_IP=192.168.33.7
+
+    JAVA_OPTS="${JAVA_OPTS[*]}" exec "$AURORA_HOME/bin/aurora-scheduler" "${AURORA_FLAGS[@]}"
+
+That way Aurora's current flags are visible in `ps` and in the `/vars` admin endpoint.
+
+
+## Replicated Log Configuration
+
+Aurora schedulers use ZooKeeper to discover log replicas and elect a leader. Only one scheduler is
+leader at a given time - the other schedulers follow log writes and prepare to take over as leader
+but do not communicate with the Mesos master. Either 3 or 5 schedulers are recommended in a
+production deployment depending on failure tolerance and they must have persistent storage.
+
+Below is a summary of scheduler storage configuration flags that either don't have default values
+or require attention before deploying in a production environment.
+
+### `-native_log_quorum_size`
+Defines the Mesos replicated log quorum size. In a cluster with `N` schedulers, the flag
+`-native_log_quorum_size` should be set to `floor(N/2) + 1`. So in a cluster with 1 scheduler
+it should be set to `1`, in a cluster with 3 it should be set to `2`, and in a cluster of 5 it
+should be set to `3`.
+
+  Number of schedulers (N) | ```-native_log_quorum_size``` setting (```floor(N/2) + 1```)
+  ------------------------ | -------------------------------------------------------------
+  1                        | 1
+  3                        | 2
+  5                        | 3
+  7                        | 4
+
+*Incorrectly setting this flag will cause data corruption to occur!*
+
+### `-native_log_file_path`
+Location of the Mesos replicated log files. Consider allocating a dedicated disk (preferably SSD)
+for Mesos replicated log files to ensure optimal storage performance.
+
+### `-native_log_zk_group_path`
+ZooKeeper path used for Mesos replicated log quorum discovery.
+
+See [code](https://github.com/apache/aurora/blob/rel/0.17.0/src/main/java/org/apache/aurora/scheduler/log/mesos/MesosLogStreamModule.java) for
+other available Mesos replicated log configuration options and default values.
+
+### Changing the Quorum Size
+Special care needs to be taken when changing the size of the Aurora scheduler quorum.
+Since Aurora uses a Mesos replicated log, similar steps need to be followed as when
+[changing the Mesos quorum size](http://mesos.apache.org/documentation/latest/operational-guide).
+
+As a preparation, increase `-native_log_quorum_size` on each existing scheduler and restart them.
+When updating from 3 to 5 schedulers, the quorum size would grow from 2 to 3.
+
+Start the new schedulers with `-native_log_quorum_size` set to the new value. Failing to
+first increase the quorum size on the running schedulers can in some cases result in corruption
+or truncation of the replicated log used by Aurora. In that case, see the documentation on
+[recovering from backup](../backup-restore/).
+
+
+## Backup Configuration
+
+Configuration options for the Aurora scheduler backup manager.
+
+* `-backup_interval`: The interval on which the scheduler writes local storage backups.  The default is every hour.
+* `-backup_dir`: Directory to write backups to.
+* `-max_saved_backups`: Maximum number of backups to retain before deleting the oldest backup(s).
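+
+For example, these flags might appear in the `AURORA_FLAGS` array of the `scheduler.sh` sketch
+above (the values below are purely illustrative, not recommendations):
+
+    -backup_interval=1hrs
+    -backup_dir=/var/lib/aurora/backups
+    -max_saved_backups=48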
+
+
+## Resource Isolation
+
+For proper CPU, memory, and disk isolation as mentioned in our [end-user documentation](../../features/resource-isolation/),
+we recommend adding the following isolators to the `--isolation` flag of the Mesos agent:
+
+* `cgroups/cpu`
+* `cgroups/mem`
+* `disk/du`
+
+In addition, we recommend setting the following [agent flags](http://mesos.apache.org/documentation/latest/configuration/):
+
+* `--cgroups_limit_swap` to enable memory limits on both memory and swap instead of just memory.
+  Alternatively, you could disable swap on your agent hosts.
+* `--cgroups_enable_cfs` to enable hard limits on CPU resources via the CFS bandwidth limiting
+  feature.
+* `--enforce_container_disk_quota` to enable disk quota enforcement for containers.
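+
+Taken together, a sketch of the agent flags recommended above (how they are supplied depends on how
+your Mesos agents are started):
+
+    --isolation=cgroups/cpu,cgroups/mem,disk/du
+    --cgroups_limit_swap
+    --cgroups_enable_cfs
+    --enforce_container_disk_quota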
+
+To enable the optional GPU support in Mesos, please see the GPU related flags in the
+[Mesos configuration](http://mesos.apache.org/documentation/latest/configuration/).
+To enable the corresponding feature in Aurora, you have to start the scheduler with the
+flag
+
+    -allow_gpu_resource=true
+
+If you want to use revocable resources, first follow the
+[Mesos oversubscription documentation](http://mesos.apache.org/documentation/latest/oversubscription/)
+and then set this Aurora scheduler flag to allow receiving revocable Mesos offers:
+
+    -receive_revocable_resources=true
+
+Both CPUs and RAM are supported as revocable resources. The former is enabled by default,
+the latter needs to be enabled via:
+
+    -enable_revocable_ram=true
+
+Unless you want to use the [default](https://github.com/apache/aurora/blob/rel/0.17.0/src/main/resources/org/apache/aurora/scheduler/tiers.json)
+tier configuration, you will also have to specify a file path:
+
+    -tier_config=path/to/tiers/config.json
+
+
+## Containers
+
+Both the Mesos and Docker containerizers require configuration of the Mesos agent.
+
+### Mesos Containerizer
+
+The minimal agent configuration requires enabling Docker and Appc image support for the Mesos
+containerizer:
+
+    --containerizers=mesos
+    --image_providers=appc,docker
+    --isolation=filesystem/linux,docker/runtime  # as an addition to your other isolators
+
+Further details can be found in the corresponding [Mesos documentation](http://mesos.apache.org/documentation/latest/container-image/).
+
+### Docker Containerizer
+
+The [Docker containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/)
+requires that the Docker engine is installed on each agent host. In addition, it must be enabled on the
+Mesos agents by launching them with the option:
+
+    --containerizers=mesos,docker
+
+If you would like to run a container with a read-only filesystem, it may also be necessary to use
+the scheduler flag `-thermos_home_in_sandbox` in order to set HOME to the sandbox
+before the executor runs. This makes sure that the executor/runner PEX extraction happens
+inside the sandbox instead of in the container filesystem root.
+
+If you would like to supply your own parameters to `docker run` when launching jobs in docker
+containers, you may use the following flags:
+
+    -allow_docker_parameters
+    -default_docker_parameters
+
+`-allow_docker_parameters` controls whether or not users may pass their own configuration parameters
+through the job configuration files. If set to `false` (the default), the scheduler will reject
+jobs with custom parameters. *NOTE*: this setting should be used with caution as it allows any job
+owner to specify any parameters they wish, including those that may introduce security concerns
+(`privileged=true`, for example).
+
+`-default_docker_parameters` allows a cluster operator to specify a universal set of parameters that
+should be used for every container that does not have parameters explicitly configured at the job
+level. The argument accepts a multimap format:
+
+    -default_docker_parameters="read-only=true,tmpfs=/tmp,tmpfs=/run"
+
+### Common Options
+
+The following Aurora options work for both containerizers.
+
+The scheduler flag `-global_container_mounts` allows mounting paths from the host (i.e., the agent machine)
+into all containers on that host. The format is a comma-separated list of `host_path:container_path[:mode]`
+tuples. For example, `-global_container_mounts=/opt/secret_keys_dir:/mnt/secret_keys_dir:ro` mounts
+`/opt/secret_keys_dir` from the agents into all launched containers. Valid modes are `ro` and `rw`.
+
+
+## Thermos Process Logs
+
+### Log destination
+By default, Thermos will write process stdout/stderr to log files in the sandbox. Process object
+configuration allows specifying alternate log file destinations like streamed stdout/stderr or
+suppression of all log output. Default behavior can be configured for the entire cluster with the
+following flag (through the `-thermos_executor_flags` argument to the Aurora scheduler):
+
+    --runner-logger-destination=both
+
+The `both` setting sends logs to files and also streams them to the parent stdout/stderr.
+
+See [Configuration Reference](../../reference/configuration/#logger) for all destination options.
+
+### Log rotation
+By default, Thermos will not rotate the stdout/stderr logs from child processes and they will grow
+without bound. An individual user may change this behavior via configuration on the Process object,
+but it may also be desirable to change the default configuration for the entire cluster.
+In order to enable rotation by default, the following flags can be applied to Thermos (through the
+`-thermos_executor_flags` argument to the Aurora scheduler):
+
+    --runner-logger-mode=rotate
+    --runner-rotate-log-size-mb=100
+    --runner-rotate-log-backups=10
+
+In the above example, each instance of the Thermos runner will rotate stderr/stdout logs once they
+reach 100 MiB in size and keep a maximum of 10 backups. If a user has provided a custom setting for
+their process, it will override these default settings.
+
+
+## Thermos Executor Wrapper
+
+If you need to do computation before starting the Thermos executor (for example, setting a different
+`--announcer-hostname` parameter for every executor), then the Thermos executor should be invoked
+inside a wrapper script. In such a case, the Aurora scheduler should be started with
+`-thermos_executor_path` pointing to the wrapper script and `-thermos_executor_resources` set to a
+comma-separated string of all the resources that should be copied into the sandbox (including the
+original Thermos executor). Ensure the wrapper script does not access resources outside of the
+sandbox, as when the script is run from within a Docker container those resources may not exist.
+
+For example, to wrap the executor inside a simple wrapper, the scheduler would be started like this:
+`-thermos_executor_path=/path/to/wrapper.sh -thermos_executor_resources=/usr/share/aurora/bin/thermos_executor.pex`
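+
+A minimal `wrapper.sh` might look like the following sketch. It assumes the executor pex was copied
+into the executor's working directory via `-thermos_executor_resources`; the announcer flag is only
+an example of a per-host computation:
+
+    #!/bin/bash
+    # Hypothetical wrapper referenced by -thermos_executor_path.
+    # Compute a per-host value, then hand off to the real executor copied into the sandbox.
+    exec ./thermos_executor.pex --announcer-hostname="$(hostname -f)" "$@"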
+
+## Custom Executors
+
+The scheduler can be configured to utilize a custom executor by specifying the `-custom_executor_config` flag.
+The flag must be set to the path of a valid executor configuration file.
+
+For more information on this feature please see the custom executors [documentation](../../features/custom-executors/).
+
+## A note on increasing executor overhead
+
+Increasing executor overhead on an existing cluster, whether it be for custom executors or for Thermos,
+will result in degraded preemption performance until all tasks which began life with the previous,
+lower-overhead executor configuration are preempted or restarted.
+
diff --git a/source/documentation/0.17.0/operations/installation.md b/source/documentation/0.17.0/operations/installation.md
new file mode 100644
index 0000000..921ce1f
--- /dev/null
+++ b/source/documentation/0.17.0/operations/installation.md
@@ -0,0 +1,312 @@
+# Installing Aurora
+
+Source and binary distributions can be found on our
+[downloads](https://aurora.apache.org/downloads/) page.  Installing from binary packages is
+recommended for most users.
+
+- [Installing the scheduler](#installing-the-scheduler)
+- [Installing worker components](#installing-worker-components)
+- [Installing the client](#installing-the-client)
+- [Installing Mesos](#installing-mesos)
+- [Troubleshooting](#troubleshooting)
+
+If our binary packages don't suit you, our package build toolchain makes it easy to build your
+own packages. See the [instructions](https://github.com/apache/aurora-packaging) to learn how.
+
+
+## Machine profiles
+
+Given that many of these components communicate over the network, there are numerous ways you could
+assemble them to create an Aurora cluster.  The simplest way is to think in terms of three machine
+profiles:
+
+### Coordinator
+**Components**: ZooKeeper, Aurora scheduler, Mesos master
+
+A small number of machines (typically 3 or 5) responsible for cluster orchestration.  In most cases
+it is fine to co-locate these components in anything but very large clusters (> 1000 machines).
+Beyond that point, operators will likely want to manage these services on separate machines.
+
+In practice, 5 coordinators have been shown to reliably manage clusters with tens of thousands of
+machines.
+
+### Worker
+**Components**: Aurora executor, Aurora observer, Mesos agent
+
+The bulk of the cluster, where services will actually run.
+
+### Client
+**Components**: Aurora client, Aurora admin client
+
+Any machines that users submit jobs from.
+
+
+## Installing the scheduler
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        sudo start mesos-master
+
+2. Install ZooKeeper
+
+        sudo apt-get install -y zookeeperd
+
+3. Install the Aurora scheduler
+
+        sudo add-apt-repository -y ppa:openjdk-r/ppa
+        sudo apt-get update
+        sudo apt-get install -y openjdk-8-jre-headless wget
+
+        sudo update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-scheduler_0.17.0_amd64.deb
+        sudo dpkg -i aurora-scheduler_0.17.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-master
+
+2. Install ZooKeeper
+
+        sudo rpm -Uvh https://archive.cloudera.com/cdh4/one-click-install/redhat/6/x86_64/cloudera-cdh-4-0.x86_64.rpm
+        sudo yum install -y java-1.8.0-openjdk-headless zookeeper-server
+
+        sudo service zookeeper-server init
+        sudo systemctl start zookeeper-server
+
+3. Install the Aurora scheduler
+
+        sudo yum install -y wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-scheduler-0.17.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-scheduler-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Finalizing
+By default, the scheduler will start in an uninitialized mode.  This is because external
+coordination is necessary to be certain operator error does not result in a quorum of schedulers
+starting up and believing their databases are empty when in fact they should be re-joining a
+cluster.
+
+Because of this, a fresh install of the scheduler will need intervention to start up.  First,
+stop the scheduler service.
+Ubuntu: `sudo stop aurora-scheduler`
+CentOS: `sudo systemctl stop aurora`
+
+Now initialize the database:
+
+    sudo -u aurora mkdir -p /var/lib/aurora/scheduler/db
+    sudo -u aurora mesos-log initialize --path=/var/lib/aurora/scheduler/db
+
+Now you can start the scheduler back up.
+Ubuntu: `sudo start aurora-scheduler`
+CentOS: `sudo systemctl start aurora`
+
+
+## Installing worker components
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo apt-get install -y python2.7 wget
+
+        # NOTE: This appears to be a missing dependency of the mesos deb package and is needed
+        # for the python mesos native bindings.
+        sudo apt-get -y install libcurl4-nss-dev
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-executor_0.17.0_amd64.deb
+        sudo dpkg -i aurora-executor_0.17.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo yum install -y python2 wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-executor-0.17.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-executor-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Configuration
+The executor typically does not require configuration.  Command line arguments can
+be passed to the executor via the scheduler's `-thermos_executor_flags` argument.
+
+The observer needs to be configured to look at the correct Mesos directory in order to find task
+sandboxes. You should first find the Mesos working directory by looking for the Mesos agent
+`--work_dir` flag. You should see something like:
+
+        ps -eocmd | grep "mesos-slave" | grep -v grep | tr ' ' '\n' | grep "\--work_dir"
+        --work_dir=/var/lib/mesos
+
+If the flag is not set, you can view the default value like so:
+
+        mesos-slave --help
+        Usage: mesos-slave [options]
+
+          ...
+          --work_dir=VALUE      Directory path to place framework work directories
+                                (default: /tmp/mesos)
+          ...
+
+The value you find for `--work_dir`, `/var/lib/mesos` in this example, should match the Aurora
+observer value for `--mesos-root`.  You can look for that setting in a similar way on a worker
+node by grepping for `thermos_observer` and `--mesos-root`.  If the flag is not set, you can view
+the default value like so:
+
+        thermos_observer -h
+        Options:
+          ...
+          --mesos-root=MESOS_ROOT
+                                The mesos root directory to search for Thermos
+                                executor sandboxes [default: /var/lib/mesos]
+          ...
+
+In this case the default is `/var/lib/mesos` and we have a match. If there is no match, you can
+either adjust the Mesos agent start script(s) and restart the agent(s), or else adjust the
+Aurora observer start scripts and restart the observers.  To adjust the Aurora observer:
+
+#### Ubuntu Trusty
+
+    sudo sh -c 'echo "MESOS_ROOT=/tmp/mesos" >> /etc/default/thermos'
+
+#### CentOS 7
+
+Make an edit to add the `--mesos-root` flag resulting in something like:
+
+    grep -A5 OBSERVER_ARGS /etc/sysconfig/thermos
+    OBSERVER_ARGS=(
+      --port=1338
+      --mesos-root=/tmp/mesos
+      --log_to_disk=NONE
+      --log_to_stderr=google:INFO
+    )
+
+## Installing the client
+### Ubuntu Trusty
+
+    sudo apt-get install -y python2.7 wget
+
+    wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-tools_0.17.0_amd64.deb
+    sudo dpkg -i aurora-tools_0.17.0_amd64.deb
+
+### CentOS 7
+
+    sudo yum install -y python2 wget
+
+    wget -c https://apache.bintray.com/aurora/centos-7/aurora-tools-0.17.0-1.el7.centos.aurora.x86_64.rpm
+    sudo yum install -y aurora-tools-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Mac OS X
+
+    brew upgrade
+    brew install aurora-cli
+
+### Configuration
+Client configuration lives in a json file that describes the clusters available and how to reach
+them.  By default this file is at `/etc/aurora/clusters.json`.
+
+Jobs may be submitted to the scheduler using the client, and are described with
+[job configurations](../../reference/configuration/) expressed in `.aurora` files.  Typically you will
+maintain a single job configuration file to describe one or more deployment environments (e.g.
+dev, test, prod) for a production job.
+
+
+## Installing Mesos
+Mesos uses a single package for the Mesos master and agent.  As a result, the package dependencies
+are identical for both.
+
+### Mesos on Ubuntu Trusty
+
+    sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF
+    DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]')
+    CODENAME=$(lsb_release -cs)
+
+    echo "deb http://repos.mesosphere.io/${DISTRO} ${CODENAME} main" | \
+      sudo tee /etc/apt/sources.list.d/mesosphere.list
+    sudo apt-get -y update
+
+    # Use `apt-cache showpkg mesos | grep [version]` to find the exact version.
+    sudo apt-get -y install mesos=1.1.0-2.0.107.ubuntu1404
+
+### Mesos on CentOS 7
+
+    sudo rpm -Uvh https://repos.mesosphere.io/el/7/noarch/RPMS/mesosphere-el-repo-7-1.noarch.rpm
+    sudo yum -y install mesos-1.1.0
+
+
+
+## Troubleshooting
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions here to help get you moving.
+
+### Replicated log not initialized
+
+#### Symptoms
+- Scheduler RPCs and web interface claim `Storage is not READY`
+- Scheduler log repeatedly prints messages like
+
+  ```
+  I1016 16:12:27.234133 26081 replica.cpp:638] Replica in EMPTY status
+  received a broadcasted recover request
+  I1016 16:12:27.234256 26084 recover.cpp:188] Received a recover response
+  from a replica in EMPTY status
+  ```
+
+#### Solution
+When you create a new cluster, you need to inform a quorum of schedulers that they are safe to
+consider their database to be empty by [initializing](#finalizing) the
+replicated log. This is done to prevent the scheduler from modifying the cluster state in the event
+of multiple simultaneous disk failures or, more likely, misconfiguration of the replicated log path.
+
+
+### Scheduler not registered
+
+#### Symptoms
+Scheduler log contains
+
+    Framework has not been registered within the tolerated delay.
+
+#### Solution
+Double-check that the scheduler is configured correctly to reach the Mesos master. If you are registering
+the master in ZooKeeper, make sure the command line argument given to the master:
+
+    --zk=zk://$ZK_HOST:2181/mesos/master
+
+is the same as the one on the scheduler:
+
+    -mesos_master_address=zk://$ZK_HOST:2181/mesos/master
+
+
+### Scheduler not running
+
+#### Symptoms
+The scheduler process commits suicide regularly. This happens under error conditions, but
+also on purpose at regular intervals.
+
+#### Solution
+Aurora is meant to be run under supervision. You have to configure a supervisor like
+[Monit](http://mmonit.com/monit/) or [supervisord](http://supervisord.org/) to run the scheduler
+and restart it whenever it fails or exits on purpose.
+
+Aurora supports an active health checking protocol on its admin HTTP interface - if a `GET /health`
+times out or returns anything other than `200 OK` the scheduler process is unhealthy and should be
+restarted.
+
+For example, monit can be configured with
+
+    if failed port 8081 send "GET /health HTTP/1.0\r\n" expect "OK\n" with timeout 2 seconds for 10 cycles then restart
+
+assuming you set `-http_port=8081`.
diff --git a/source/documentation/0.17.0/operations/monitoring.md b/source/documentation/0.17.0/operations/monitoring.md
new file mode 100644
index 0000000..2ceda62
--- /dev/null
+++ b/source/documentation/0.17.0/operations/monitoring.md
@@ -0,0 +1,181 @@
+# Monitoring your Aurora cluster
+
+Before you start running important services in your Aurora cluster, it's important to set up
+monitoring and alerting of Aurora itself.  Most of your monitoring can be against the scheduler,
+since it will give you a global view of what's going on.
+
+## Reading stats
+The scheduler exposes a *lot* of instrumentation data via its HTTP interface. You can get a quick
+peek at the first few of these in our vagrant image:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars | head'
+    async_tasks_completed 1004
+    attribute_store_fetch_all_events 15
+    attribute_store_fetch_all_events_per_sec 0.0
+    attribute_store_fetch_all_nanos_per_event 0.0
+    attribute_store_fetch_all_nanos_total 3048285
+    attribute_store_fetch_all_nanos_total_per_sec 0.0
+    attribute_store_fetch_one_events 3391
+    attribute_store_fetch_one_events_per_sec 0.0
+    attribute_store_fetch_one_nanos_per_event 0.0
+    attribute_store_fetch_one_nanos_total 454690753
+
+These values are served as `Content-Type: text/plain`, with each line containing a space-separated metric
+name and value. Values may be integers, doubles, or strings (note: strings are static, others
+may be dynamic).
+
+If your monitoring infrastructure prefers JSON, the scheduler exports that as well:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars.json | python -mjson.tool | head'
+    {
+        "async_tasks_completed": 1009,
+        "attribute_store_fetch_all_events": 15,
+        "attribute_store_fetch_all_events_per_sec": 0.0,
+        "attribute_store_fetch_all_nanos_per_event": 0.0,
+        "attribute_store_fetch_all_nanos_total": 3048285,
+        "attribute_store_fetch_all_nanos_total_per_sec": 0.0,
+        "attribute_store_fetch_one_events": 3409,
+        "attribute_store_fetch_one_events_per_sec": 0.0,
+        "attribute_store_fetch_one_nanos_per_event": 0.0,
+
+This will be the same data as above, served with `Content-Type: application/json`.
+
+## Viewing live stat samples on the scheduler
+The scheduler uses the Twitter commons stats library, which keeps an internal time-series database
+of exported variables - nearly everything in `/vars` is available for instant graphing.  This is
+useful for debugging, but is not a replacement for an external monitoring system.
+
+You can view these graphs on a scheduler at `/graphview`.  It supports some composition and
+aggregation of values, which can be invaluable when triaging a problem.  For example, if you have
+the scheduler running in vagrant, check out these links:
+[simple graph](http://192.168.33.7:8081/graphview?query=jvm_uptime_secs)
+[complex composition](http://192.168.33.7:8081/graphview?query=rate\(scheduler_log_native_append_nanos_total\)%2Frate\(scheduler_log_native_append_events\)%2F1e6)
+
+### Counters and gauges
+Among numeric stats, there are two fundamental types of stats exported: _counters_ and _gauges_.
+Counters are guaranteed to be monotonically-increasing for the lifetime of a process, while gauges
+may decrease in value.  Aurora uses counters to represent things like the number of times an event
+has occurred, and gauges to capture things like the current length of a queue.  Counters are a
+natural fit for accurate composition into [rate ratios](http://en.wikipedia.org/wiki/Rate_ratio)
+(useful for sample-resistant latency calculation), while gauges are not.
+
+# Alerting
+
+## Quickstart
+If you are looking for just bare-minimum alerting to get something in place quickly, set up alerting
+on `framework_registered` and `task_store_LOST`. These will give you a decent picture of overall
+health.
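+
+A minimal sketch of such a check, assuming the default `-http_port=8081`:
+
+    # Alert if the leading scheduler is not registered, or if LOST tasks accumulate.
+    curl -s localhost:8081/vars | egrep '^(framework_registered|task_store_LOST) '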
+
+## A note on thresholds
+One of the most difficult things in monitoring is choosing alert thresholds. With many of these
+stats, there is no value we can offer as a threshold that will be guaranteed to work for you. It
+will depend on the size of your cluster, number of jobs, churn of tasks in the cluster, etc. We
+recommend you start with a strict value after viewing a small amount of collected data, and then
+adjust thresholds as you see fit. Feel free to ask us if you would like to validate that your alerts
+and thresholds make sense.
+
+## Important stats
+
+### `jvm_uptime_secs`
+Type: integer counter
+
+The number of seconds the JVM process has been running. Comes from
+[RuntimeMXBean#getUptime()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/RuntimeMXBean.html#getUptime\(\))
+
+Detecting resets (decreasing values) on this stat will tell you that the scheduler is failing to
+stay alive.
+
+Look at the scheduler logs to identify the reason the scheduler is exiting.
+
+### `system_load_avg`
+Type: double gauge
+
+The current load average of the system for the last minute. Comes from
+[OperatingSystemMXBean#getSystemLoadAverage()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/OperatingSystemMXBean.html?is-external=true#getSystemLoadAverage\(\)).
+
+A high sustained value suggests that the scheduler machine may be over-utilized.
+
+Use standard unix tools like `top` and `ps` to track down the offending process(es).
+
+### `process_cpu_cores_utilized`
+Type: double gauge
+
+The current number of CPU cores in use by the JVM process. This should not exceed the number of
+logical CPU cores on the machine. Derived from
+[OperatingSystemMXBean#getProcessCpuTime()](http://docs.oracle.com/javase/7/docs/jre/api/management/extension/com/sun/management/OperatingSystemMXBean.html)
+
+A high sustained value indicates that the scheduler is overworked. Due to current internal design
+limitations, if this value is sustained at `1`, there is a good chance the scheduler is under water.
+
+There are two main inputs that tend to drive this figure: task scheduling attempts and status
+updates from Mesos.  You may see activity in the scheduler logs to give an indication of where
+time is being spent.  Beyond that, it really takes good familiarity with the code to effectively
+triage this.  We suggest engaging with an Aurora developer.
+
+### `task_store_LOST`
+Type: integer gauge
+
+The number of tasks stored in the scheduler that are in the `LOST` state, and have been rescheduled.
+
+If this value is increasing at a high rate, it is a sign of trouble.
+
+There are many sources of `LOST` tasks in Mesos: the scheduler, master, agent, and executor can all
+trigger this.  The first step is to look in the scheduler logs for `LOST` to identify where the
+state changes are originating.
+
+### `scheduler_resource_offers`
+Type: integer counter
+
+The number of resource offers that the scheduler has received.
+
+For a healthy scheduler, this value must be increasing over time.
+
+Assuming the scheduler is up and otherwise healthy, you will want to check if the master thinks it
+is sending offers. You should also look at the master's web interface to see if it has a large
+number of outstanding offers that it is waiting to be returned.
+
+### `framework_registered`
+Type: binary integer counter
+
+Will be `1` for the leading scheduler that is registered with the Mesos master, `0` for passive
+schedulers.
+
+A sustained period without a `1` (or where `sum() != 1`) warrants investigation.
+
+If there is no leading scheduler, look in the scheduler and master logs for why.  If there are
+multiple schedulers claiming leadership, this suggests a split brain and warrants filing a critical
+bug.
+
+### `rate(scheduler_log_native_append_nanos_total)/rate(scheduler_log_native_append_events)`
+Type: rate ratio of integer counters
+
+This composes two counters to compute a windowed figure for the latency of replicated log writes.
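+
+For example, if `scheduler_log_native_append_nanos_total` grows by 500,000,000 while
+`scheduler_log_native_append_events` grows by 100 over the same window, the average append latency
+for that window is 5 ms (the `/1e6` in the graphview example above converts nanoseconds to
+milliseconds).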
+
+A hike in this value suggests disk bandwidth contention.
+
+Look in scheduler logs for any reported oddness with saving to the replicated log. Also use
+standard tools like `vmstat` and `iotop` to identify whether the disk has become slow or
+over-utilized. We suggest using a dedicated disk for the replicated log to mitigate this.
+
+### `timed_out_tasks`
+Type: integer counter
+
+Tracks the number of times the scheduler has given up while waiting
+(for `-transient_task_state_timeout`) to hear back about a task that is in a transient state
+(e.g. `ASSIGNED`, `KILLING`), and has moved the task to `LOST` before rescheduling it.
+
+This value is currently known to increase occasionally when the scheduler fails over
+([AURORA-740](https://issues.apache.org/jira/browse/AURORA-740)). However, any large spike in this
+value warrants investigation.
+
+The scheduler will log when it times out a task. You should trace the task ID of the timed out
+task into the master, agent, and/or executors to determine where the message was dropped.
+
+### `http_500_responses_events`
+Type: integer counter
+
+The total number of HTTP 500 status responses sent by the scheduler. Includes API and asset serving.
+
+An increase warrants investigation.
+
+Look in scheduler logs to identify why the scheduler returned a 500; there should be a stack trace.
diff --git a/source/documentation/0.17.0/operations/security.md b/source/documentation/0.17.0/operations/security.md
new file mode 100644
index 0000000..1fdf10f
--- /dev/null
+++ b/source/documentation/0.17.0/operations/security.md
@@ -0,0 +1,362 @@
+Securing your Aurora Cluster
+============================
+
+Aurora integrates with [Apache Shiro](http://shiro.apache.org/) to provide security
+controls for its API. In addition to providing some useful features out of the box, Shiro
+also allows Aurora cluster administrators to adapt the security system to their organization’s
+existing infrastructure. The announcer in the Aurora thermos executor also supports security
+controls for talking to ZooKeeper.
+
+
+- [Enabling Security](#enabling-security)
+- [Authentication](#authentication)
+	- [HTTP Basic Authentication](#http-basic-authentication)
+		- [Server Configuration](#server-configuration)
+		- [Client Configuration](#client-configuration)
+	- [HTTP SPNEGO Authentication (Kerberos)](#http-spnego-authentication-kerberos)
+		- [Server Configuration](#server-configuration-1)
+		- [Client Configuration](#client-configuration-1)
+- [Authorization](#authorization)
+	- [Using an INI file to define security controls](#using-an-ini-file-to-define-security-controls)
+		- [Caveats](#caveats)
+- [Implementing a Custom Realm](#implementing-a-custom-realm)
+	- [Packaging a realm module](#packaging-a-realm-module)
+- [Announcer Authentication](#announcer-authentication)
+    - [ZooKeeper authentication configuration](#zookeeper-authentication-configuration)
+    - [Executor settings](#executor-settings)
+- [Scheduler HTTPS](#scheduler-https)
+- [Known Issues](#known-issues)
+
+# Enabling Security
+
+There are two major components of security:
+[authentication and authorization](http://en.wikipedia.org/wiki/Authentication#Authorization).  A
+cluster administrator may choose the approach used for each, and may also implement custom
+mechanisms for either.  Later sections describe the options available. To enable authentication
+for the announcer, see [Announcer Authentication](#announcer-authentication).
+
+
+# Authentication
+
+The scheduler must be configured with instructions for how to process authentication
+credentials at a minimum.  There are currently two built-in authentication schemes -
+[HTTP Basic Authentication](http://en.wikipedia.org/wiki/Basic_access_authentication), and
+[SPNEGO](http://en.wikipedia.org/wiki/SPNEGO) (Kerberos).
+
+## HTTP Basic Authentication
+
+Basic Authentication is a very quick way to add *some* security.  It is supported
+by all major browsers and HTTP client libraries with minimal work.  However,
+before relying on Basic Authentication you should be aware of the [security
+considerations](http://tools.ietf.org/html/rfc2617#section-4).
+
+### Server Configuration
+
+At a minimum you need to set 3 command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_realm_modules=INI_AUTHNZ
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+[users]
+sally = apple, admin
+
+[roles]
+admin = *
+```
+
+The details of the security.ini file are explained below. Note that this file contains plaintext,
+unhashed passwords.
+
+### Client Configuration
+
+To configure the client for HTTP Basic authentication, add an entry to ~/.netrc with your credentials
+
+```
+% cat ~/.netrc
+# ...
+
+machine aurora.example.com
+login sally
+password apple
+
+# ...
+```
+
+No changes are required to `clusters.json`.
+
+## HTTP SPNEGO Authentication (Kerberos)
+
+### Server Configuration
+At a minimum you need to set 5 command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=NEGOTIATE
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+% cat path/to/security.ini
+[users]
+sally = _, admin
+
+[roles]
+admin = *
+```
+
+What's going on here? First, Aurora must be configured to request Kerberos credentials when presented with an
+unauthenticated request. This is achieved by setting
+
+```
+-http_authentication_mechanism=NEGOTIATE
+```
+
+Next, a Realm module must be configured to **authenticate** the current request using the Kerberos
+credentials that were requested. Aurora ships with a realm module that can do this
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN[,...]
+```
+
+The Kerberos5Realm requires a keytab file and a server principal name. The principal name will usually
+be in the form `HTTP/aurora.example.com@EXAMPLE.COM`.
+
+```
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+```
+
+The Kerberos5 realm module is authentication-only. For scheduler security to work you must also
+enable a realm module that provides an Authorizer implementation. For example, to do this using the
+IniShiroRealmModule:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+```
+
+You can then configure authorization using a security.ini file as described below
+(the password field is ignored). You must configure the realm module with the path to this file:
+
+```
+-shiro_ini_path=path/to/security.ini
+```
+
+### Client Configuration
+To use Kerberos on the client-side you must build Kerberos-enabled client binaries. Do this with
+
+```
+./pants binary src/main/python/apache/aurora/kerberos:kaurora
+./pants binary src/main/python/apache/aurora/kerberos:kaurora_admin
+```
+
+You must also configure each cluster where you've enabled Kerberos on the scheduler
+to use Kerberos authentication. Do this by setting `auth_mechanism` to `KERBEROS`
+in `clusters.json`.
+
+```
+% cat ~/.aurora/clusters.json
+{
+    "devcluser": {
+        "auth_mechanism": "KERBEROS",
+        ...
+    },
+    ...
+}
+```
+
+# Authorization
+Given a means to authenticate the entity a client claims they are, we need to define what privileges they have.
+
+## Using an INI file to define security controls
+
+The simplest security configuration for Aurora is an INI file on the scheduler.  For small
+clusters, or clusters where the users and access controls change relatively infrequently, this is
+likely the preferred approach.  However you may want to avoid this approach if access permissions
+are rapidly changing, or if your access control information already exists in another system.
+
+You can enable INI-based configuration with following scheduler command line arguments:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_ini_path=path/to/security.ini
+```
+
+*note* As the argument name reveals, this is using Shiro’s
+[IniRealm](http://shiro.apache.org/configuration.html#Configuration-INIConfiguration) behind
+the scenes.
+
+The INI file will contain two sections - users and roles.  Here’s an example for what might
+be in security.ini:
+
+```
+[users]
+sally = apple, admin
+jim = 123456, accounting
+becky = letmein, webapp
+larry = 654321,accounting
+steve = password
+
+[roles]
+admin = *
+accounting = thrift.AuroraAdmin:setQuota
+webapp = thrift.AuroraSchedulerManager:*:webapp
+```
+
+The users section defines user credentials and the role(s) they are members of.  These lines
+are of the format `<user> = <password>[, <role>...]`.  As you probably noticed, the passwords are
+in plaintext and as a result read access to this file should be restricted.
+
+In this configuration, each user has different privileges for actions in the cluster because
+of the roles they are a part of:
+
+* admin is granted all privileges
+* accounting may adjust the amount of resource quota for any role
+* webapp represents a collection of jobs that make up a service, and its members may create and modify any jobs owned by it
+
+### Caveats
+You might find documentation on the Internet suggesting there are additional sections in `shiro.ini`,
+like `[main]` and `[urls]`. These are not supported by Aurora as it uses a different mechanism to configure
+those parts of Shiro. Think of Aurora's `security.ini` as a subset with only `[users]` and `[roles]` sections.
+
+## Implementing Delegated Authorization
+
+It is possible to leverage Shiro's `runAs` feature by implementing a custom Servlet Filter that provides
+the capability and passing its fully qualified class name to the command line argument
+`-shiro_after_auth_filter`. The filter is registered in the same filter chain as the Shiro auth
+filters and is placed after them, which ensures that it is invoked only after the Shiro filters
+have had a chance to authenticate the request.
+
+# Implementing a Custom Realm
+
+Since Aurora’s security is backed by [Apache Shiro](https://shiro.apache.org), you can implement a
+custom [Realm](http://shiro.apache.org/realm.html) to define organization-specific security behavior.
+
+In addition to using Shiro's standard APIs to implement a Realm you can link against Aurora to
+access the type-safe Permissions Aurora uses. See the Javadoc for `org.apache.aurora.scheduler.spi`
+for more information.
+
+## Packaging a realm module
+Package your custom Realm(s) with a Guice module that exposes a `Set<Realm>` multibinding.
+
+```java
+package com.example;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.multibindings.Multibinder;
+import org.apache.shiro.realm.Realm;
+
+public class MyRealmModule extends AbstractModule {
+  @Override
+  public void configure() {
+    Realm myRealm = new MyRealm();
+
+    Multibinder.newSetBinder(binder(), Realm.class).addBinding().toInstance(myRealm);
+  }
+
+  static class MyRealm implements Realm {
+    // Realm implementation.
+  }
+}
+```
+
+To use your module in the scheduler, include it as a realm module based on its fully-qualified
+class name:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ,com.example.MyRealmModule
+```
+
+
+# Announcer Authentication
+The Thermos executor can be configured to authenticate with ZooKeeper and include
+an [ACL](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl)
+on the nodes it creates, which will specify
+the privileges of clients to perform different actions on these nodes.  This
+feature is enabled by specifying an ACL configuration file to the executor with the
+`--announcer-zookeeper-auth-config` command line argument.
+
+When this feature is _not_ enabled, nodes created by the executor will have 'world/all' permission
+(`ZOO_OPEN_ACL_UNSAFE`).  In most production environments, operators should specify an ACL and
+limit access.
+
+## ZooKeeper Authentication Configuration
+The configuration file must be formatted as JSON with the following schema:
+
+```json
+{
+  "auth": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>"
+    }
+  ],
+  "acl": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>",
+      "permissions": {
+        "read": <bool>,
+        "write": <bool>,
+        "create": <bool>,
+        "delete": <bool>,
+        "admin": <bool>
+      }
+    }
+  ]
+}
+```
+
+The `scheme`
+defines the encoding of the credential field.  Note that these fields are passed directly to
+ZooKeeper (except in the case of _digest_ scheme, where the executor will hash and encode
+the credential appropriately before passing it to ZooKeeper). In addition to `acl`, a list of
+authentication credentials must be provided in `auth` to use for the connection.
+
+All properties of the `permissions` object will default to `false` if not provided.
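+
+As an illustration only (the credentials below are made up), a configuration using the _digest_
+scheme for the executor, while leaving the nodes world-readable, might look like:
+
+```json
+{
+  "auth": [
+    {
+      "scheme": "digest",
+      "credential": "aurora-announcer:example-secret"
+    }
+  ],
+  "acl": [
+    {
+      "scheme": "digest",
+      "credential": "aurora-announcer:example-secret",
+      "permissions": {
+        "read": true,
+        "write": true,
+        "create": true,
+        "delete": true,
+        "admin": true
+      }
+    },
+    {
+      "scheme": "world",
+      "credential": "anyone",
+      "permissions": {
+        "read": true
+      }
+    }
+  ]
+}
+```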
+
+## Executor settings
+To enable the executor to authenticate against ZK, `--announcer-zookeeper-auth-config` should be
+set to the configuration file.
+
+
+# Scheduler HTTPS
+
+The Aurora scheduler does not provide native HTTPS support ([AURORA-343](https://issues.apache.org/jira/browse/AURORA-343)).
+It is therefore recommended to deploy it behind an HTTPS capable reverse proxy such as nginx or Apache2.
+
+A simple setup is to launch both the reverse proxy and the Aurora scheduler on the same port, but
+bind the reverse proxy to the public IP of the host and the scheduler to localhost:
+
+    -ip=127.0.0.1
+    -http_port=8081
+
+If your clients connect to the scheduler via [`proxy_url`](../../reference/scheduler-configuration/),
+you can update it to `https`. If you use the ZooKeeper based discovery instead, the scheduler
+needs to be launched via
+
+    -serverset_endpoint_name=https
+
+in order to announce its HTTPS support within ZooKeeper.
+
+
+# Known Issues
+
+While the APIs and SPIs we ship with are stable as of 0.8.0, we are aware of several incremental
+improvements. Please follow, vote, or send patches.
+
+Relevant tickets:
+* [AURORA-1248](https://issues.apache.org/jira/browse/AURORA-1248): Client retries 4xx errors
+* [AURORA-1279](https://issues.apache.org/jira/browse/AURORA-1279): Remove kerberos-specific build targets
+* [AURORA-1293](https://issues.apache.org/jira/browse/AURORA-1291): Consider defining a JSON format in place of INI
+* [AURORA-1179](https://issues.apache.org/jira/browse/AURORA-1179): Support hashed passwords in security.ini
+* [AURORA-1295](https://issues.apache.org/jira/browse/AURORA-1295): Support security for the ReadOnlyScheduler service
diff --git a/source/documentation/0.17.0/operations/storage.md b/source/documentation/0.17.0/operations/storage.md
new file mode 100644
index 0000000..874dfe6
--- /dev/null
+++ b/source/documentation/0.17.0/operations/storage.md
@@ -0,0 +1,97 @@
+# Aurora Scheduler Storage
+
+- [Overview](#overview)
+- [Replicated Log Configuration](#replicated-log-configuration)
+- [Backup Configuration](#replicated-log-configuration)
+- [Storage Semantics](#storage-semantics)
+  - [Reads, writes, modifications](#reads-writes-modifications)
+    - [Read lifecycle](#read-lifecycle)
+    - [Write lifecycle](#write-lifecycle)
+  - [Atomicity, consistency and isolation](#atomicity-consistency-and-isolation)
+  - [Population on restart](#population-on-restart)
+
+
+## Overview
+
+The Aurora scheduler maintains data that needs to be persisted in order to survive failovers and restarts.
+For example:
+
+* Task configurations and scheduled task instances
+* Job update configurations and update progress
+* Production resource quotas
+* Mesos resource offer host attributes
+
+Aurora solves its persistence needs by leveraging the Mesos implementation of a Paxos replicated
+log [[1]](https://ramcloud.stanford.edu/~ongaro/userstudy/paxos.pdf)
+[[2]](http://en.wikipedia.org/wiki/State_machine_replication) with a key-value
+[LevelDB](https://github.com/google/leveldb) storage as persistence media.
+
+Conceptually, it can be represented by the following major components:
+
+* Volatile storage: in-memory cache of all available data. Implemented via in-memory
+[H2 Database](http://www.h2database.com/html/main.html) and accessed via
+[MyBatis](http://mybatis.github.io/mybatis-3/).
+* Log manager: interface between Aurora storage and Mesos replicated log. The default schema format
+is [thrift](https://github.com/apache/thrift). Data is stored in serialized binary form.
+* Snapshot manager: all data is periodically persisted to the Mesos replicated log in a single snapshot.
+This helps establish periodic recovery checkpoints and speeds up volatile storage recovery on
+restart.
+* Backup manager: as a precaution, snapshots are periodically written out into backup files.
+This solves a [disaster recovery problem](../backup-restore/)
+in case of a complete loss or corruption of Mesos log files.
+
+![Storage hierarchy](../images/storage_hierarchy.png)
+
+
+## Storage Semantics
+
+This section covers implementation details of the Aurora storage system. Understanding them can be
+useful when investigating performance issues.
+
+### Reads, writes, modifications
+
+All services in Aurora access data via a set of predefined store interfaces (aka stores) logically
+grouped by the type of data they serve. Every interface defines a specific set of operations allowed
+on the data, thus abstracting away both the storage access and the actual persistence implementation.
+The latter is especially important given that persisted data is effectively immutable: with the Mesos
+replicated log as the underlying persistence solution, data can be read and appended easily but not
+modified in place. All modifications are simulated by saving new versions of modified objects. This,
+together with general performance considerations, justifies the existence of the volatile in-memory store.
+
+#### Read lifecycle
+
+There are two types of reads available in Aurora: consistent and weakly-consistent. The difference
+is explained [below](#atomicity-consistency-and-isolation).
+
+All reads are served from the volatile storage, which makes them generally cheap operations
+from a performance standpoint. The majority of the volatile stores are backed by the
+in-memory H2 database. This allows for rich schema definitions, queries and relationships that
+key-value storage is unable to match.
+
+#### Write lifecycle
+
+Writes are more involved operations since, in addition to updating the volatile store, data has to be
+appended to the replicated log. Data is not available for reads until fully acknowledged by both the
+replicated log and the volatile storage.
+
+### Atomicity, consistency and isolation
+
+Aurora uses [write-ahead logging](http://en.wikipedia.org/wiki/Write-ahead_logging) to ensure
+consistency between replicated and volatile storage. In Aurora, data is first written into the
+replicated log and only then updated in the volatile store.
+
+Aurora storage uses read-write locks to serialize data mutations and provide a consistent view of the
+available data. The `Storage` interface exposes 3 major types of operations:
+* `consistentRead` - access is locked using the reader's lock and provides a consistent view on read
+* `weaklyConsistentRead` - access is lock-less. Delivers best contention performance but may result
+in stale reads
+* `write` - access is fully serialized by using the writer's lock. Operation success requires both
+volatile and replicated writes to succeed.
+
+The consistency of the volatile store is enforced via H2 transactional isolation.
+
+### Population on restart
+
+Any time a scheduler restarts, it restores its volatile state from the most recent position recorded
+in the replicated log by restoring the snapshot and replaying individual log entries on top to fully
+recover the state up to the last write.
diff --git a/source/documentation/0.17.0/reference/client-cluster-configuration.md b/source/documentation/0.17.0/reference/client-cluster-configuration.md
new file mode 100644
index 0000000..ff7a3ac
--- /dev/null
+++ b/source/documentation/0.17.0/reference/client-cluster-configuration.md
@@ -0,0 +1,99 @@
+# Client Cluster Configuration
+
+A cluster configuration file is used by the Aurora client to describe the Aurora clusters with
+which it can communicate. Ultimately this allows client users to reference clusters with short names
+like us-east and eu.
+
+A cluster configuration is formatted as JSON.  The simplest cluster configuration is one that
+communicates with a single (non-leader-elected) scheduler.  For example:
+
+    [{
+      "name": "example",
+      "scheduler_uri": "http://localhost:55555",
+    }]
+
+
+A configuration for a leader-elected scheduler would contain something like:
+
+    [{
+      "name": "example",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler"
+    }]
+
+
+The following properties may be set:
+
+  **Property**             | **Type** | **Description**
+  :------------------------| :------- | :--------------
+   **name**                | String   | Cluster name (Required)
+   **slave_root**          | String   | Path to Mesos agent work dir (Required)
+   **slave_run_directory** | String   | Name of Mesos agent run dir (Required)
+   **zk**                  | String   | Hostname of ZooKeeper instance used to resolve Aurora schedulers.
+   **zk_port**             | Integer  | Port of ZooKeeper instance used to locate Aurora schedulers (Default: 2181)
+   **scheduler_zk_path**   | String   | ZooKeeper path under which scheduler instances are registered.
+   **scheduler_uri**       | String   | URI of Aurora scheduler instance.
+   **proxy_url**           | String   | Used by the client to format URLs for display.
+   **auth_mechanism**      | String   | The authentication mechanism to use when communicating with the scheduler. (Default: UNAUTHENTICATED)
+   **docker_registry**     | String   | Used by the client to resolve docker tags.
+
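+A single entry that combines several of these properties might look like the following (all values
+here are illustrative, not defaults):
+
+    [{
+      "name": "devcluster",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler",
+      "slave_root": "/var/lib/mesos",
+      "slave_run_directory": "latest",
+      "auth_mechanism": "UNAUTHENTICATED"
+    }]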
+
+## Details
+
+### `name`
+
+The name of the Aurora cluster represented by this entry. This name will be the `cluster` portion of
+any job keys identifying jobs running within the cluster.
+
+### `slave_root`
+
+The path on the Mesos agents where executing tasks can be found. It is used in combination with the
+`slave_run_directory` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This value should match the value passed to `mesos-slave`
+as `-work_dir`.
+
+### `slave_run_directory`
+
+The name of the directory where the task run can be found. This is used in combination with the
+`slave_root` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This should almost always be set to `latest`.
+
+### `zk`
+
+The hostname of the ZooKeeper instance used to resolve the Aurora scheduler. Aurora uses ZooKeeper
+to elect a leader. The client will connect to this ZooKeeper instance to determine the current
+leader. This host should match the host passed to the scheduler as `-zk_endpoints`.
+
+### `zk_port`
+
+The port on which the ZooKeeper instance is running. If not set this will default to the standard
+ZooKeeper port of 2181. This port should match the port in the host passed to the scheduler as
+`-zk_endpoints`.
+
+### `scheduler_zk_path`
+
+The path on the ZooKeeper instance under which the Aurora serverset is registered. This value should
+match the value passed to the scheduler as `-serverset_path`.
+
+### `scheduler_uri`
+
+The URI of the scheduler. This would be used in place of the ZooKeeper related configuration above
+in circumstances where direct communication with a single scheduler is needed (e.g. testing
+environments). It is strongly advised to **never** use this property for production deploys.
+
+### `proxy_url`
+
+If `proxy_url` is set, its value will be used as the base URL instead of the hostname of the
+leading scheduler. In that scenario the value for `proxy_url` would be, for example, the
+URL of your VIP in a load balancer or a round-robin DNS name.
+
+### `auth_mechanism`
+
+The identifier of an authentication mechanism that the client should use when communicating with the
+scheduler. Support for values other than `UNAUTHENTICATED` requires a matching scheduler-side
+[security configuration](../../operations/security/).
+
+### `docker_registry`
+
+The URI of the Docker Registry that will be used by the Aurora client to resolve docker tags to concrete
+image ids, when using the docker binding helper, like `{{docker.image[name][tag]}}`.
diff --git a/source/documentation/0.17.0/reference/client-commands.md b/source/documentation/0.17.0/reference/client-commands.md
new file mode 100644
index 0000000..36f0ecb
--- /dev/null
+++ b/source/documentation/0.17.0/reference/client-commands.md
@@ -0,0 +1,326 @@
+Aurora Client Commands
+======================
+
+- [Introduction](#introduction)
+- [Cluster Configuration](#cluster-configuration)
+- [Job Keys](#job-keys)
+- [Modifying Aurora Client Commands](#modifying-aurora-client-commands)
+- [Regular Jobs](#regular-jobs)
+    - [Creating and Running a Job](#creating-and-running-a-job)
+    - [Running a Command On a Running Job](#running-a-command-on-a-running-job)
+    - [Killing a Job](#killing-a-job)
+    - [Adding Instances](#adding-instances)
+    - [Updating a Job](#updating-a-job)
+        - [Coordinated job updates](#coordinated-job-updates)
+    - [Renaming a Job](#renaming-a-job)
+    - [Restarting Jobs](#restarting-jobs)
+- [Cron Jobs](#cron-jobs)
+- [Comparing Jobs](#comparing-jobs)
+- [Viewing/Examining Jobs](#viewingexamining-jobs)
+    - [Listing Jobs](#listing-jobs)
+    - [Inspecting a Job](#inspecting-a-job)
+    - [Versions](#versions)
+    - [Checking Your Quota](#checking-your-quota)
+    - [Finding a Job on Web UI](#finding-a-job-on-web-ui)
+    - [Getting Job Status](#getting-job-status)
+    - [Opening the Web UI](#opening-the-web-ui)
+    - [SSHing to a Specific Task Machine](#sshing-to-a-specific-task-machine)
+    - [Templating Command Arguments](#templating-command-arguments)
+
+Introduction
+------------
+
+Once you have written an `.aurora` configuration file that describes
+your Job and its parameters and functionality, you interact with Aurora
+using Aurora Client commands. This document describes all of these commands
+and how and when to use them. All Aurora Client commands start with
+`aurora`, followed by the name of the specific command and its
+arguments.
+
+*Job keys* are a very common argument to Aurora commands, as well as the
+gateway to useful information about a Job. Before using Aurora, you
+should read the next section which describes them in detail. The section
+after that briefly describes how you can modify the behavior of certain
+Aurora Client commands, linking to a detailed document about how to do
+that.
+
+This is followed by the Regular Jobs section, which describes the basic
+Client commands for creating, running, and manipulating Aurora Jobs.
+After that are sections on Comparing Jobs and Viewing/Examining Jobs. In
+other words, various commands for getting information and metadata about
+Aurora Jobs.
+
+Cluster Configuration
+---------------------
+
+The client must be able to find a configuration file that specifies available clusters. This file
+declares shorthand names for clusters, which are in turn referenced by job configuration files
+and client commands.
+
+The client will load at most two configuration files, making both of their defined clusters
+available. The first is intended to be a system-installed cluster file, using the path specified in
+the environment variable `AURORA_CONFIG_ROOT`, defaulting to `/etc/aurora/clusters.json` if the
+environment variable is not set. The second is a user-installed file, located at
+`~/.aurora/clusters.json`.
+
+For more details on cluster configuration see the
+[Client Cluster Configuration](../client-cluster-configuration/) documentation.
+
+Job Keys
+--------
+
+A job key is a unique system-wide identifier for an Aurora-managed
+Job, for example `cluster1/web-team/test/experiment204`. It is a 4-tuple
+consisting of, in order, *cluster*, *role*, *environment*, and
+*jobname*, separated by `/`s. Cluster is the name of an Aurora
+cluster. Role is the Unix service account under which the Job
+runs. Environment is a namespace component like `devel`, `test`,
+`prod`, or `stagingN`. Jobname is the Job's name.
+
+The combination of all four values uniquely specifies the Job. If any
+one value is different from that of another job key, the two job keys
+refer to different Jobs. For example, job key
+`cluster1/tyg/prod/workhorse` is different from
+`cluster1/tyg/prod/workcamel` is different from
+`cluster2/tyg/prod/workhorse` is different from
+`cluster2/foo/prod/workhorse` is different from
+`cluster1/tyg/test/workhorse`.
+
+Role names are user accounts existing on the agent machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
+Modifying Aurora Client Commands
+--------------------------------
+
+For certain Aurora Client commands, you can define hook methods that run
+either before or after an action that takes place during the command's
+execution, as well as based on whether the action finished successfully or failed
+during execution. Basically, a hook is code that lets you extend the
+command's actions. The hook executes on the client side, specifically on
+the machine executing Aurora commands.
+
+Hooks can be associated with these Aurora Client commands:
+
+  - `job create`
+  - `job kill`
+  - `job restart`
+
+The process for writing and activating them is complex enough
+that we explain it in a dedicated document, [Hooks for Aurora Client API](../client-hooks/).
+
+Regular Jobs
+------------
+
+This section covers Aurora commands related to running, killing,
+renaming, updating, and restarting a basic Aurora Job.
+
+### Creating and Running a Job
+
+    aurora job create <job key> <configuration file>
+
+Creates and then runs a Job with the specified job key based on a `.aurora` configuration file.
+The configuration file may also contain and activate hook definitions.
+
+### Running a Command On a Running Job
+
+    aurora task run CLUSTER/ROLE/ENV/NAME[/INSTANCES] <cmd>
+
+Runs a shell command on all machines currently hosting shards of a
+single Job.
+
+`run` supports the same command line wildcards used to populate a Job's
+commands; i.e. anything in the `{{mesos.*}}` and `{{thermos.*}}`
+namespaces.
+
+### Killing a Job
+
+    aurora job killall CLUSTER/ROLE/ENV/NAME
+
+Kills all Tasks associated with the specified Job, blocking until all
+are terminated. Defaults to killing all instances in the Job.
+
+The `<configuration file>` argument for `kill` is optional. Use it only
+if it contains hook definitions and activations that affect the
+kill command.
+
+### Adding Instances
+
+    aurora job add CLUSTER/ROLE/ENV/NAME/INSTANCE <count>
+
+Adds `<count>` instances to the existing job. The configuration of the new instances is derived from
+an active job instance pointed to by the `/INSTANCE` part of the job specification. This command is
+a simpler way to scale out an existing job when an instance with desired task configuration
+already exists. Use `aurora update start` to add instances with a new (updated) configuration.
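+
+For example, assuming the `hello` job from the tutorial is running, the following would add two
+instances whose configuration is cloned from instance 0:
+
+    aurora job add devcluster/www-data/prod/hello/0 2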
+
+### Updating a Job
+
+You can manage job updates using the `aurora update` command.  Please see
+[the Job Update documentation](../../features/job-updates/) for more details.
+
+
+### Renaming a Job
+
+Renaming is a tricky operation as downstream clients must be informed of
+the new name. A conservative approach
+to renaming suitable for production services is:
+
+1.  Modify the Aurora configuration file to change the role,
+    environment, and/or name as appropriate to the standardized naming
+    scheme.
+2.  Check that only these naming components have changed
+    with `aurora diff`.
+
+        aurora job diff CLUSTER/ROLE/ENV/NAME <job_configuration>
+
+3.  Create the (identical) job at the new key. You may need to request a
+    temporary quota increase.
+
+        aurora job create CLUSTER/ROLE/ENV/NEW_NAME <job_configuration>
+
+4.  Migrate all clients over to the new job key. Update all links and
+    dashboards. Ensure that both job keys run identical versions of the
+    code while in this state.
+5.  After verifying that all clients have successfully moved over, kill
+    the old job.
+
+        aurora job killall CLUSTER/ROLE/ENV/NAME
+
+6.  If you received a temporary quota increase, be sure to let the
+    powers that be know you no longer need the additional capacity.
+
+### Restarting Jobs
+
+`restart` restarts all shards of the Job identified by a job key:
+
+    aurora job restart CLUSTER/ROLE/ENV/NAME[/INSTANCES]
+
+Restarts are controlled on the client side, so aborting
+the `job restart` command halts the restart operation.
+
+**Note**: `job restart` only applies its command line arguments and does not
+use, nor is it affected by, `update.config`. Restarting
+does ***not*** involve a configuration change. To update the
+configuration, use `update.config`.
+
+The `--config` argument for restart is optional. Use it only
+if it contains hook definitions and activations that affect the
+`job restart` command.
+
+Cron Jobs
+---------
+
+You can manage cron jobs using the `aurora cron` command.  Please see
+[the Cron Jobs Feature](../../features/cron-jobs/) for more details.
+
+Comparing Jobs
+--------------
+
+    aurora job diff CLUSTER/ROLE/ENV/NAME <job configuration>
+
+Compares a job configuration against a running job. By default the diff
+is determined using `diff`, though you may choose an alternate
+diff program by specifying the `DIFF_VIEWER` environment variable.
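+
+For example, to view the diff in a side-by-side tool (assuming `meld` is installed locally), you
+might run:
+
+    DIFF_VIEWER=meld aurora job diff devcluster/www-data/prod/hello hello_world.aurora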
+
+Viewing/Examining Jobs
+----------------------
+
+Above we discussed creating, killing, and updating Jobs. Here we discuss
+how to view and examine Jobs.
+
+### Listing Jobs
+
+    aurora config list <job configuration>
+
+Lists all Jobs registered with the Aurora scheduler in the named cluster for the named role.
+
+### Inspecting a Job
+
+    aurora job inspect CLUSTER/ROLE/ENV/NAME <job configuration>
+
+`inspect` verifies that its specified job can be parsed from a
+configuration file, and displays the parsed configuration.
+
+### Checking Your Quota
+
+    aurora quota get CLUSTER/ROLE
+
+Prints the production quota allocated to the role in the given
+cluster. Only non-[dedicated](../../features/constraints/#dedicated-attribute)
+[production](../configuration/#job-objects) jobs consume quota.
+
+### Finding a Job on Web UI
+
+When you create a job, part of the output response contains a URL that goes
+to the job's scheduler UI page. For example:
+
+    vagrant@precise64:~$ aurora job create devcluster/www-data/prod/hello /vagrant/examples/jobs/hello_world.aurora
+    INFO] Creating job hello
+    INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+    INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+You can go to the scheduler UI page for this job via `http://precise64:8081/scheduler/www-data/prod/hello`.
+To reach the overall scheduler UI page, truncate that URL after `scheduler`: `http://precise64:8081/scheduler`.
+
+Once you click through to a role page, you see Jobs arranged
+separately by pending jobs, active jobs and finished jobs.
+Jobs are arranged by role, typically a service account for
+production jobs and user accounts for test or development jobs.
+
+### Getting Job Status
+
+    aurora job status <job_key>
+
+Returns the status of recent tasks associated with the Job specified by
+`job_key` in its cluster. Typically this includes
+a mix of active tasks (running or assigned) and inactive tasks
+(successful, failed, and lost).
+
+### Opening the Web UI
+
+Use the Job's web UI scheduler URL or the `aurora status` command to find out on which
+machines individual tasks are scheduled. You can open the web UI via the
+`open` command if invoked from your machine:
+
+    aurora job open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+If only the cluster is specified, it goes directly to that cluster's
+scheduler main page. If the role is specified, it goes to the top-level
+role page. If the full job key is specified, it goes directly to the job
+page where you can inspect individual tasks.
+
+### SSHing to a Specific Task Machine
+
+    aurora task ssh <job_key> <shard number>
+
+You can have the Aurora client ssh directly to the machine that has been
+assigned a particular Job/shard number. This may be useful for quickly
+diagnosing issues such as performance issues or abnormal behavior on a
+particular machine.
+
+### Templating Command Arguments
+
+    aurora task run [-e] [-t THREADS] <job_key> -- <<command-line>>
+
+Given a job specification, run the supplied command on all hosts and
+return the output. You may use the standard Mustache templating rules:
+
+- `{{thermos.ports[name]}}` substitutes the specific named port of the
+  task assigned to this machine
+- `{{mesos.instance}}` substitutes the shard id of the job's task
+  assigned to this machine
+- `{{thermos.task_id}}` substitutes the task id of the job's task
+  assigned to this machine
+
+For example, the following type of pattern can be a powerful diagnostic
+tool:
+
+    aurora task run -t5 cluster1/tyg/devel/seizure -- \
+      'curl -s -m1 localhost:{{thermos.ports[http]}}/vars | grep uptime'
+
+By default, the command runs in the Task's sandbox. The `-e` option can
+run the command in the executor's sandbox. This is mostly useful for
+Aurora administrators.
+
+You can parallelize the runs by using the `-t` option.
diff --git a/source/documentation/0.17.0/reference/client-hooks.md b/source/documentation/0.17.0/reference/client-hooks.md
new file mode 100644
index 0000000..ee64193
--- /dev/null
+++ b/source/documentation/0.17.0/reference/client-hooks.md
@@ -0,0 +1,228 @@
+# Hooks for Aurora Client API
+
+You can execute hook methods around Aurora API Client methods when they are called by the Aurora Command Line commands.
+
+Explaining how hooks work is a bit tricky because of some indirection about what they apply to. Basically, a hook is code that executes when a particular Aurora Client API method runs, letting you extend the method's actions. The hook executes on the client side, specifically on the machine executing Aurora commands.
+
+The catch is that hooks are associated with Aurora Client API methods, which users don't directly call. Instead, users call Aurora Command Line commands, which call Client API methods during their execution. Since which hooks run depends on which Client API methods get called, you will need to know which Command Line commands call which API methods. Later on, there is a table showing the various associations.
+
+**Terminology Note**: From now on, "method(s)" refer to Client API methods, and "command(s)" refer to Command Line commands.
+
+- [Hook Types](#hook-types)
+- [Execution Order](#execution-order)
+- [Hookable Methods](#hookable-methods)
+- [Activating and Using Hooks](#activating-and-using-hooks)
+- [.aurora Config File Settings](#aurora-config-file-settings)
+- [Command Line](#command-line)
+- [Hooks Protocol](#hooks-protocol)
+  - [pre_ Methods](#pre_-methods)
+  - [err_ Methods](#err_-methods)
+  - [post_ Methods](#post_-methods)
+- [Generic Hooks](#generic-hooks)
+- [Hooks Process Checklist](#hooks-process-checklist)
+
+
+## Hook Types
+
+Hooks have three basic types, differing by when they run with respect to their associated method.
+
+`pre_<method_name>`: When its associated method is called, the `pre_` hook executes first, then the called method. If the `pre_` hook fails, the method never runs. Later code that expected the method to succeed may be affected by this, which can result in the Aurora client terminating.
+
+Note that a `pre_` hook can error-trap internally so it does not
+return `False`. Designers/contributors of new `pre_` hooks should
+consider whether or not to error-trap them. You can error trap at the
+highest level very generally and always pass the `pre_` hook by
+returning `True`. For example:
+
+    def pre_create(...):
+      do_something()  # if do_something fails with an exception, the create_job is not attempted!
+      return True
+
+    # However...
+    def pre_create(...):
+      try:
+        do_something()  # may cause exception
+      except Exception:  # generic error trap will catch it
+        pass  # and ignore the exception
+      return True  # create_job will run in any case!
+
+`post_<method_name>`: A `post_` hook executes after its associated method successfully finishes running. If it fails, the already executed method is unaffected. A `post_` hook's error is trapped, and any later operations are unaffected.
+
+`err_<method_name>`: Executes only when its associated method returns a status other than OK or throws an exception. If an `err_` hook fails, the already executed method is unaffected. An `err_` hook's error is trapped, and any later operations are unaffected.
+
+## Execution Order
+
+A command with `pre_`, `post_`, and `err_` hooks defined and activated for its called method executes in the following order when the method successfully executes:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and successfully finishes
+6. `post_` method hook runs
+7. Command code executes
+8. Command execution ends
+
+The following is what happens when, for the same command and hooks, the method associated with the command suffers an error and does not successfully finish executing:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and fails
+6. `err_` method hook runs
+7. Command Code executes (if `err_` method does not end the command execution)
+8. Command execution ends
+
+Note that the `post_` and `err_` hooks for the same method can never both run for a single execution of that method.
+
+## Hookable Methods
+
+You can associate `pre_`, `post_`, and `err_` hooks with the following methods. Since you do not directly interact with the methods, but rather the Aurora Command Line commands that call them, for each method we also list the command(s) that can call the method. Note that a different method or methods may be called by a command depending on how the command's other code executes. Similarly, multiple commands can call the same method. We also list the methods' argument signatures, which are used by their associated hooks. <a name="Chart"></a>
+
+  Aurora Client API Method | Client API Method Argument Signature | Aurora Command Line Command
+  -------------------------| ------------------------------------- | ---------------------------
+  ```create_job``` | ```self```, ```config``` | ```job create```, ```runtask```
+  ```restart``` | ```self```, ```job_key```, ```shards```, ```update_config```, ```health_check_interval_seconds``` | ```job restart```
+  ```kill_job``` | ```self```, ```job_key```, ```shards=None``` |  ```job kill```
+  ```start_cronjob``` | ```self```, ```job_key``` | ```cron start```
+  ```start_job_update``` | ```self```, ```config```, ```instances=None``` | ```update start```
+
+Some specific examples:
+
+* `pre_create_job` executes when a `create_job` method is called, and before the `create_job` method itself executes.
+
+* `post_cancel_update` executes after a `cancel_update` method has successfully finished running.
+
+* `err_kill_job` executes when the `kill_job` method is called, but doesn't successfully finish running.
+
+## Activating and Using Hooks
+
+By default, hooks are inactive. If you do not want to use hooks, you do not need to make any changes to your code. If you do want to use hooks, you will need to alter your `.aurora` config file to activate them both for the configuration as a whole as well as for individual `Job`s. And, of course, you will need to define in your config file what happens when a particular hook executes.
+
+## .aurora Config File Settings
+
+You can define a top-level `hooks` variable in any `.aurora` config file. `hooks` is a list of all objects that define hooks used by `Job`s defined in that config file. If you do not want to define any hooks for a configuration, `hooks` is optional.
+
+    hooks = [Object_with_defined_hooks1, Object_with_defined_hooks2]
+
+Be careful when assembling a config file using `include` on multiple smaller config files. If there are multiple files that assign a value to `hooks`, only the last assignment made will stick. For example, if `x.aurora` has `hooks = [a, b, c]` and `y.aurora` has `hooks = [d, e, f]` and `z.aurora` has, in this order, `include x.aurora` and `include y.aurora`, the `hooks` value will be `[d, e, f]`.
+
+Also, for any `Job` that you want to use hooks with, its `Job` definition in the `.aurora` config file must set an `enable_hooks` flag to `True` (it defaults to `False`). By default, hooks are disabled and you must enable them for `Job`s of your choice.
+
+To summarize, to use hooks for a particular job, you must both activate hooks for your config file as a whole, and for that job. Activating hooks only for individual jobs won't work, nor will only activating hooks for your config file as a whole. You must also specify the hooks' defining object in the `hooks` variable.
+
+Recall that `.aurora` config files are written in Pystachio. So the following turns on hooks for production jobs at cluster1 and cluster2, but leaves them off for similar jobs with a defined user role. Of course, you also need to list the objects that define the hooks in your config file's `hooks` variable.
+
+    jobs = [
+            Job(enable_hooks = True, cluster = c, env = 'prod') for c in ('cluster1', 'cluster2')
+           ]
+    jobs.extend(
+       Job(cluster = c, env = 'prod', role = getpass.getuser()) for c in ('cluster1', 'cluster2'))
+       # Hooks disabled for these jobs
+
+## Command Line
+
+All Aurora Command Line commands now accept an `.aurora` config file as an optional parameter (some, of course, accept it as a required parameter). Whenever a command has a `.aurora` file parameter, any hooks specified and activated in the `.aurora` file can be used. For example:
+
+    aurora job restart cluster1/role/env/app myapp.aurora
+
+The command activates any hooks specified and activated in `myapp.aurora`. For the `restart` command, that is the only thing the `myapp.aurora` parameter does. So, if the command was the following, since there is no `.aurora` config file to specify any hooks, no hooks on the `restart` command can run.
+
+    aurora job restart cluster1/role/env/app
+
+## Hooks Protocol
+
+Any object defined in the `.aurora` config file can define hook methods. You should define your hook methods within a class, and then use the class name as a value in the `hooks` list in your config file.
+
+Note that you can define other methods in the class that its hook methods can call; all the logic of a hook does not have to be in its definition.
+
+The following example defines a class containing a `pre_kill_job` hook definition that calls another method defined in the class.
+
+    # Defines a method pre_kill_job
+    class KillConfirmer(object):
+      def confirm(self, msg):
+        return raw_input(msg).lower() == 'yes'
+
+      def pre_kill_job(self, job_key, shards=None):
+        shards = ('shards %s' % shards) if shards is not None else 'all shards'
+        return self.confirm('Are you sure you want to kill %s (%s)? (yes/no): '
+                            % (job_key, shards))
+
+### pre_ Methods
+
+`pre_` methods have the signature:
+
+    pre_<API method name>(self, <associated method's signature>)
+
+`pre_` methods have the same signature as their associated method, with the addition of `self` as the first parameter. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `pre_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+If this method returns False, the API command call aborts.
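+
+As a minimal sketch (the time window is purely illustrative), a `pre_create_job` hook using the
+`*`/`**` syntax that vetoes job creation outside working hours could look like:
+
+    import datetime
+
+    class BusinessHoursOnly(object):
+      def pre_create_job(self, *args, **kw):
+        # Returning False aborts the create_job API call.
+        return 9 <= datetime.datetime.now().hour < 17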
+
+### err_ Methods
+
+`err_` methods have the signature:
+
+    err_<API method name>(self, exc, <associated method's signature>)
+
+`err_` methods have the same signature as their associated method, with the addition of a first parameter `self` and a second parameter `exc`. `exc` is either a result with responseCode other than `ResponseCode.OK` or an `Exception`. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `err_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+`err_` method return codes are ignored.
+
+### post_ Methods
+
+`post_` methods have the signature:
+
+    post_<API method name>(self, result, <associated method signature>)
+
+`post_` method parameters are `self`, then `result`, followed by the same parameter signature as their associated method. `result` is the result of the associated method call. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `post_` methods, you can use the `*` and `**` syntax to designate that all unspecified arguments are passed in a list to the `*`ed parameter and all unspecified named arguments with values are passed as name/value pairs to the `**`ed parameter.
+
+`post_` method return codes are ignored.
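+
+A minimal sketch of a `post_` hook attached to `create_job`, which simply records the `result`
+object it receives (the class name is arbitrary):
+
+    class CreateReporter(object):
+      def post_create_job(self, result, *args, **kw):
+        # result is whatever the create_job call returned; this hook's return value is ignored.
+        print('create_job finished with: %s' % result)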
+
+## Generic Hooks
+
+There are seven Aurora API Methods which any of the three hook types can attach to. Thus, there are 21 possible hook/method combinations for a single `.aurora` config file. Say that you define `pre_` and `post_` hooks for the `restart` method. That leaves 19 undefined hook/method combinations; `err_restart` and the 3 `pre_`, `post_`, and `err_` hooks for each of the other 6 hookable methods. You can define what happens when any of these otherwise undefined 19 hooks execute via a generic hook, whose signature is:
+
+    generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw)
+
+where:
+
+* `hook_config` is a named tuple of `config` (the Pystachio `config` object) and `job_key`.
+
+* `event` is one of `pre`, `err`, or `post`, indicating which type of hook the generic hook is standing in for. For example, assume no specific hooks were defined for the `restart` API command. If `generic_hook` is defined and activated, and `restart` is called, `generic_hook` will effectively run as `pre_restart`, `post_restart`, and `err_restart`. You can use a selection statement on this value so that `generic_hook` will act differently based on whether it is standing in for a `pre_`, `post_`, or `err_` hook.
+
+* `method_name` is the Client API method name whose execution is causing this execution of the `generic_hook`.
+
+* `*args`, `**kw` are the API method arguments and keyword arguments respectively.
+* `result_or_err` is a tri-state parameter taking one of these three values:
+  1. `None` for `pre_` hooks
+  2. `result` for `post_` hooks
+  3. `exc` for `err_` hooks
+
+Example:
+
+    # Overrides the standard do-nothing generic_hook by adding a log writing operation.
+    from twitter.common import log
+
+    class Logger(object):
+      '''Adds to the log every time a hookable API method is called'''
+      def generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw):
+        log.info('%s: %s_%s of %s'
+                 % (self.__class__.__name__, event, method_name, hook_config.job_key))
+
+## Hooks Process Checklist
+
+1. In your `.aurora` config file, add a `hooks` variable. Note that you may want to define a `.aurora` file only for hook definitions and then include this file in multiple other config files that you want to use the same hooks.
+
+    hooks = []
+
+2. In the `hooks` variable, list all objects that define hooks used by `Job`s defined in this config:
+
+    hooks = [Object_hook_definer1, Object_hook_definer2]
+
+3. For each job that uses hooks in this config file, add `enable_hooks = True` to the `Job` definition. Note that this is necessary even if you only want to use the generic hook.
+
+4. Write your `pre_`, `post_`, and `err_` hook definitions as part of an object definition in your `.aurora` config file.
+
+5. If desired, write your `generic_hook` definition as part of an object definition in your `.aurora` config file. Remember, the object must be listed as a member of `hooks`.
+
+6. If your Aurora command line command does not otherwise take an `.aurora` config file argument, add the appropriate `.aurora` file as an argument in order to define and activate the configuration's hooks.
diff --git a/source/documentation/0.17.0/reference/configuration-best-practices.md b/source/documentation/0.17.0/reference/configuration-best-practices.md
new file mode 100644
index 0000000..9ce7fcb
--- /dev/null
+++ b/source/documentation/0.17.0/reference/configuration-best-practices.md
@@ -0,0 +1,187 @@
+Aurora Configuration Best Practices
+===================================
+
+Use As Few .aurora Files As Possible
+------------------------------------
+
+When creating your `.aurora` configuration, try to keep all versions of
+a particular job within the same `.aurora` file. For example, if you
+have separate jobs for `cluster1`, `cluster1` staging, `cluster1`
+testing, and `cluster2`, keep them as close together as possible.
+
+Constructs shared across multiple jobs owned by your team (e.g.
+team-level defaults or structural templates) can be split into separate
+`.aurora` files and included via the `include` directive.
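+
+For example (the file name is hypothetical):
+
+    include('team_defaults.aurora')
+
+    # Objects defined in team_defaults.aurora (e.g. a shared task template)
+    # can now be referenced here as if they had been defined in this file.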
+
+
+Avoid Boilerplate
+------------------
+
+If you see repetition or find yourself copy and pasting any parts of
+your configuration, it's likely an opportunity for templating. Take the
+example below:
+
+`redundant.aurora` contains:
+
+    download = Process(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'rm -rf Python-2.7.3 && tar xjf Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    build = Process(
+      name = 'build',
+      cmdline = 'pushd Python-2.7.3 && ./configure && make && popd',
+      max_failures = 1)
+
+    email = Process(
+      name = 'email',
+      cmdline = 'echo Success | mail feynman@tmc.com',
+      max_failures = 5,
+      min_duration = 1)
+
+    build_python = Task(
+      name = 'build_python',
+      processes = [download, unpack, build, email],
+      constraints = [Constraint(order = ['download', 'unpack', 'build', 'email'])])
+
+As you'll notice, there's a lot of repetition in the `Process`
+definitions. For example, almost every process sets a `max_failures`
+limit to 5 and a `min_duration` to 1. This is an opportunity for factoring
+into a common process template.
+
+Furthermore, the Python version is repeated everywhere. This can be
+bound via structural templating as described in the [Advanced Binding](../configuration-templating/#AdvancedBinding)
+section.
+
+`less_redundant.aurora` contains:
+
+    class Python(Struct):
+      version = Required(String)
+      base = Default(String, 'Python-{{version}}')
+      package = Default(String, '{{base}}.tar.bz2')
+
+    ReliableProcess = Process(
+      max_failures = 5,
+      min_duration = 1)
+
+    download = ReliableProcess(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/{{python.version}}/{{python.package}}')
+
+    unpack = ReliableProcess(
+      name = 'unpack',
+      cmdline = 'rm -rf {{python.base}} && tar xjf {{python.package}}')
+
+    build = ReliableProcess(
+      name = 'build',
+      cmdline = 'pushd {{python.base}} && ./configure && make && popd',
+      max_failures = 1)
+
+    email = ReliableProcess(
+      name = 'email',
+      cmdline = 'echo Success | mail {{role}}@foocorp.com')
+
+    build_python = SequentialTask(
+      name = 'build_python',
+      processes = [download, unpack, build, email]).bind(python = Python(version = "2.7.3"))
+
+
+Thermos Uses bash, But Thermos Is Not bash
+-------------------------------------------
+
+#### Bad
+
+Many tiny Processes make for harder-to-manage configurations.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'rcp user@my_machine:my_application .'
+    )
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+#### Good
+
+Each `cmdline` runs in a bash subshell, so you have the full power of
+bash. Chaining commands with `&&` or `||` is almost always the right
+thing to do.
+
+Also for Tasks that are simply a list of processes that run one after
+another, consider using the `SequentialTask` helper which applies a
+linear ordering constraint for you.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'rcp user@my_machine:my_application . && unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+
+Rarely Use Functions In Your Configurations
+-------------------------------------------
+
+90% of the time you define a function in a `.aurora` file, you're
+probably Doing It Wrong(TM).
+
+#### Bad
+
+    def get_my_task(name, user, cpu, ram, disk):
+      return Task(
+        name = name,
+        user = user,
+        processes = [STAGE_PROCESS, RUN_PROCESS],
+        constraints = order(STAGE_PROCESS, RUN_PROCESS),
+        resources = Resources(cpu = cpu, ram = ram, disk = disk)
+      )
+
+    task_one = get_my_task('task_one', 'feynman', 1.0, 32*MB, 1*GB)
+    task_two = get_my_task('task_two', 'feynman', 2.0, 64*MB, 1*GB)
+
+#### Good
+
+This one is more idiomatic. Forced keyword arguments prevent accidents,
+e.g. constructing a task with "32*MB" when you mean 32MB of ram and not
+disk. Less proliferation of task-construction techniques means an
+easier-to-read, quicker-to-understand, and more composable
+configuration.
+
+    TASK_TEMPLATE = SequentialTask(
+      user = 'wickman',
+      processes = [STAGE_PROCESS, RUN_PROCESS],
+    )
+
+    task_one = TASK_TEMPLATE(
+      name = 'task_one',
+      resources = Resources(cpu = 1.0, ram = 32*MB, disk = 1*GB) )
+
+    task_two = TASK_TEMPLATE(
+      name = 'task_two',
+      resources = Resources(cpu = 2.0, ram = 64*MB, disk = 1*GB)
+    )
diff --git a/source/documentation/0.17.0/reference/configuration-templating.md b/source/documentation/0.17.0/reference/configuration-templating.md
new file mode 100644
index 0000000..16d5c9d
--- /dev/null
+++ b/source/documentation/0.17.0/reference/configuration-templating.md
@@ -0,0 +1,306 @@
+Aurora Configuration Templating
+===============================
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a templating library called
+*Pystachio*, a powerful tool for configuration specification and reuse.
+
+[Aurora Configuration Reference](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the *Aurora+Thermos Configuration
+Reference* without `import` statements - the Aurora config loader
+injects them automatically. Other than that the `.aurora` format
+works like any other Python script.
+
+
+Templating 1: Binding in Pystachio
+----------------------------------
+
+Pystachio uses the visually distinctive {{}} to indicate template
+variables. These are often called "mustache variables" after the
+similarly appearing variables in the Mustache templating system and
+because the curly braces resemble mustaches.
+
+If you are familiar with the Mustache system, templates in Pystachio
+have significant differences. They have no nesting, joining, or
+inheritance semantics. On the other hand, when evaluated, templates
+are evaluated iteratively, so this affords some level of indirection.
+
+Let's start with the simplest template: text with one
+variable, in this case `name`:
+
+    Hello {{name}}
+
+If we evaluate this as is, we'd get back:
+
+    Hello
+
+If a template variable doesn't have a value, when evaluated it's
+replaced with nothing. If we add a binding to give it a value:
+
+    { "name" : "Tom" }
+
+We'd get back:
+
+    Hello Tom
+
+Every Pystachio object has an associated `.bind` method that can bind
+values to {{}} variables. Bindings are not immediately evaluated.
+Instead, they are evaluated only when the interpolated value of the
+object is necessary, e.g. for performing equality or serializing a
+message over the wire.
+
+Objects with and without mustache templated variables behave
+differently:
+
+    >>> Float(1.5)
+    Float(1.5)
+
+    >>> Float('{{x}}.5')
+    Float({{x}}.5)
+
+    >>> Float('{{x}}.5').bind(x = 1)
+    Float(1.5)
+
+    >>> Float('{{x}}.5').bind(x = 1) == Float(1.5)
+    True
+
+    >>> contextual_object = String('{{metavar{{number}}}}').bind(
+    ... metavar1 = "first", metavar2 = "second")
+
+    >>> contextual_object
+    String({{metavar{{number}}}})
+
+    >>> contextual_object.bind(number = 1)
+    String(first)
+
+    >>> contextual_object.bind(number = 2)
+    String(second)
+
+You usually bind simple key to value pairs, but you can also bind three
+other objects: lists, dictionaries, and structurals. These will be
+described in detail later.
+
+
+### Structurals in Pystachio / Aurora
+
+Most Aurora/Thermos users don't ever (knowingly) interact with `String`,
+`Float`, or `Integer` Pystachio objects directly. Instead they interact
+with derived structural (`Struct`) objects that are collections of
+fundamental and structural objects. The structural object components are
+called *attributes*. Aurora's most used structural objects are `Job`,
+`Task`, and `Process`:
+
+    class Process(Struct):
+      cmdline = Required(String)
+      name = Required(String)
+      max_failures = Default(Integer, 1)
+      daemon = Default(Boolean, False)
+      ephemeral = Default(Boolean, False)
+      min_duration = Default(Integer, 5)
+      final = Default(Boolean, False)
+
+Construct default objects by following the object's type with (). If you
+want an attribute to have a value different from its default, include
+the attribute name and value inside the parentheses.
+
+    >>> Process()
+    Process(daemon=False, max_failures=1, ephemeral=False,
+      min_duration=5, final=False)
+
+Attribute values can be template variables, which then receive specific
+values when creating the object.
+
+    >>> Process(cmdline = 'echo {{message}}')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo {{message}}, final=False)
+
+    >>> Process(cmdline = 'echo {{message}}').bind(message = 'hello world')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+A powerful binding property is that all of an object's children inherit its
+bindings:
+
+    >>> List(Process)([
+    ... Process(name = '{{prefix}}_one'),
+    ... Process(name = '{{prefix}}_two')
+    ... ]).bind(prefix = 'hello')
+    ProcessList(
+      Process(daemon=False, name=hello_one, max_failures=1, ephemeral=False, min_duration=5, final=False),
+      Process(daemon=False, name=hello_two, max_failures=1, ephemeral=False, min_duration=5, final=False)
+      )
+
+Remember that an Aurora Job contains Tasks which contain Processes. A
+Job level binding is inherited by its Tasks and all their Processes.
+Similarly a Task level binding is available to that Task and its
+Processes but is *not* visible at the Job level (inheritance is a
+one-way street.)
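+
+As a small sketch of this inheritance (names and resource values are arbitrary), a binding applied
+at the Job level resolves a mustache variable used inside one of its Processes:
+
+    hello = Process(name = 'hello', cmdline = 'echo {{greeting}}')
+
+    hello_job = Job(
+      cluster = 'cluster1',
+      role = 'www-data',
+      environment = 'devel',
+      name = 'hello_world',
+      task = SequentialTask(
+        name = 'hello_task',
+        resources = Resources(cpu = 0.1, ram = 16*MB, disk = 16*MB),
+        processes = [hello])
+    ).bind(greeting = 'hello from the job level')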
+
+#### Mustaches Within Structurals
+
+When you define a `Struct` schema, one powerful, but confusing, feature
+is that all of that structure's attributes are Mustache variables within
+the enclosing scope *once they have been populated*.
+
+For example, when `Process` is defined above, all its attributes such as
+{{`name`}}, {{`cmdline`}}, {{`max_failures`}} etc., are all immediately
+defined as Mustache variables, implicitly bound into the `Process`, and
+inherited by all child objects once they are defined.
+
+Thus, you can do the following:
+
+    >>> Process(name = "installer", cmdline = "echo {{name}} is running")
+    Process(daemon=False, name=installer, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo installer is running, final=False)
+
+WARNING: This binding only takes place in one direction. For example,
+the following does NOT work and does not set the `Process` `name`
+attribute's value.
+
+    >>> Process().bind(name = "installer")
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5, final=False)
+
+The following is also not possible and results in an infinite loop that
+attempts to resolve `Process.name`.
+
+    >>> Process(name = '{{name}}').bind(name = 'installer')
+
+Do not confuse Structural attributes with bound Mustache variables.
+Attributes are implicitly converted to Mustache variables but not vice
+versa.
+
+### Templating 2: Structurals Are Factories
+
+#### A Second Way of Templating
+
+A second templating method is both as powerful as the aforementioned and
+often confused with it. This method is due to automatic conversion of
+Struct attributes to Mustache variables as described above.
+
+Suppose you create a Process object:
+
+    >>> p = Process(name = "process_one", cmdline = "echo hello world")
+
+    >>> p
+    Process(daemon=False, name=process_one, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+This `Process` object, "`p`", can be used wherever a `Process` object is
+needed. It can also be reused by changing the value(s) of its
+attribute(s). Here we change its `name` attribute from `process_one` to
+`process_two`.
+
+    >>> p(name = "process_two")
+    Process(daemon=False, name=process_two, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+Template creation is a common use for this technique:
+
+    >>> Daemon = Process(daemon = True)
+    >>> logrotate = Daemon(name = 'logrotate', cmdline = './logrotate conf/logrotate.conf')
+    >>> mysql = Daemon(name = 'mysql', cmdline = 'bin/mysqld --safe-mode')
+
+### Advanced Binding
+
+As described above, `.bind()` binds simple strings or numbers to
+Mustache variables. In addition to Structural types formed by combining
+atomic types, Pystachio has two container types; `List` and `Map` which
+can also be bound via `.bind()`.
+
+#### Bind Syntax
+
+The `bind()` function can take Python dictionaries or `kwargs`
+interchangeably (when "`kwargs`" is in a function definition, `kwargs`
+receives a Python dictionary containing all keyword arguments after the
+formal parameter list).
+
+    >>> String('{{foo}}').bind(foo = 'bar') == String('{{foo}}').bind({'foo': 'bar'})
+    True
+
+Bindings done "closer" to the object in question take precedence:
+
+    >>> p = Process(name = '{{context}}_process')
+    >>> t = Task().bind(context = 'global')
+    >>> t(processes = [p, p.bind(context = 'local')])
+    Task(processes=ProcessList(
+      Process(daemon=False, name=global_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5),
+      Process(daemon=False, name=local_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5)
+    ))
+
+#### Binding Complex Objects
+
+##### Lists
+
+    >>> fibonacci = List(Integer)([1, 1, 2, 3, 5, 8, 13])
+    >>> String('{{fib[4]}}').bind(fib = fibonacci)
+    String(5)
+
+##### Maps
+
+    >>> first_names = Map(String, String)({'Kent': 'Clark', 'Wayne': 'Bruce', 'Prince': 'Diana'})
+    >>> String('{{first[Kent]}}').bind(first = first_names)
+    String(Clark)
+
+##### Structurals
+
+    >>> String('{{p.cmdline}}').bind(p = Process(cmdline = "echo hello world"))
+    String(echo hello world)
+
+### Structural Binding
+
+Use structural templates when binding more than two or three individual
+values at the Job or Task level. For fewer values, standard
+key-to-string binding is sufficient.
+
+Structural binding is a very powerful pattern and is most useful in
+Aurora/Thermos for doing Structural configuration. For example, you can
+define a job profile. The following profile uses `HDFS`, the Hadoop
+Distributed File System, to designate a file's location. `HDFS` does
+not come with Aurora, so you'll need to either install it separately
+or change the way the dataset is designated.
+
+    class Profile(Struct):
+      version = Required(String)
+      environment = Required(String)
+      dataset = Default(String, 'hdfs://home/aurora/data/{{environment}}')
+
+    PRODUCTION = Profile(version = 'live', environment = 'prod')
+    DEVEL = Profile(version = 'latest',
+                    environment = 'devel',
+                    dataset = 'hdfs://home/aurora/data/test')
+    TEST = Profile(version = 'latest', environment = 'test')
+
+    JOB_TEMPLATE = Job(
+      name = 'application',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = '{{profile.environment}}',
+      task = SequentialTask(
+        name = 'task',
+        resources = Resources(cpu = 2, ram = 4*GB, disk = 8*GB),
+        processes = [
+          Process(name = 'main', cmdline = 'java -jar application.jar '
+                  '-hdfsPath {{profile.dataset}}')
+        ]
+      )
+    )
+
+    jobs = [
+      JOB_TEMPLATE(instances = 100).bind(profile = PRODUCTION),
+      JOB_TEMPLATE.bind(profile = DEVEL),
+      JOB_TEMPLATE.bind(profile = TEST),
+     ]
+
+In this case, a custom structural "Profile" is created to self-document
+the configuration to some degree. It also provides some schema
+"type-checking" and lets default values reference other attributes, as
+`Profile.dataset` does above.
+
+So rather than a `.bind()` with a half-dozen substituted variables, you
+can bind a single object that has sensible defaults stored in a single
+place.
diff --git a/source/documentation/0.17.0/reference/configuration-tutorial.md b/source/documentation/0.17.0/reference/configuration-tutorial.md
new file mode 100644
index 0000000..c170d47
--- /dev/null
+++ b/source/documentation/0.17.0/reference/configuration-tutorial.md
@@ -0,0 +1,531 @@
+Aurora Configuration Tutorial
+=============================
+
+How to write Aurora configuration files, including feature descriptions
+and best practices. When writing a configuration file, make use of
+`aurora job inspect`. It takes the same job key and configuration file
+arguments as `aurora job create` or `aurora update start`. It first ensures the
+configuration parses, then outputs it in human-readable form.
+
+You should read this after going through the general [Aurora Tutorial](../../getting-started/tutorial/).
+
+- [The Basics](#the-basics)
+	- [Use Bottom-To-Top Object Ordering](#use-bottom-to-top-object-ordering)
+- [An Example Configuration File](#an-example-configuration-file)
+- [Defining Process Objects](#defining-process-objects)
+- [Getting Your Code Into The Sandbox](#getting-your-code-into-the-sandbox)
+- [Defining Task Objects](#defining-task-objects)
+	- [SequentialTask: Running Processes in Parallel or Sequentially](#sequentialtask-running-processes-in-parallel-or-sequentially)
+	- [SimpleTask](#simpletask)
+	- [Combining tasks](#combining-tasks)
+- [Defining Job Objects](#defining-job-objects)
+- [The jobs List](#the-jobs-list)
+- [Basic Examples](#basic-examples)
+
+
+The Basics
+----------
+
+To run a job on Aurora, you must specify a configuration file that tells
+Aurora what it needs to know to schedule the job, what Mesos needs to
+run the tasks the job is made up of, and what Thermos needs to run the
+processes that make up the tasks. This file must have
+a `.aurora` suffix.
+
+A configuration file defines a collection of objects, along with parameter
+values for their attributes. An Aurora configuration file contains the
+following three types of objects:
+
+- Job
+- Task
+- Process
+
+A configuration also specifies a list of `Job` objects assigned
+to the variable `jobs`.
+
+- jobs (list of defined Jobs to run)
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a type-checked dictionary
+templating library called *Pystachio*, a powerful tool for
+configuration specification and reuse. Pystachio objects are tailored
+via templates surrounded by `{{ }}` braces.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the [*Aurora Configuration
+Reference*](../configuration/), without `import` statements - the
+Aurora config loader injects them automatically. Other than that, an `.aurora`
+file works like any other Python script.
+
+[*Aurora Configuration Reference*](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+### Use Bottom-To-Top Object Ordering
+
+A well-structured configuration starts with structural templates (if
+any). Structural templates encapsulate in their attributes all the
+differences between Jobs in the configuration that are not directly
+manipulated at the `Job` level, but typically at the `Process` or `Task`
+level, for example when certain processes are invoked with slightly
+different settings or inputs.
+
+After structural templates, define, in order, `Process`es, `Task`s, and
+`Job`s.
+
+Structural template names should be *UpperCamelCased* and their
+instantiations are typically *UPPER\_SNAKE\_CASED*. `Process`, `Task`,
+and `Job` names are typically *lower\_snake\_cased*. Indentation is typically 2
+spaces.
+
+An Example Configuration File
+-----------------------------
+
+The following is a typical configuration file. Don't worry if there are
+parts you don't understand yet, but you may want to refer back to this
+as you read about its individual parts. Note that names surrounded by
+curly braces {{}} are template variables, which the system replaces with
+bound values for the variables.
+
+    # --- templates here ---
+	class Profile(Struct):
+	  package_version = Default(String, 'live')
+	  java_binary = Default(String, '/usr/lib/jvm/java-1.7.0-openjdk/bin/java')
+	  extra_jvm_options = Default(String, '')
+	  parent_environment = Default(String, 'prod')
+	  parent_serverset = Default(String,
+                                 '/foocorp/service/bird/{{parent_environment}}/bird')
+
+	# --- processes here ---
+	main = Process(
+	  name = 'application',
+	  cmdline = '{{profile.java_binary}} -server -Xmx1792m '
+	            '{{profile.extra_jvm_options}} '
+	            '-jar application.jar '
+	            '-upstreamService {{profile.parent_serverset}}'
+	)
+
+	# --- tasks ---
+	base_task = SequentialTask(
+	  name = 'application',
+	  processes = [
+	    Process(
+	      name = 'fetch',
+          cmdline = 'curl -O '
+                    'https://packages.foocorp.com/{{profile.package_version}}/application.jar'),
+	  ]
+	)
+
+        # not always necessary but often useful to have separate task
+        # resource classes
+        staging_task = base_task(resources =
+                         Resources(cpu = 1.0,
+                                   ram = 2048*MB,
+                                   disk = 1*GB))
+	production_task = base_task(resources =
+                            Resources(cpu = 4.0,
+                                      ram = 2560*MB,
+                                      disk = 10*GB))
+
+	# --- job template ---
+	job_template = Job(
+	  name = 'application',
+	  role = 'myteam',
+	  contact = 'myteam-team@foocorp.com',
+	  instances = 20,
+	  service = True,
+	  task = production_task
+	)
+
+	# -- profile instantiations (if any) ---
+	PRODUCTION = Profile()
+	STAGING = Profile(
+	  extra_jvm_options = '-Xloggc:gc.log',
+	  parent_environment = 'staging'
+	)
+
+	# -- job instantiations --
+	jobs = [
+          job_template(cluster = 'cluster1', environment = 'prod')
+	               .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster2', environment = 'prod')
+	                .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster1',
+                        environment = 'staging',
+			service = False,
+			task = staging_task,
+			instances = 2)
+			.bind(profile = STAGING),
+	]
+
+## Defining Process Objects
+
+Processes are handled by the Thermos system. A process is a single
+executable step run as part of an Aurora task; it consists of a single
+bash-executable statement.
+
+The key (and required) `Process` attributes are:
+
+-   `name`: Any string which is a valid Unix filename (no slashes,
+    NULLs, or leading periods). The `name` value must be unique relative
+    to other Processes in a `Task`.
+-   `cmdline`: A command line run in a bash subshell, so you can use
+    bash scripts. Nothing is supplied for command-line arguments,
+    so `$*` is unspecified.
+
+Many tiny processes make managing configurations more difficult. For
+example, the following is a bad way to define processes.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip'
+    )
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+Since `cmdline` runs in a bash subshell, you can chain commands
+with `&&` or `||`.
+
+When defining a `Task` that is just a list of Processes run in a
+particular order, use `SequentialTask`, as described in the
+[*Defining Task Objects*](#defining-task-objects) section. The following simplifies and combines the
+above multiple `Process` definitions into just two.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip && '
+                'unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+`Process` also has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#process-objects).
+
+
+## Getting Your Code Into The Sandbox
+
+When using Aurora, you need to get your executable code into its "sandbox", specifically
+the Task sandbox where the code executes for the Processes that make up that Task.
+
+Each Task has a sandbox created when the Task starts and garbage
+collected when it finishes. All of a Task's processes run in its
+sandbox, so processes can share state by using a shared current
+working directory.
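+
+For example, a sketch of two processes that share state through the sandbox's
+working directory (names and commands are illustrative; they are run in order
+via a `SequentialTask`):
+
+    producer = Process(name = 'producer', cmdline = 'echo hello > shared.txt')
+    consumer = Process(name = 'consumer', cmdline = 'cat shared.txt')
+
+    shared_state_task = SequentialTask(
+      processes = [producer, consumer],
+      resources = Resources(cpu = 0.1, ram = 16*MB, disk = 16*MB))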
+
+Typically, you save this code somewhere. You then need to define a Process
+in your `.aurora` configuration file that fetches the code from that location
+to where the agent can see it. For a public cloud, that can be anywhere public on
+the Internet, such as S3. For private cloud internal storage, you need to put it
+on an accessible HDFS cluster or similar storage.
+
+The template for this Process is:
+
+    <name> = Process(
+      name = '<name>',
+      cmdline = '<command to copy and extract code archive into current working directory>'
+    )
+
+Note: Be sure the extracted code archive has an executable.
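+
+For instance, a sketch of such a fetch Process (the URL and archive name are
+hypothetical):
+
+    fetch_app = Process(
+      name = 'fetch_app',
+      cmdline = 'curl -O https://artifacts.example.com/releases/app.tar.gz && '
+                'tar xzf app.tar.gz'
+    )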
+
+## Getting Environment Variables Into The Sandbox
+
+Every time a process is forked, the Thermos executor checks for the existence of a
+`.thermos_profile` file in the sandbox; if it exists, the file is sourced before the
+process runs. You can use this mechanism to pass environment variables to the sandbox.
+
+An example for this Process is:
+
+    setup_env = Process(
+      name = 'setup',
+      cmdline = '''cat <<EOF > .thermos_profile
+    export RESULT=hello
+    EOF'''
+    )
+
+    read_env = Process(
+      name = 'read',
+      cmdline = 'echo $RESULT'
+    )
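+
+Since `.thermos_profile` is sourced each time a process is forked, the profile
+only needs to be written before any process that reads it runs. A sketch of
+wiring the two processes above together so they run in order:
+
+    env_task = SequentialTask(
+      processes = [setup_env, read_env],
+      resources = Resources(cpu = 0.1, ram = 16*MB, disk = 16*MB))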
+
+## Defining Task Objects
+
+Tasks are handled by Mesos. A task is a collection of processes that
+runs in a shared sandbox. It's the fundamental unit Aurora uses to
+schedule the datacenter; essentially what Aurora does is find places
+in the cluster to run tasks.
+
+The key (and required) parts of a Task are:
+
+-   `name`: A string giving the Task's name. By default, if a Task is
+    not given a name, it inherits the first name in its Process list.
+
+-   `processes`: An unordered list of Process objects bound to the Task.
+    The value of the optional `constraints` attribute affects the
+    contents as a whole. Currently, the only constraint, `order`, determines if
+    the processes run in parallel or sequentially.
+
+-   `resources`: A `Resource` object defining the Task's resource
+    footprint. A `Resource` object has three attributes:
+    -   `cpu`: A Float, the fractional number of cores the Task requires.
+    -   `ram`: An Integer, RAM bytes the Task requires.
+    -   `disk`: An Integer, disk bytes the Task requires.
+
+A basic Task definition looks like:
+
+    Task(
+        name="hello_world",
+        processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+        resources=Resources(cpu = 1.0,
+                            ram = 1*GB,
+                            disk = 1*GB))
+
+A Task has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#task-object).
+
+
+### SequentialTask: Running Processes in Parallel or Sequentially
+
+By default, a Task with several Processes runs them in parallel. There
+are two ways to run Processes sequentially:
+
+-   Include an `order` constraint in the Task definition's `constraints`
+    attribute whose arguments specify the processes' run order:
+
+        Task( ... processes=[process1, process2, process3],
+	          constraints = order(process1, process2, process3), ...)
+
+-   Use `SequentialTask` instead of `Task`; it automatically runs
+    processes in the order specified in the `processes` attribute. No
+    `constraint` parameter is needed:
+
+        SequentialTask( ... processes=[process1, process2, process3] ...)
+
+### SimpleTask
+
+For quickly creating simple tasks, use the `SimpleTask` helper. It
+creates a basic task from a provided name and command line using a
+default set of resources. For example, in a `.aurora` configuration
+file:
+
+    SimpleTask(name="hello_world", command="echo hello world")
+
+is equivalent to
+
+    Task(name="hello_world",
+         processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+         resources=Resources(cpu = 1.0,
+                             ram = 1*GB,
+                             disk = 1*GB))
+
+The simplest idiomatic Job configuration thus becomes:
+
+    import os
+    hello_world_job = Job(
+      task=SimpleTask(name="hello_world", command="echo hello world"),
+      role=os.getenv('USER'),
+      cluster="cluster1")
+
+When written to `hello_world.aurora`, you invoke it with a simple
+`aurora job create cluster1/$USER/test/hello_world hello_world.aurora`.
+
+### Combining tasks
+
+`Tasks.concat` (synonym: `concat_tasks`) and
+`Tasks.combine` (synonym: `combine_tasks`) merge multiple Task definitions
+into a single Task. It may be easier to define complex Jobs
+as smaller constituent Tasks. But since a Job only includes a single
+Task, the subtasks must be combined before using them in a Job.
+Smaller Tasks can also be reused between Jobs, instead of having to
+repeat their definition for multiple Jobs.
+
+With both methods, the merged Task takes the first Task's name. The
+difference between the two is the resulting Task's process ordering.
+
+-   `Tasks.combine` runs its subtasks' processes in no particular order.
+    The new Task's resource consumption is the sum of all its subtasks'
+    consumption.
+
+-   `Tasks.concat` runs its subtasks in the order supplied, with each
+    subtask's processes run serially between tasks. It is analogous to
+    the `order` constraint helper, except at the Task level instead of
+    the Process level. The new Task's resource consumption is the
+    maximum value specified by any subtask for each Resource attribute
+    (cpu, ram and disk).
+
+For example, given the following:
+
+    setup_task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper],
+      # It is important to note that Tasks.concat has
+      # no effect on the ordering of the processes within a task;
+      # hence the necessity of the order() statement below
+      # (otherwise, the order in which download_interpreter
+      # and update_zookeeper run will be non-deterministic)
+      constraints=order(download_interpreter, update_zookeeper),
+      ...
+    )
+
+    run_task = SequentialTask(
+      ...
+      processes=[download_application, start_application],
+      ...
+    )
+
+    combined_task = Tasks.concat(setup_task, run_task)
+
+The `Tasks.concat` command merges the two Tasks into a single Task and
+ensures all processes in `setup_task` run before the processes
+in `run_task`. Conceptually, the task is reduced to:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper,
+                        download_application, start_application),
+      ...
+    )
+
+In the case of `Tasks.combine`, the two schedules run in parallel:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper) +
+                        order(download_application, start_application),
+      ...
+    )
+
+In the latter case, each of the two sequences may operate in parallel.
+Of course, this may not be the intended behavior (for example, if
+the `start_application` Process implicitly relies
+upon `download_interpreter`). Make sure you understand the difference
+between using one or the other.
+
+## Defining Job Objects
+
+A job is a group of identical tasks that Aurora can run in a Mesos cluster.
+
+A `Job` object is defined by the values of several attributes, some
+required and some optional. The required attributes are:
+
+-   `task`: Task object to bind to this job. Note that a Job can
+    only take a single Task.
+
+-   `role`: Job's role account; in other words, the user account to run
+    the job as on a Mesos cluster machine. A common value is
+    `os.getenv('USER')`, which uses a Python call to get the user who
+    submits the job request. The other common value is the service
+    account that runs the job, e.g. `www-data`.
+
+-   `environment`: Job's environment, typical values
+    are `devel`, `test`, or `prod`.
+
+-   `cluster`: Aurora cluster to schedule the job in, defined in
+    `/etc/aurora/clusters.json` or `~/.clusters.json`. You can specify
+    jobs where the only difference is the `cluster`, then at run time
+    only run the Job whose job key includes your desired cluster's name.
+
+You usually see a `name` parameter. By default, `name` inherits its
+value from the Job's associated Task object, but you can override this
+default. For these four parameters, a Job definition might look like:
+
+    foo_job = Job( name = 'foo', cluster = 'cluster1',
+              role = os.getenv('USER'), environment = 'prod',
+              task = foo_task)
+
+In addition to the required attributes, there are several optional
+attributes. Details can be found in the [Aurora Configuration Reference](../configuration/#job-objects).
+
+
+## The jobs List
+
+At the end of your `.aurora` file, you need to specify a list of the
+file's defined Jobs. For example, the following exports the jobs `job1`,
+`job2`, and `job3`.
+
+    jobs = [job1, job2, job3]
+
+This allows the aurora client to invoke commands on those jobs, such as
+starting, updating, or killing them.
+
+
+
+Basic Examples
+==============
+
+These are provided to give a basic understanding of simple Aurora jobs.
+
+### hello_world.aurora
+
+Put the following in a file named `hello_world.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    import os
+    hello_world_process = Process(name = 'hello_world', cmdline = 'echo hello world')
+
+    hello_world_task = Task(
+      resources = Resources(cpu = 0.1, ram = 16 * MB, disk = 16 * MB),
+      processes = [hello_world_process])
+
+    hello_world_job = Job(
+      cluster = 'cluster1',
+      role = os.getenv('USER'),
+      task = hello_world_task)
+
+    jobs = [hello_world_job]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world hello_world.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world
+
+### Environment Tailoring
+
+Put the following in a file named `hello_world_productionized.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    include('hello_world.aurora')
+
+    production_resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 2 * GB)
+    staging_resources = Resources(cpu = 0.1, ram = 32 * MB, disk = 512 * MB)
+    hello_world_template = hello_world(
+        name = "hello_world-{{cluster}}"
+        task = hello_world(resources=production_resources))
+
+    jobs = [
+      # production jobs
+      hello_world_template(cluster = 'cluster1', instances = 25),
+      hello_world_template(cluster = 'cluster2', instances = 15),
+
+      # staging jobs
+      hello_world_template(
+        cluster = 'local',
+        instances = 1,
+        task = hello_world(resources=staging_resources)),
+    ]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world-cluster1 hello_world_productionized.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world-cluster1
\ No newline at end of file
diff --git a/source/documentation/0.17.0/reference/configuration.md b/source/documentation/0.17.0/reference/configuration.md
new file mode 100644
index 0000000..ba5b11b
--- /dev/null
+++ b/source/documentation/0.17.0/reference/configuration.md
@@ -0,0 +1,605 @@
+Aurora Configuration Reference
+==============================
+
+Don't know where to start? The Aurora configuration schema is very
+powerful, and configurations can become quite complex for advanced use
+cases.
+
+For examples of simple configurations to get something up and running
+quickly, check out the [Tutorial](../../getting-started/tutorial/). When you feel comfortable with the basics, move
+on to the [Configuration Tutorial](../configuration-tutorial/) for more in-depth coverage of
+configuration design.
+
+- [Process Schema](#process-schema)
+    - [Process Objects](#process-objects)
+- [Task Schema](#task-schema)
+    - [Task Object](#task-object)
+    - [Constraint Object](#constraint-object)
+    - [Resource Object](#resource-object)
+- [Job Schema](#job-schema)
+    - [Job Objects](#job-objects)
+    - [UpdateConfig Objects](#updateconfig-objects)
+    - [HealthCheckConfig Objects](#healthcheckconfig-objects)
+    - [Announcer Objects](#announcer-objects)
+    - [Container Objects](#container)
+    - [LifecycleConfig Objects](#lifecycleconfig-objects)
+- [Specifying Scheduling Constraints](#specifying-scheduling-constraints)
+- [Template Namespaces](#template-namespaces)
+    - [mesos Namespace](#mesos-namespace)
+    - [thermos Namespace](#thermos-namespace)
+
+
+Process Schema
+==============
+
+Process objects consist of required `name` and `cmdline` attributes. You can customize Process
+behavior with its optional attributes. Remember, Processes are handled by Thermos.
+
+### Process Objects
+
+  **Attribute Name**  | **Type**    | **Description**
+  ------------------- | :---------: | ---------------------------------
+   **name**           | String      | Process name (Required)
+   **cmdline**        | String      | Command line (Required)
+   **max_failures**   | Integer     | Maximum process failures (Default: 1)
+   **daemon**         | Boolean     | When True, this is a daemon process. (Default: False)
+   **ephemeral**      | Boolean     | When True, this is an ephemeral process. (Default: False)
+   **min_duration**   | Integer     | Minimum duration between process restarts in seconds. (Default: 15)
+   **final**          | Boolean     | When True, this process is a finalizing one that should run last. (Default: False)
+   **logger**         | Logger      | Struct defining the log behavior for the process. (Default: Empty)
+
+#### name
+
+The name is any valid UNIX filename string (specifically no
+slashes, NULLs or leading periods). Within a Task object, each Process name
+must be unique.
+
+#### cmdline
+
+The command line run by the process. The command line is invoked in a bash
+subshell, so it can involve full-blown bash scripts. However, nothing is
+supplied for command-line arguments so `$*` is unspecified.
+
+#### max_failures
+
+The maximum number of failures (non-zero exit statuses) this process can
+have before being marked permanently failed and not retried. If a
+process permanently fails, Thermos looks at the failure limit of the task
+containing the process (usually 1) to determine if the task has
+failed as well.
+
+Setting `max_failures` to 0 makes the process retry
+indefinitely until it achieves a successful (zero) exit status.
+It retries at most once every `min_duration` seconds to prevent
+an effective denial of service attack on the coordinating Thermos scheduler.
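+
+For example, a sketch of a process that retries indefinitely, waiting at least
+30 seconds between runs (the health-check script is hypothetical):
+
+    monitor = Process(
+      name = 'monitor',
+      cmdline = './check_health.sh',
+      max_failures = 0,
+      min_duration = 30)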
+
+#### daemon
+
+By default, Thermos processes are non-daemon. If `daemon` is set to True, a
+successful (zero) exit status does not prevent future process runs.
+Instead, the process is re-invoked after `min_duration` seconds.
+However, the maximum failure limit still applies. A combination of
+`daemon=True` and `max_failures=0` causes a process to retry
+indefinitely regardless of exit status. This should be avoided
+for very short-lived processes because of the accumulation of
+checkpointed state for each process run. When running in Mesos
+specifically, `max_failures` is capped at 100.
+
+#### ephemeral
+
+By default, Thermos processes are non-ephemeral. If `ephemeral` is set to
+True, the process' status is not used to determine if its containing task
+has completed. For example, consider a task with a non-ephemeral
+webserver process and an ephemeral logsaver process
+that periodically checkpoints its log files to a centralized data store.
+The task is considered finished once the webserver process has
+completed, regardless of the logsaver's current status.
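+
+A sketch of that example (the commands are hypothetical):
+
+    webserver = Process(name = 'webserver', cmdline = './run_server.sh')
+    logsaver = Process(
+      name = 'logsaver',
+      cmdline = './checkpoint_logs.sh',
+      daemon = True,
+      ephemeral = True)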
+
+#### min_duration
+
+Processes may succeed or fail multiple times during a single task's
+duration. Each of these is called a *process run*. `min_duration` is
+the minimum number of seconds the scheduler waits before running the
+same process.
+
+#### final
+
+Processes can be grouped into two classes: ordinary processes and
+finalizing processes. By default, Thermos processes are ordinary. They
+run as long as the task is considered healthy (i.e., no failure
+limits have been reached.) But once all regular Thermos processes
+finish or the task reaches a certain failure threshold, it
+moves into a "finalization" stage and runs all finalizing
+processes. These are typically processes necessary for cleaning up the
+task, such as log checkpointers, or perhaps e-mail notifications that
+the task completed.
+
+Finalizing processes may not depend upon ordinary processes or
+vice-versa; however, finalizing processes may depend upon other
+finalizing processes and otherwise follow a typical process
+schedule.
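+
+For instance, a sketch of a finalizing process that archives logs once the task
+has completed (the paths and destination are hypothetical):
+
+    archive_logs = Process(
+      name = 'archive_logs',
+      cmdline = 'tar czf logs.tgz *.log && curl -T logs.tgz https://log-archive.example.com/',
+      final = True)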
+
+#### logger
+
+The default behavior of Thermos is to store stderr/stdout logs in files which grow unbounded.
+In the event that you have large log volume, you may want to configure Thermos to automatically
+rotate logs after they grow to a certain size, which can prevent your job from using more than its
+allocated disk space.
+
+Logger objects specify a `destination` for Process logs which is, by default, `file` - a pair of
+`stdout` and `stderr` files. It's also possible to specify `console` to send log output to
+the Process stdout and stderr streams, `none` to suppress any log output, or `both` to send logs to
+files and console streams.
+
+The default Logger `mode` is `standard` which lets the stdout and stderr streams grow without bound.
+
+  **Attribute Name**  | **Type**          | **Description**
+  ------------------- | :---------------: | ---------------------------------
+   **destination**    | LoggerDestination | Destination of logs. (Default: `file`)
+   **mode**           | LoggerMode        | Mode of the logger. (Default: `standard`)
+   **rotate**         | RotatePolicy      | An optional rotation policy. (Default: `Empty`)
+
+A RotatePolicy describes log rotation behavior when `mode` is set to `rotate`; it is ignored
+otherwise. If `rotate` is `Empty` or `RotatePolicy()` while `mode` is set to `rotate`, the
+defaults below are used.
+
+  **Attribute Name**  | **Type**     | **Description**
+  ------------------- | :----------: | ---------------------------------
+   **log_size**       | Integer      | Maximum size (in bytes) of an individual log file. (Default: 100 MiB)
+   **backups**        | Integer      | The maximum number of backups to retain. (Default: 5)
+
+An example process configuration is as follows:
+
+        process = Process(
+          name='process',
+          logger=Logger(
+            destination=LoggerDestination('both'),
+            mode=LoggerMode('rotate'),
+            rotate=RotatePolicy(log_size=5*MB, backups=5)
+          )
+        )
+
+Task Schema
+===========
+
+Tasks fundamentally consist of a `name` and a list of Process objects stored as the
+value of the `processes` attribute. Processes can be further constrained with
+`constraints`. By default, `name`'s value inherits from the first Process in the
+`processes` list, so for simple `Task` objects with one Process, `name`
+can be omitted. In Mesos, `resources` is also required.
+
+### Task Object
+
+   **param**               | **type**                         | **description**
+   ---------               | :---------:                      | ---------------
+   ```name```              | String                           | Task name (Required) (Default: ```processes[0].name```)
+   ```processes```         | List of ```Process``` objects    | List of ```Process``` objects bound to this task. (Required)
+   ```constraints```       | List of ```Constraint``` objects | List of ```Constraint``` objects constraining processes.
+   ```resources```         | ```Resource``` object            | Resource footprint. (Required)
+   ```max_failures```      | Integer                          | Maximum process failures before being considered failed (Default: 1)
+   ```max_concurrency```   | Integer                          | Maximum number of concurrent processes (Default: 0, unlimited concurrency.)
+   ```finalization_wait``` | Integer                          | Amount of time allocated for finalizing processes, in seconds. (Default: 30)
+
+#### name
+`name` is a string denoting the name of this task. It defaults to the name of the first Process in
+the list of Processes associated with the `processes` attribute.
+
+#### processes
+
+`processes` is an unordered list of `Process` objects. To constrain the order
+in which they run, use `constraints`.
+
+##### constraints
+
+A list of `Constraint` objects. Currently it supports only one type,
+the `order` constraint. `order` is a list of process names
+that should run in the order given. For example,
+
+        process = Process(cmdline = "echo hello {{name}}")
+        task = Task(name = "echoes",
+                    processes = [process(name = "jim"), process(name = "bob")],
+                    constraints = [Constraint(order = ["jim", "bob"])])
+
+Constraints can be supplied ad-hoc and in duplicate. Not all
+Processes need be constrained; however, Tasks with cycles are
+rejected by the Thermos scheduler.
+
+Use the `order` function as shorthand to generate `Constraint` lists.
+The following:
+
+        order(process1, process2)
+
+is shorthand for
+
+        [Constraint(order = [process1.name(), process2.name()])]
+
+The `order` function accepts Process name strings `('foo', 'bar')` or the processes
+themselves, e.g. `foo=Process(name='foo', ...)`, `bar=Process(name='bar', ...)`,
+`constraints=order(foo, bar)`.
+
+#### resources
+
+Takes a `Resource` object, which specifies the amounts of CPU, memory, and disk space resources
+to allocate to the Task.
+
+#### max_failures
+
+`max_failures` is the number of failed processes needed for the `Task` to be
+marked as failed.
+
+For example, assume a Task has two Processes and a `max_failures` value of `2`:
+
+        template = Process(max_failures=10)
+        task = Task(
+          name = "fail",
+          processes = [
+             template(name = "failing", cmdline = "exit 1"),
+             template(name = "succeeding", cmdline = "exit 0")
+          ],
+          max_failures=2)
+
+The `failing` Process could fail 10 times before being marked as permanently
+failed, and the `succeeding` Process could succeed on the first run. However,
+the task would succeed despite only allowing for two failed processes. To be more
+specific, there would be 10 failed process runs yet 1 failed process. Both processes
+would have to fail for the Task to fail.
+
+#### max_concurrency
+
+For Tasks with a number of expensive but otherwise independent
+processes, you may want to limit the amount of concurrency
+the Thermos scheduler provides rather than artificially constraining
+it via `order` constraints. For example, a test framework may
+generate a task with 100 test run processes, but wants to run it on
+a machine with only 4 cores. You can limit the amount of parallelism to
+4 by setting `max_concurrency=4` in your task configuration.
+
+For example, the following task spawns 180 Processes ("mappers")
+to compute individual elements of a 180 degree sine table, all dependent
+upon one final Process ("reducer") to tabulate the results:
+
+    def make_mapper(id):
+      return Process(
+        name = "mapper%03d" % id,
+        cmdline = "echo 'scale=50;s(%d*4*a(1)/180)' | bc -l > temp.sine_table.%03d" % (id, id))
+
+    def make_reducer():
+      return Process(name = "reducer",
+                     cmdline = "cat temp.* | nl > sine_table.txt && rm -f temp.*")
+
+    processes = map(make_mapper, range(180))
+
+    task = Task(
+      name = "mapreduce",
+      processes = processes + [make_reducer()],
+      constraints = [Constraint(order = [mapper.name(), 'reducer']) for mapper
+                     in processes],
+      max_concurrency = 8)
+
+#### finalization_wait
+
+Process execution is organized into three active stages: `ACTIVE`,
+`CLEANING`, and `FINALIZING`. The `ACTIVE` stage is when ordinary processes run.
+This stage lasts as long as Processes are running and the Task is healthy.
+The moment either all Processes have finished successfully or the Task has reached a
+maximum Process failure limit, it goes into the `CLEANING` stage and sends
+SIGTERMs to all currently running Processes and their process trees.
+Once all Processes have terminated, the Task goes into the `FINALIZING` stage
+and invokes the schedule of all Processes with the "final" attribute set to True.
+
+This whole process from the end of `ACTIVE` stage to the end of `FINALIZING`
+must happen within `finalization_wait` seconds. If it does not
+finish during that time, all remaining Processes are sent SIGKILLs
+(or if they depend upon uncompleted Processes, are
+never invoked.)
+
+When running on Aurora, the `finalization_wait` is capped at 60 seconds.
+
+### Constraint Object
+
+Constraint objects currently support only a single ordering constraint, `order`,
+which specifies that its processes run sequentially in the order given. By
+default, all processes run in parallel when bound to a `Task` without
+ordering constraints.
+
+   param | type           | description
+   ----- | :----:         | -----------
+   order | List of String | List of processes by name (String) that should be run serially.
+
+### Resource Object
+
+Specifies the amount of CPU, RAM, and disk resources the task needs. See the
+[Resource Isolation document](../../features/resource-isolation/) for suggested values and to understand how
+resources are allocated.
+
+  param      | type    | description
+  -----      | :----:  | -----------
+  ```cpu```  | Float   | Fractional number of cores required by the task.
+  ```ram```  | Integer | Bytes of RAM required by the task.
+  ```disk``` | Integer | Bytes of disk required by the task.
+  ```gpu```  | Integer | Number of GPU cores required by the task.
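+
+For example, a sketch of a resource footprint that also requests a GPU (GPU
+resources are only granted when the scheduler is started with
+`-allow_gpu_resource`):
+
+    resources = Resources(cpu = 4.0, ram = 8*GB, disk = 16*GB, gpu = 1)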
+
+
+Job Schema
+==========
+
+### Job Objects
+
+*Note: Specifying a ```Container``` object as the value of the ```container``` property is
+  deprecated in favor of setting its value directly to the appropriate ```Docker``` or ```Mesos```
+  container type*
+
+*Note: Specifying preemption behavior of tasks through `production` flag is deprecated in favor of
+  electing appropriate task tier via `tier` attribute.*
+
+   name | type | description
+   ------ | :-------: | -------
+  ```task``` | Task | The Task object to bind to this job. Required.
+  ```name``` | String | Job name. (Default: inherited from the task attribute's name)
+  ```role``` | String | Job role account. Required.
+  ```cluster``` | String | Cluster in which this job is scheduled. Required.
+  ```environment``` | String | Job environment, default ```devel```. Must be one of ```prod```, ```devel```, ```test``` or ```staging<number>```.
+  ```contact``` | String | Best email address to reach the owner of the job. For production jobs, this is usually a team mailing list.
+  ```instances```| Integer | Number of instances (sometimes referred to as replicas or shards) of the task to create. (Default: 1)
+  ```cron_schedule``` | String | Cron schedule in cron format. May only be used with non-service jobs. See [Cron Jobs](../../features/cron-jobs/) for more information. Default: None (not a cron job.)
+  ```cron_collision_policy``` | String | Policy to use when a cron job is triggered while a previous run is still active. KILL_EXISTING: kill the previous run and schedule the new run. CANCEL_NEW: let the previous run continue and cancel the new run. (Default: KILL_EXISTING)
+  ```update_config``` | ```UpdateConfig``` object | Parameters for controlling the rate and policy of rolling updates.
+  ```constraints``` | dict | Scheduling constraints for the tasks. See the section on the [constraint specification language](#specifying-scheduling-constraints)
+  ```service``` | Boolean | If True, restart tasks regardless of success or failure. (Default: False)
+  ```max_task_failures``` | Integer | Maximum number of failures after which the task is considered to have failed (Default: 1) Set to -1 to allow for infinite failures
+  ```priority``` | Integer | Preemption priority to give the task (Default 0). Tasks with higher priorities may preempt tasks at lower priorities.
+  ```production``` | Boolean |  (Deprecated) Whether or not this is a production task that may [preempt](../../features/multitenancy/#preemption) other tasks (Default: False). Production job role must have the appropriate [quota](../../features/multitenancy/#preemption).
+  ```health_check_config``` | ```HealthCheckConfig``` object | Parameters for controlling a task's health checks. HTTP health check is only used if a  health port was assigned with a command line wildcard.
+  ```container``` | Choice of ```Container```, ```Docker``` or ```Mesos``` object | An optional container to run all processes inside of.
+  ```lifecycle``` | ```LifecycleConfig``` object | An optional task lifecycle configuration that dictates commands to be executed on startup/teardown.  HTTP lifecycle is enabled by default if the "health" port is requested.  See [LifecycleConfig Objects](#lifecycleconfig-objects) for more information.
+  ```tier``` | String | Task tier type. The default scheduler tier configuration allows for 3 tiers: `revocable`, `preemptible`, and `preferred`. If a tier is not elected, Aurora assigns the task to a tier based on its choice of `production` (that is `preferred` for production and `preemptible` for non-production jobs). See the section on [Configuration Tiers](../../features/multitenancy/#configuration-tiers) for more information.
+  ```announce``` | ```Announcer``` object | Optionally enable Zookeeper ServerSet announcements. See [Announcer Objects](#announcer-objects) for more information.
+  ```enable_hooks``` | Boolean | Whether to enable [Client Hooks](../client-hooks/) for this job. (Default: False)
+
+
+### UpdateConfig Objects
+
+Parameters for controlling the rate and policy of rolling updates.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+| ```watch_secs```             | Integer  | Minimum number of seconds a shard must remain in ```RUNNING``` state before considered a success (Default: 45)
+| ```max_per_shard_failures``` | Integer  | Maximum number of restarts per shard during update. Increments total failure count when this limit is exceeded. (Default: 0)
+| ```max_total_failures```     | Integer  | Maximum number of shard failures to be tolerated in total during an update. Cannot be greater than or equal to the total number of tasks in a job. (Default: 0)
+| ```rollback_on_failure```    | boolean  | When False, prevents auto rollback of a failed update (Default: True)
+| ```wait_for_batch_completion```| boolean | When True, all threads from a given batch will be blocked from picking up new instances until the entire batch is updated. This essentially simulates the legacy sequential updater algorithm. (Default: False)
+| ```pulse_interval_secs```    | Integer  |  Indicates a [coordinated update](../../features/job-updates/#coordinated-job-updates). If no pulses are received within the provided interval the update will be blocked. Beta-updater only. Will fail on submission when used with client updater. (Default: None)
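+
+For example, a sketch of a fairly conservative rolling-update policy:
+
+    update_config = UpdateConfig(
+      batch_size = 5,
+      watch_secs = 60,
+      max_per_shard_failures = 2,
+      max_total_failures = 0)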
+
+### HealthCheckConfig Objects
+
+Parameters for controlling a task's health checks via HTTP or a shell command.
+
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```health_checker```           | HealthCheckerConfig | Configure what kind of health check to use.
+| ```initial_interval_secs```    | Integer   | Initial grace period (during which health-check failures are ignored) while performing health checks. (Default: 15)
+| ```interval_secs```            | Integer   | Interval on which to check the task's health. (Default: 10)
+| ```max_consecutive_failures``` | Integer   | Maximum number of consecutive failures that will be tolerated before considering a task unhealthy (Default: 0)
+| ```min_consecutive_successes``` | Integer   | Minimum number of consecutive successful health checks required before considering a task healthy (Default: 1)
+| ```timeout_secs```             | Integer   | Health check timeout. (Default: 1)
+
+### HealthCheckerConfig Objects
+| param                          | type                | description
+| -------                        | :-------:           | --------
+| ```http```                     | HttpHealthChecker  | Configure health check to use HTTP. (Default)
+| ```shell```                    | ShellHealthChecker | Configure health check via a shell command.
+
+### HttpHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```endpoint```                 | String    | HTTP endpoint to check (Default: /health)
+| ```expected_response```        | String    | If not empty, fail the HTTP health check if the response differs. Case insensitive. (Default: ok)
+| ```expected_response_code```   | Integer   | If not zero, fail the HTTP health check if the response code differs. (Default: 0)
+
+### ShellHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```shell_command```            | String    | An alternative to HTTP health checking. Specifies a shell command that will be executed. Any non-zero exit status will be interpreted as a health check failure.
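+
+As an illustration, a sketch of a job-level health check that uses a shell
+command instead of HTTP (the command itself is hypothetical):
+
+    health_check_config = HealthCheckConfig(
+      health_checker = HealthCheckerConfig(
+        shell = ShellHealthChecker(shell_command = 'test -f /tmp/application.ready')),
+      interval_secs = 30,
+      max_consecutive_failures = 3)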
+
+
+### Announcer Objects
+
+If the `announce` field in the Job configuration is set, each task will be
+registered in the ServerSet `/aurora/role/environment/jobname` in the
+zookeeper ensemble configured by the executor (which can optionally be overridden by specifying
+the `zk_path` parameter).  If no Announcer object is specified,
+no announcement will take place.  For more information about ServerSets, see the [Service Discovery](../../features/service-discovery/)
+documentation.
+
+By default, the hostname in the registered endpoints will be the `--hostname` parameter
+that is passed to the mesos agent. To override the hostname value, the executor can be started
+with `--announcer-hostname=<overridden_value>`. If you decide to use `--announcer-hostname` and the
+overridden value needs to change for every executor, then the executor has to be started inside a wrapper; see [Executor Wrapper](../../operations/configuration/#thermos-executor-wrapper).
+
+For example, if you want the hostname in the endpoint to be an IP address instead of the hostname,
+the `--hostname` parameter to the mesos agent can be set to the machine IP or the executor can
+be started with `--announcer-hostname=<host_ip>` while wrapping the executor inside a script.
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```primary_port```             | String    | Which named port to register as the primary endpoint in the ServerSet (Default: `http`)
+| ```portmap```                  | dict      | A mapping of additional endpoints to be announced in the ServerSet (Default: `{ 'aurora': '{{primary_port}}' }`)
+| ```zk_path```                  | String    | Zookeeper serverset path override (executor must be started with the `--announcer-allow-custom-serverset-path` parameter)
+
+#### Port aliasing with the Announcer `portmap`
+
+The primary endpoint registered in the ServerSet is the one allocated to the port
+specified by the `primary_port` in the `Announcer` object, by default
+the `http` port.  This port can be referenced from anywhere within a configuration
+as `{{thermos.ports[http]}}`.
+
+Without the port map, each named port would be allocated a unique port number.
+The `portmap` allows two different named ports to be aliased together.  The default
+`portmap` aliases the `aurora` port (i.e. `{{thermos.ports[aurora]}}`) to
+the `http` port.  Even though the two ports can be referenced independently,
+only one port is allocated by Mesos.  Any port referenced in a `Process` object
+but which is not in the portmap will be allocated dynamically by Mesos and announced as well.
+
+It is possible to use the portmap to alias names to static port numbers, e.g.
+`{'http': 80, 'https': 443, 'aurora': 'http'}`.  In this case, referencing
+`{{thermos.ports[aurora]}}` would look up `{{thermos.ports[http]}}` then
+find a static port 80.  No port would be requested of or allocated by Mesos.
+
+Static ports should be used cautiously as Aurora does nothing to prevent two
+tasks with the same static port allocations from being co-scheduled.
+External constraints such as agent attributes should be used to enforce such
+guarantees should they be needed.
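+
+Putting this together, a sketch of an `announce` configuration that keeps the
+`http` port as primary and aliases an additional `admin` name to it (the alias
+name is illustrative):
+
+    announce = Announcer(
+      primary_port = 'http',
+      portmap = {'admin': 'http'})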
+
+
+### Container Objects
+
+Describes the container the job's processes will run inside. If not using Docker or the Mesos
+unified-container, the container can be omitted from your job config.
+
+  param          | type           | description
+  -----          | :----:         | -----------
+  ```mesos```    | Mesos          | A native Mesos container to use.
+  ```docker```   | Docker         | A Docker container to use (via Docker engine)
+
+### Mesos Object
+
+  param            | type                           | description
+  -----            | :----:                         | -----------
+  ```image```      | Choice(AppcImage, DockerImage) | An optional filesystem image to use within this container.
+
+### AppcImage
+
+Describes an AppC filesystem image.
+
+  param          | type   | description
+  -----          | :----: | -----------
+  ```name```     | String | The name of the appc image.
+  ```image_id``` | String | The [image id](https://github.com/appc/spec/blob/master/spec/aci.md#image-id) of the appc image.
+
+### DockerImage
+
+Describes a Docker filesystem image.
+
+  param      | type   | description
+  -----      | :----: | -----------
+  ```name``` | String | The name of the docker image.
+  ```tag```  | String | The tag that identifies the docker image.
+
+
+### Docker Object
+
+*Note: In order to correctly execute processes inside a job, the Docker container must have Python 2.7 installed.*
+
+*Note: For a private docker registry, mesos mandates that the docker credential file be named `.dockercfg`, even though docker may create a credential file with a different name on various platforms. Also, the `.dockercfg` file needs to be copied into the sandbox using the `-thermos_executor_resources` flag, specified while starting Aurora.*
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```image```      | String          | The name of the docker image to execute.  If the image does not exist locally it will be pulled with ```docker pull```.
+  ```parameters``` | List(Parameter) | Additional parameters to pass to the Docker engine.
+
+### Docker Parameter Object
+
+Docker CLI parameters. This needs to be enabled by the scheduler `-allow_docker_parameters` option.
+See [Docker Command Line Reference](https://docs.docker.com/reference/commandline/run/) for valid parameters.
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```name```       | String          | The name of the docker parameter. E.g. volume
+  ```value```      | String          | The value of the parameter. E.g. /usr/local/bin:/usr/bin:rw
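+
+For example, a sketch of a Docker container configuration that passes one extra
+engine parameter (values are illustrative; the scheduler must be started with
+`-allow_docker_parameters` for parameters to be accepted):
+
+    container = Docker(
+      image = 'myregistry.example.com/myapp:latest',
+      parameters = [Parameter(name = 'memory-swap', value = '-1')])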
+
+
+### LifecycleConfig Objects
+
+*Note: The only lifecycle configuration supported is the HTTP lifecycle via the HttpLifecycleConfig.*
+
+  param          | type                | description
+  -----          | :----:              | -----------
+  ```http```     | HttpLifecycleConfig | Configure the lifecycle manager to send lifecycle commands to the task via HTTP.
+
+### HttpLifecycleConfig Objects
+
+  param          | type            | description
+  -----          | :----:          | -----------
+  ```port```     | String          | The named port to send POST commands (Default: health)
+  ```graceful_shutdown_endpoint``` | String | Endpoint to hit to indicate that a task should gracefully shutdown. (Default: /quitquitquit)
+  ```shutdown_endpoint``` | String | Endpoint to hit to give a task its final warning before being killed. (Default: /abortabortabort)
+
+#### graceful_shutdown_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint to request that the task gracefully shut itself down.  This is a
+courtesy call before the `shutdown_endpoint` is invoked a fixed amount of
+time later.
+
+#### shutdown_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint as a final warning before the task is shut down.  If the task
+does not shut down on its own after this, it will be forcefully killed.
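+
+For example, a sketch that overrides only the graceful-shutdown endpoint and
+keeps the defaults for everything else (the endpoint path is illustrative):
+
+    lifecycle = LifecycleConfig(
+      http = HttpLifecycleConfig(graceful_shutdown_endpoint = '/shutdown/graceful'))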
+
+
+Specifying Scheduling Constraints
+=================================
+
+In the `Job` object there is a map `constraints` from String to String
+allowing the user to tailor the schedulability of tasks within the job.
+
+The constraint map's key is the attribute name on which we
+constrain Tasks within our Job. The value is how we constrain them.
+There are two types of constraints: *limit constraints* and *value
+constraints*.
+
+| constraint    | description
+| ------------- | --------------
+| Limit         | A string that specifies a limit for a constraint. Starts with <code>'limit:</code> followed by an Integer and closing single quote, such as ```'limit:1'```.
+| Value         | A string that specifies a value for a constraint. To include a list of values, separate the values using commas. To negate the values of a constraint, start with a ```!```.
+
+Further details can be found in the [Scheduling Constraints](../../features/constraints/) feature
+description.
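+
+For example, a sketch of a `constraints` map that combines a limit constraint
+with a negated value constraint (the attribute names are illustrative):
+
+    constraints = {
+      'host': 'limit:1',
+      'rack': '!rack-1',
+    }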
+
+
+Template Namespaces
+===================
+
+Currently, a few Pystachio namespaces have special semantics. Using them
+in your configuration allows you to tailor application behavior
+through environment introspection or interact in special ways with the
+Aurora client or Aurora-provided services.
+
+### mesos Namespace
+
+The `mesos` namespace contains variables that relate to the `mesos` agent
+that launched the task. The `instance` variable can be used
+to distinguish between Task replicas.
+
+| variable name     | type       | description
+| --------------- | :--------: | -------------
+| ```instance```    | Integer    | The instance number of the created task. A job with 5 replicas has instance numbers 0, 1, 2, 3, and 4.
+| ```hostname``` | String | The instance hostname that the task was launched on.
+
+Please note, there is no uniqueness guarantee for `instance` in the presence of
+network partitions. If that is required, it should be baked in at the application
+level using a distributed coordination service such as Zookeeper.
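+
+For example, a sketch of a `Process` that uses the instance number to pick a
+distinct shard (the command and flag names are illustrative):
+
+    main = Process(
+      name = 'main',
+      cmdline = './server --shard-id={{mesos.instance}} --port={{thermos.ports[http]}}')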
+
+### thermos Namespace
+
+The `thermos` namespace contains variables that work directly on the
+Thermos platform in addition to Aurora. This namespace is fully
+compatible with Tasks invoked via the `thermos` CLI.
+
+| variable      | type                     | description                        |
+| :----------:  | ---------                | ------------                       |
+| ```ports```   | map of string to Integer | A map of names to port numbers     |
+| ```task_id``` | string                   | The task ID assigned to this task. |
+
+The `thermos.ports` namespace is automatically populated by Aurora when
+invoking tasks on Mesos. When running the `thermos` command directly,
+these ports must be explicitly mapped with the `-P` option.
+
+For example, if '{{`thermos.ports[http]`}}' is specified in a `Process`
+configuration, it is automatically extracted and auto-populated by
+Aurora, but must be specified with, for example, `thermos -P http:12345`
+to map `http` to port 12345 when running via the CLI.
diff --git a/source/documentation/0.17.0/reference/scheduler-configuration.md b/source/documentation/0.17.0/reference/scheduler-configuration.md
new file mode 100644
index 0000000..cdded91
--- /dev/null
+++ b/source/documentation/0.17.0/reference/scheduler-configuration.md
@@ -0,0 +1,264 @@
+# Scheduler Configuration Reference
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+A list of the available options can be seen by running `aurora-scheduler -help`.
+
+Please refer to the [Operator Configuration Guide](../../operations/configuration/) for details on how
+to properly set the most important options.
+
+```
+$ aurora-scheduler -help
+-------------------------------------------------------------------------
+-h or -help to print this help message
+
+Required flags:
+-backup_dir [not null]
+	Directory to store backups under. Will be created if it does not exist.
+-cluster_name [not null]
+	Name to identify the cluster being served.
+-db_max_active_connection_count [must be > 0]
+	Max number of connections to use with database via MyBatis
+-db_max_idle_connection_count [must be > 0]
+	Max number of idle connections to the database via MyBatis
+-framework_authentication_file
+	Properties file which contains framework credentials to authenticate with Mesosmaster. Must contain the properties 'aurora_authentication_principal' and 'aurora_authentication_secret'.
+-ip
+	The ip address to listen. If not set, the scheduler will listen on all interfaces.
+-mesos_master_address [not null]
+	Address for the mesos master, can be a socket address or zookeeper path.
+-mesos_role
+	The Mesos role this framework will register as. The default is to left this empty, and the framework will register without any role and only receive unreserved resources in offer.
+-serverset_path [not null, must be non-empty]
+	ZooKeeper ServerSet path to register at.
+-shiro_after_auth_filter
+	Fully qualified class name of the servlet filter to be applied after the shiro auth filters are applied.
+-thermos_executor_path
+	Path to the thermos executor entry point.
+-tier_config [file must be readable]
+	Configuration file defining supported task tiers, task traits and behaviors.
+-webhook_config [file must exist, file must be readable]
+	Path to webhook configuration file.
+-zk_endpoints [must have at least 1 item]
+	Endpoint specification for the ZooKeeper servers.
+
+Optional flags:
+-allow_container_volumes (default false)
+	Allow passing in volumes in the job. Enabling this could pose a privilege escalation threat.
+-allow_docker_parameters (default false)
+	Allow to pass docker container parameters in the job.
+-allow_gpu_resource (default false)
+	Allow jobs to request Mesos GPU resource.
+-allowed_container_types (default [MESOS])
+	Container types that are allowed to be used by jobs.
+-async_slot_stat_update_interval (default (1, mins))
+	Interval on which to try to update open slot stats.
+-async_task_stat_update_interval (default (1, hrs))
+	Interval on which to try to update resource consumption stats.
+-async_worker_threads (default 8)
+	The number of worker threads to process async task operations with.
+-backup_interval (default (1, hrs))
+	Minimum interval on which to write a storage backup.
+-cron_scheduler_num_threads (default 10)
+	Number of threads to use for the cron scheduler thread pool.
+-cron_scheduling_max_batch_size (default 10) [must be > 0]
+	The maximum number of triggered cron jobs that can be processed in a batch.
+-cron_start_initial_backoff (default (5, secs))
+	Initial backoff delay while waiting for a previous cron run to be killed.
+-cron_start_max_backoff (default (1, mins))
+	Max backoff delay while waiting for a previous cron run to be killed.
+-cron_timezone (default GMT)
+	TimeZone to use for cron predictions.
+-custom_executor_config [file must exist, file must be readable]
+	Path to custom executor settings configuration file.
+-db_lock_timeout (default (1, mins))
+	H2 table lock timeout
+-db_row_gc_interval (default (2, hrs))
+	Interval on which to scan the database for unused row references.
+-default_docker_parameters (default {})
+	Default docker parameters for any job that does not explicitly declare parameters.
+-dlog_max_entry_size (default (512, KB))
+	Specifies the maximum entry size to append to the log. Larger entries will be split across entry Frames.
+-dlog_shutdown_grace_period (default (2, secs))
+	Specifies the maximum time to wait for scheduled checkpoint and snapshot actions to complete before forcibly shutting down.
+-dlog_snapshot_interval (default (1, hrs))
+	Specifies the frequency at which snapshots of local storage are taken and written to the log.
+-enable_cors_for
+	List of domains for which CORS support should be enabled.
+-enable_db_metrics (default true)
+	Whether to use MyBatis interceptor to measure the timing of intercepted Statements.
+-enable_h2_console (default false)
+	Enable H2 DB management console.
+-enable_mesos_fetcher (default false)
+	Allow jobs to pass URIs to the Mesos Fetcher. Note that enabling this feature could pose a privilege escalation threat.
+-enable_preemptor (default true)
+	Enable the preemptor and preemption
+-enable_revocable_cpus (default true)
+	Treat CPUs as a revocable resource.
+-enable_revocable_ram (default false)
+	Treat RAM as a revocable resource.
+-executor_user (default root)
+	User to start the executor. Defaults to "root". Set this to an unprivileged user if the mesos master was started with "--no-root_submissions". If set to anything other than "root", the executor will ignore the "role" setting for jobs since it can't use setuid() anymore. This means that all your jobs will run under the specified user and the user has to exist on the Mesos agents.
+-first_schedule_delay (default (1, ms))
+	Initial amount of time to wait before first attempting to schedule a PENDING task.
+-flapping_task_threshold (default (5, mins))
+	A task that repeatedly runs for less than this time is considered to be flapping.
+-framework_announce_principal (default false)
+	When 'framework_authentication_file' flag is set, the FrameworkInfo registered with the mesos master will also contain the principal. This is necessary if you intend to use mesos authorization via mesos ACLs. The default will change in a future release. Changing this value is backwards incompatible. For details, see MESOS-703.
+-framework_failover_timeout (default (21, days))
+	Time after which a framework is considered deleted.  SHOULD BE VERY HIGH.
+-framework_name (default Aurora)
+	Name used to register the Aurora framework with Mesos.
+-global_container_mounts (default [])
+	A comma separated list of mount points (in host:container form) to mount into all (non-mesos) containers.
+-history_max_per_job_threshold (default 100)
+	Maximum number of terminated tasks to retain in a job history.
+-history_min_retention_threshold (default (1, hrs))
+	Minimum guaranteed time for task history retention before any pruning is attempted.
+-history_prune_threshold (default (2, days))
+	Time after which the scheduler will prune terminated task history.
+-hostname
+	The hostname to advertise in ZooKeeper instead of the locally-resolved hostname.
+-http_authentication_mechanism (default NONE)
+	HTTP Authentication mechanism to use.
+-http_port (default 0)
+	The port to start an HTTP server on.  Default value will choose a random port.
+-initial_flapping_task_delay (default (30, secs))
+	Initial amount of time to wait before attempting to schedule a flapping task.
+-initial_schedule_penalty (default (1, secs))
+	Initial amount of time to wait before attempting to schedule a task that has failed to schedule.
+-initial_task_kill_retry_interval (default (5, secs))
+	When killing a task, retry after this delay if mesos has not responded, backing off up to transient_task_state_timeout
+-job_update_history_per_job_threshold (default 10)
+	Maximum number of completed job updates to retain in a job update history.
+-job_update_history_pruning_interval (default (15, mins))
+	Job update history pruning interval.
+-job_update_history_pruning_threshold (default (30, days))
+	Time after which the scheduler will prune completed job update history.
+-kerberos_debug (default false)
+	Produce additional Kerberos debugging output.
+-kerberos_server_keytab
+	Path to the server keytab.
+-kerberos_server_principal
+	Kerberos server principal to use, usually of the form HTTP/aurora.example.com@EXAMPLE.COM
+-max_flapping_task_delay (default (5, mins))
+	Maximum delay between attempts to schedule a flapping task.
+-max_leading_duration (default (1, days))
+	After leading for this duration, the scheduler should commit suicide.
+-max_registration_delay (default (1, mins))
+	Max allowable delay to allow the driver to register before aborting
+-max_reschedule_task_delay_on_startup (default (30, secs))
+	Upper bound of random delay for pending task rescheduling on scheduler startup.
+-max_saved_backups (default 48)
+	Maximum number of backups to retain before deleting the oldest backups.
+-max_schedule_attempts_per_sec (default 40.0)
+	Maximum number of scheduling attempts to make per second.
+-max_schedule_penalty (default (1, mins))
+	Maximum delay between attempts to schedule a PENDING tasks.
+-max_status_update_batch_size (default 1000) [must be > 0]
+	The maximum number of status updates that can be processed in a batch.
+-max_task_event_batch_size (default 300) [must be > 0]
+	The maximum number of task state change events that can be processed in a batch.
+-max_tasks_per_job (default 4000) [must be > 0]
+	Maximum number of allowed tasks in a single job.
+-max_tasks_per_schedule_attempt (default 5) [must be > 0]
+	The maximum number of tasks to pick in a single scheduling attempt.
+-max_update_instance_failures (default 20000) [must be > 0]
+	Upper limit on the number of failures allowed during a job update. This helps cap potentially unbounded entries into storage.
+-min_offer_hold_time (default (5, mins))
+	Minimum amount of time to hold a resource offer before declining.
+-native_log_election_retries (default 20)
+	The maximum number of attempts to obtain a new log writer.
+-native_log_election_timeout (default (15, secs))
+	The timeout for a single attempt to obtain a new log writer.
+-native_log_file_path
+	Path to a file to store the native log data in.  If the parent directory doesnot exist it will be created.
+-native_log_quorum_size (default 1)
+	The size of the quorum required for all log mutations.
+-native_log_read_timeout (default (5, secs))
+	The timeout for doing log reads.
+-native_log_write_timeout (default (3, secs))
+	The timeout for doing log appends and truncations.
+-native_log_zk_group_path
+	A zookeeper node for use by the native log to track the master coordinator.
+-offer_filter_duration (default (5, secs))
+	Duration after which we expect Mesos to re-offer unused resources. A short duration improves scheduling performance in smaller clusters, but might lead to resource starvation for other frameworks if you run many frameworks in your cluster.
+-offer_hold_jitter_window (default (1, mins))
+	Maximum amount of random jitter to add to the offer hold time window.
+-offer_reservation_duration (default (3, mins))
+	Time to reserve a agent's offers while trying to satisfy a task preempting another.
+-populate_discovery_info (default false)
+	If true, Aurora populates DiscoveryInfo field of Mesos TaskInfo.
+-preemption_delay (default (3, mins))
+	Time interval after which a pending task becomes eligible to preempt other tasks
+-preemption_slot_hold_time (default (5, mins))
+	Time to hold a preemption slot found before it is discarded.
+-preemption_slot_search_interval (default (1, mins))
+	Time interval between pending task preemption slot searches.
+-receive_revocable_resources (default false)
+	Allows receiving revocable resource offers from Mesos.
+-reconciliation_explicit_batch_interval (default (5, secs))
+	Interval between explicit batch reconciliation requests.
+-reconciliation_explicit_batch_size (default 1000) [must be > 0]
+	Number of tasks in a single batch request sent to Mesos for explicit reconciliation.
+-reconciliation_explicit_interval (default (60, mins))
+	Interval on which scheduler will ask Mesos for status updates of all non-terminal tasks known to scheduler.
+-reconciliation_implicit_interval (default (60, mins))
+	Interval on which scheduler will ask Mesos for status updates of all non-terminal tasks known to Mesos.
+-reconciliation_initial_delay (default (1, mins))
+	Initial amount of time to delay task reconciliation after scheduler start up.
+-reconciliation_schedule_spread (default (30, mins))
+	Difference between explicit and implicit reconciliation intervals intended to create a non-overlapping task reconciliation schedule.
+-require_docker_use_executor (default true)
+	If false, Docker tasks may run without an executor (EXPERIMENTAL)
+-scheduling_max_batch_size (default 3) [must be > 0]
+	The maximum number of scheduling attempts that can be processed in a batch.
+-serverset_endpoint_name (default http)
+	Name of the scheduler endpoint published in ZooKeeper.
+-shiro_ini_path
+	Path to shiro.ini for authentication and authorization configuration.
+-shiro_realm_modules (default [class org.apache.aurora.scheduler.http.api.security.IniShiroRealmModule])
+	Guice modules for configuring Shiro Realms.
+-sla_non_prod_metrics (default [])
+	Metric categories collected for non production tasks.
+-sla_prod_metrics (default [JOB_UPTIMES, PLATFORM_UPTIME, MEDIANS])
+	Metric categories collected for production tasks.
+-sla_stat_refresh_interval (default (1, mins))
+	The SLA stat refresh interval.
+-slow_query_log_threshold (default (25, ms))
+	Log all queries that take at least this long to execute.
+-slow_query_log_threshold (default (25, ms))
+	Log all queries that take at least this long to execute.
+-snapshot_hydrate_stores (default [locks, hosts, quota, job_updates])
+	Which H2-backed stores to fully hydrate on the Snapshot.
+-stat_retention_period (default (1, hrs))
+	Time for a stat to be retained in memory before expiring.
+-stat_sampling_interval (default (1, secs))
+	Statistic value sampling interval.
+-thermos_executor_cpu (default 0.25)
+	The number of CPU cores to allocate for each instance of the executor.
+-thermos_executor_flags
+	Extra arguments to be passed to the thermos executor
+-thermos_executor_ram (default (128, MB))
+	The amount of RAM to allocate for each instance of the executor.
+-thermos_executor_resources (default [])
+	A comma separated list of additional resources to copy into the sandbox.Note: if thermos_executor_path is not the thermos_executor.pex file itself, this must include it.
+-thermos_home_in_sandbox (default false)
+	If true, changes HOME to the sandbox before running the executor. This primarily has the effect of causing the executor and runner to extract themselves into the sandbox.
+-transient_task_state_timeout (default (5, mins))
+	The amount of time after which to treat a task stuck in a transient state as LOST.
+-use_beta_db_task_store (default false)
+	Whether to use the experimental database-backed task store.
+-viz_job_url_prefix (default )
+	URL prefix for job container stats.
+-zk_chroot_path
+	chroot path to use for the ZooKeeper connections
+-zk_digest_credentials
+	user:password to use when authenticating with ZooKeeper.
+-zk_in_proc (default false)
+	Launches an embedded zookeeper server for local testing causing -zk_endpoints to be ignored if specified.
+-zk_session_timeout (default (4, secs))
+	The ZooKeeper session timeout.
+-zk_use_curator (default true)
+	DEPRECATED: Uses Apache Curator as the zookeeper client; otherwise a copy of Twitter commons/zookeeper (the legacy library) is used.
+-------------------------------------------------------------------------
+```
diff --git a/source/documentation/0.17.0/reference/scheduler-endpoints.md b/source/documentation/0.17.0/reference/scheduler-endpoints.md
new file mode 100644
index 0000000..d302e90
--- /dev/null
+++ b/source/documentation/0.17.0/reference/scheduler-endpoints.md
@@ -0,0 +1,19 @@
+# HTTP endpoints
+
+There are a number of HTTP endpoints that the Aurora scheduler exposes. These allow various
+operational tasks to be performed on the scheduler. Below is the list of all such endpoints
+and a brief explanation of what they do.
+
+## Leader health
+The `/leaderhealth` endpoint enables health checks on the scheduler instances in order
+to forward requests to the leading scheduler. This is typically used by a load balancer such as
+HAProxy or AWS ELB.
+
+When an HTTP GET request is issued on this endpoint, it responds as follows:
+
+- If the instance that received the GET request is the leading scheduler, an HTTP status code of
+  200 (OK) is returned.
+- If the instance that received the GET request is not the leading scheduler but a leader does
+  exist, an HTTP status code of 503 (SERVICE_UNAVAILABLE) is returned.
+- If no leader currently exists or the leader is unknown, an HTTP status code of 502
+  (BAD_GATEWAY) is returned.
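+
+A minimal sketch of such a check, using only the status codes described above (the
+scheduler address and port below are placeholders for your own deployment):
+
+    import urllib2
+
+    def is_leading_scheduler(host, port=8081):
+      # Only the leading scheduler answers /leaderhealth with 200 (OK);
+      # 503 means "not the leader", 502 means "no leader known".
+      try:
+        return urllib2.urlopen('http://%s:%d/leaderhealth' % (host, port)).getcode() == 200
+      except (urllib2.HTTPError, urllib2.URLError):
+        return False
+
+    print(is_leading_scheduler('192.168.33.7'))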
\ No newline at end of file
diff --git a/source/documentation/0.17.0/reference/task-lifecycle.md b/source/documentation/0.17.0/reference/task-lifecycle.md
new file mode 100644
index 0000000..cf1b679
--- /dev/null
+++ b/source/documentation/0.17.0/reference/task-lifecycle.md
@@ -0,0 +1,148 @@
+# Task Lifecycle
+
+When Aurora reads a configuration file and finds a `Job` definition, it:
+
+1.  Evaluates the `Job` definition.
+2.  Splits the `Job` into its constituent `Task`s.
+3.  Sends those `Task`s to the scheduler.
+4.  The scheduler puts the `Task`s into `PENDING` state, starting each
+    `Task`'s life cycle.
+
+
+![Life of a task](../images/lifeofatask.png)
+
+Please note that a couple of the task states described below are missing from
+this state diagram.
+
+
+## PENDING to RUNNING states
+
+When a `Task` is in the `PENDING` state, the scheduler constantly
+searches for machines satisfying that `Task`'s resource
+requirements (RAM, disk space, CPU time) while maintaining configuration
+constraints such as "a `Task` must run on machines dedicated to a
+particular role" or attribute limit constraints such as "at most 2
+`Task`s from the same `Job` may run on each rack". When the scheduler
+finds a suitable match, it assigns the `Task` to a machine and puts the
+`Task` into the `ASSIGNED` state.
+
+From the `ASSIGNED` state, the scheduler sends an RPC to the agent
+machine containing `Task` configuration, which the agent uses to spawn
+an executor responsible for the `Task`'s lifecycle. When the scheduler
+receives an acknowledgment that the machine has accepted the `Task`,
+the `Task` goes into `STARTING` state.
+
+`STARTING` state initializes a `Task` sandbox. When the sandbox is fully
+initialized, Thermos begins to invoke `Process`es. The agent
+machine sends an update to the scheduler that the `Task` is
+in `RUNNING` state only after the task satisfies its liveness requirements.
+See [Health Checking](../features/services#health-checking) for more details
+on how to configure health checks.
+
+
+
+## RUNNING to terminal states
+
+There are various ways that an active `Task` can transition into a terminal
+state. By definition, it can never leave this state. However, depending on the
+nature of the termination and the originating `Job` definition
+(e.g. `service`, `max_task_failures`), a replacement `Task` might be
+scheduled.
+
+### Natural Termination: FINISHED, FAILED
+
+A `RUNNING` `Task` can terminate without direct user interaction. For
+example, it may be a finite computation that finishes, even something as
+simple as `echo hello world.`, or it could be an exceptional condition in
+a long-lived service. If the `Task` is successful (its underlying
+processes have succeeded with exit status `0` or finished without
+reaching failure limits) it moves into `FINISHED` state. If it finished
+after reaching a set of failure limits, it goes into `FAILED` state.
+
+A terminated `Task` which is subject to rescheduling will be temporarily
+`THROTTLED` if it is considered to be flapping. A task is flapping if its
+previous invocation was terminated after less than 5 minutes (scheduler
+default). The time penalty a task has to remain in the `THROTTLED` state
+before it is eligible for rescheduling increases with each consecutive
+failure.
+
+### Forceful Termination: KILLING, RESTARTING
+
+You can terminate a `Task` by issuing an `aurora job kill` command, which
+moves it into `KILLING` state. The scheduler then sends the agent a
+request to terminate the `Task`. If the scheduler receives a successful
+response, it moves the Task into `KILLED` state and never restarts it.
+
+If a `Task` is forced into the `RESTARTING` state via the `aurora job restart`
+command, the scheduler kills the underlying task but in parallel schedules
+an identical replacement for it.
+
+In any case, the responsible executor on the agent follows an escalation
+sequence when killing a running task:
+
+  1. If a `HttpLifecycleConfig` is not present, skip to (4).
+  2. Send a POST to the `graceful_shutdown_endpoint` and wait 5 seconds.
+  3. Send a POST to the `shutdown_endpoint` and wait 5 seconds.
+  4. Send SIGTERM (`kill`) and wait at most `finalization_wait` seconds.
+  5. Send SIGKILL (`kill -9`).
+
+If the executor notices that all `Process`es in a `Task` have aborted
+during this sequence, it will not proceed with subsequent steps.
+Note that graceful shutdown is best-effort, and due to the many
+inevitable realities of distributed systems, it may not be performed.
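+
+As an illustrative sketch, the HTTP-based part of this sequence is driven by a
+`HttpLifecycleConfig` on the job; the endpoint and port names shown here are examples
+(see the configuration reference for the exact schema and defaults):
+
+    lifecycle = LifecycleConfig(
+      http = HttpLifecycleConfig(
+        port = 'health',                              # named port the shutdown POSTs are sent to
+        graceful_shutdown_endpoint = '/quitquitquit', # used in step 2 above
+        shutdown_endpoint = '/abortabortabort'        # used in step 3 above
+      )
+    )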
+
+### Unexpected Termination: LOST
+
+If a `Task` stays in a transient task state for too long (such as `ASSIGNED`
+or `STARTING`), the scheduler forces it into `LOST` state, creating a new
+`Task` in its place that's sent into `PENDING` state.
+
+In addition, if the Mesos core tells the scheduler that an agent has
+become unhealthy (or outright disappeared), the `Task`s assigned to that
+agent go into `LOST` state and new `Task`s are created in their place.
+From `PENDING` state, there is no guarantee a `Task` will be reassigned
+to the same machine unless job constraints explicitly force it there.
+
+### Giving Priority to Production Tasks: PREEMPTING
+
+Sometimes a Task needs to be interrupted, such as when a non-production
+Task's resources are needed by a higher priority production Task. This
+type of interruption is called a *pre-emption*. When this happens in
+Aurora, the non-production Task is killed and moved into
+the `PREEMPTING` state when both of the following are true:
+
+- The task being killed is a non-production task.
+- The other task is a `PENDING` production task that hasn't been
+  scheduled due to a lack of resources.
+
+The scheduler UI shows the non-production task was preempted in favor of
+the production task. At some point, tasks in `PREEMPTING` move to `KILLED`.
+
+Note that non-production tasks consuming many resources are likely to be
+preempted in favor of production tasks.
+
+### Making Room for Maintenance: DRAINING
+
+Cluster operators can set an agent into maintenance mode. This will transition
+all `Task`s running on this agent into `DRAINING` and eventually to `KILLED`.
+Drained `Task`s will be restarted on other agents for which no maintenance
+has been announced yet.
+
+
+
+## State Reconciliation
+
+Due to the many inevitable realities of distributed systems, there might
+be a mismatch of perceived and actual cluster state (e.g. a machine returns
+from a `netsplit` but the scheduler has already marked all its `Task`s as
+`LOST` and rescheduled them).
+
+Aurora regularly runs a state reconciliation process in order to detect
+and correct such issues (e.g. by killing the errant `RUNNING` tasks).
+By default, the proper detection of all failure scenarios and inconsistencies
+may take up to an hour.
+
+To emphasize this point: there is no uniqueness guarantee for a single
+instance of a job in the presence of network partitions. If the `Task`
+requires that, it should be baked in at the application level using a
+distributed coordination service such as Zookeeper.
diff --git a/source/documentation/0.18.0/additional-resources/presentations.md b/source/documentation/0.18.0/additional-resources/presentations.md
new file mode 100644
index 0000000..37d72ed
--- /dev/null
+++ b/source/documentation/0.18.0/additional-resources/presentations.md
@@ -0,0 +1,80 @@
+# Apache Aurora Presentations
+Video and slides from presentations and panel discussions about Apache Aurora.
+
+_(Listed in date descending order)_
+
+<table>
+
+	<tr>
+		<td><img src="/documentation/0.18.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png" alt="Mesos and Aurora on a Small Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=q5iIqhaCJ_o">Mesos &amp; Aurora on a Small Scale (Video)</a></strong>
+		<p>Presented by Florian Pfeiffer</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.18.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png" alt="SLA Aware Maintenance for Operators Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=tZ0-SISvCis">SLA Aware Maintenance for Operators (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.18.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png" alt="Shipping Code with Aurora Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=y1hi7K1lPkk">Shipping Code with Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.18.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png" alt="Twitter Production Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=nNrh-gdu9m4">Twitter’s Production Scale: Mesos and Aurora Operations (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.18.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png" alt="From Monolith to Microservices with Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=yXkOgnyK4Hw">From Monolith to Microservices w/ Aurora (Video)</a></strong>
+		<p>Presented by Thanos Baskous, Tony Dong, Dobromir Montauk</p>
+		<p>April 30, 2015 at <a href="http://www.meetup.com/Bay-Area-Apache-Aurora-Users-Group/events/221219480/">Bay Area Apache Aurora Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.18.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png" alt="Aurora + Mesos in Practice at Twitter Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=1XYJGX_qZVU">Aurora + Mesos in Practice at Twitter (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 07, 2015 at <a href="http://www.bigeng.io/aurora-mesos-in-practice-at-twitter">Bigcommerce TechTalk</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.18.0/images/presentations/02_28_2015_apache_aurora_thumb.png" alt="Apache Auroraの始めかた Slideshow Thumbnail" /></td>
+		<td><strong><a href="http://www.slideshare.net/zembutsu/apache-aurora-introduction-and-tutorial-osc15tk">Apache Auroraの始めかた (Slides)</a></strong>
+		<p>Presented by Masahito Zembutsu</p>
+		<p>February 28, 2015 at <a href="http://www.ospn.jp/osc2015-spring/">Open Source Conference 2015 Tokyo Spring</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.18.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png" alt="Apache Aurora Adopters Panel Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=2Jsj0zFdRlg">Apache Aurora Adopters Panel (Video)</a></strong>
+		<p>Panelists Ben Staffin, Josh Adams, Bill Farner, Berk Demir</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.18.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png" alt="Operating Apache Aurora and Mesos at Twitter Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=E4lxX6epM_U">Operating Apache Aurora and Mesos at Twitter (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.18.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png" alt="Apache Aurora and Mesos at TellApart" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=ZZXtXLvTXAE">Apache Aurora and Mesos at TellApart (Video)</a></strong>
+		<p>Presented by Steve Niemitz</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.18.0/images/presentations/08_21_2014_past_present_future_thumb.png" alt="Past, Present, and Future of the Aurora Scheduler Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=Dsc5CPhKs4o">Past, Present, and Future of the Aurora Scheduler (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 21, 2014 at <a href="http://events.linuxfoundation.org/events/archive/2014/mesoscon">#MesosCon 2014</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.18.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png" alt="Introduction to Apache Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=asd_h6VzaJc">Introduction to Apache Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 25, 2014 at <a href="https://www.eventbrite.com/e/aurora-and-mesosframeworksmeetup-tickets-10850994617">Aurora and Mesos Frameworks Meetup</a></p></td>
+	</tr>
+</table>
diff --git a/source/documentation/0.18.0/additional-resources/tools.md b/source/documentation/0.18.0/additional-resources/tools.md
new file mode 100644
index 0000000..7f3613d
--- /dev/null
+++ b/source/documentation/0.18.0/additional-resources/tools.md
@@ -0,0 +1,24 @@
+# Tools
+
+Various tools integrate with Aurora. Is there a tool missing? Let us know, or submit a patch to add it!
+
+* Load-balancing technology used to direct traffic to services running on Aurora:
+  - [synapse](https://github.com/airbnb/synapse) based on HAProxy
+  - [aurproxy](https://github.com/tellapart/aurproxy) based on nginx
+  - [jobhopper](https://github.com/benley/aurora-jobhopper) performs HTTP redirects for easy developer and administrator access
+
+* RPC libraries that integrate with Aurora's [service discovery mechanism](../../features/service-discovery/):
+  - [linkerd](https://linkerd.io/) RPC proxy
+  - [finagle](https://twitter.github.io/finagle) (Scala)
+  - [scales](https://github.com/steveniemitz/scales) (Python)
+
+* Monitoring:
+  - [collectd-aurora](https://github.com/zircote/collectd-aurora) for cluster monitoring using collectd
+  - [Prometheus Aurora exporter](https://github.com/tommyulfsparre/aurora_exporter) for cluster monitoring using Prometheus
+  - [Prometheus service discovery integration](http://prometheus.io/docs/operating/configuration/#zookeeper-serverset-sd-configurations-serverset_sd_config) for discovering and monitoring services running on Aurora
+
+* Packaging and deployment:
+  - [aurora-packaging](https://github.com/apache/aurora-packaging), the source of the official Aurora packages
+
+* Thrift Clients:
+  - [gorealis](https://github.com/rdelval/gorealis) for communicating with the scheduler using Go
diff --git a/source/documentation/0.18.0/contributing.md b/source/documentation/0.18.0/contributing.md
new file mode 100644
index 0000000..d3d1a1f
--- /dev/null
+++ b/source/documentation/0.18.0/contributing.md
@@ -0,0 +1,93 @@
+## Get the Source Code
+
+First things first, you'll need the source! The Aurora source is available from Apache git:
+
+    git clone https://gitbox.apache.org/repos/asf/aurora
+
+Read the Style Guides
+---------------------
+Aurora's codebase is primarily Java and Python and conforms to the Twitter Commons styleguides for
+both languages.
+
+- [Java Style Guide](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/styleguide.md)
+- [Python Style Guide](https://github.com/twitter/commons/blob/master/src/python/twitter/common/styleguide.md)
+
+## Find Something to Do
+
+There are issues in [Jira](https://issues.apache.org/jira/browse/AURORA) with the
+["newbie" label](https://issues.apache.org/jira/issues/?jql=project%20%3D%20AURORA%20AND%20labels%20%3D%20newbie%20and%20resolution%3Dunresolved)
+that are good starting places for new Aurora contributors; pick one of these and dive in! To assign
+a task to yourself, first ask for your JIRA id to be whitelisted by either asking in IRC/Slack or by
+emailing dev@aurora.apache.org. Once your JIRA account has been whitelisted you can assign tickets
+to yourself. The next step is to prepare your patch and finally post it for review.
+
+## Getting your ReviewBoard Account
+
+Go to https://reviews.apache.org and create an account.
+
+## Setting up your ReviewBoard Environment
+
+Run `./rbt status`. The first time this runs it will bootstrap and you will be asked to login.
+Subsequent runs will cache your login credentials.
+
+## Submitting a Patch for Review
+
+Post a review with `rbt`, fill out the fields in your browser and hit Publish.
+
+    ./rbt post -o
+
+If you're unsure about who to add as a reviewer, you can default to adding Zameer Manji (zmanji) and
+Joshua Cohen (jcohen). They will take care of finding an appropriate reviewer for the patch.
+
+Once you've done this, you probably want to mark the associated Jira issue as Reviewable.
+
+## Updating an Existing Review
+
+Incorporate review feedback, make some more commits, update your existing review, fill out the
+fields in your browser and hit Publish.
+
+    ./rbt post -o -r <RB_ID>
+
+## Getting Your Review Merged
+
+If you're not an Aurora committer, one of the committers will merge your change in as described
+below. Generally, the last reviewer to give the review a 'Ship It!' will be responsible.
+
+### Merging Your Own Review (Committers)
+
+Once you have shipits from the right committers, merge your changes in a single commit and mark
+the review as submitted. The typical workflow is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>  # Verify the automatically-generated commit message looks sane,
+                            # editing if necessary.
+    git show master         # Verify everything looks sane
+    git push origin master
+    ./rbt close <RB_ID>
+
+Note that even if you're developing using feature branches you will not use `git merge` - each
+commit will be an atomic change accompanied by a ReviewBoard entry.
+
+### Merging Someone Else's Review
+
+Sometimes you'll need to merge someone else's RB. The typical workflow for this is
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>
+    git show master  # Verify everything looks sane, author is correct
+    git push origin master
+
+Note for committers: while we generally use the commit message generated by `./rbt patch` some
+changes are often required:
+
+1. Ensure the commit message does not exceed 100 characters per line.
+2. Remove the "Testing Done" section. It's generally redundant (can be seen by checking the linked
+  review) or entirely irrelevant to the commit itself.
+
+## Cleaning Up
+
+Your patch has landed, congratulations! The last thing you'll want to do before moving on to your
+next fix is to clean up your Jira and ReviewBoard entries. The former should be marked as
+"Resolved" while the latter should be marked as "Submitted".
diff --git a/source/documentation/0.18.0/development/client.md b/source/documentation/0.18.0/development/client.md
new file mode 100644
index 0000000..c6bec5b
--- /dev/null
+++ b/source/documentation/0.18.0/development/client.md
@@ -0,0 +1,148 @@
+Developing the Aurora Client
+============================
+
+The client is written in Python, and uses the
+[Pants](http://pantsbuild.github.io/python-readme.html) build tool.
+
+
+Building and Testing
+--------------------
+
+Building and testing the client code are both done using Pants. The relevant targets to know about
+are:
+
+   * Build a client executable: `./pants binary src/main/python/apache/aurora/client:aurora`
+   * Test client code: `./pants test src/test/python/apache/aurora/client/cli:cli`
+
+If you want to build a source distribution of the client, you need to run `./build-support/release/make-python-sdists`.
+
+
+Creating Custom Builds
+----------------------
+
+There are situations where you may want to plug in custom logic to the Client that may not be
+applicable to the open source codebase. Rather than create a whole CLI from scratch, you can
+easily create your own custom, drop-in replacement aurora.pex using the pants build tool.
+
+First, create an AuroraCommandLine implementation as an entry-point for registering customizations:
+
+    from apache.aurora.client.cli.client import AuroraCommandLine
+
+    class CustomAuroraCommandLine(AuroraCommandLine):
+      """Custom AuroraCommandLine for your needs"""
+
+      @property
+      def name(self):
+        return "your-company-aurora"
+
+      @classmethod
+      def get_description(cls):
+        return 'Your Company internal Aurora client command line'
+
+      def __init__(self):
+        super(CustomAuroraCommandLine, self).__init__()
+        # Add custom plugins..
+        self.register_plugin(YourCustomPlugin())
+
+      def register_nouns(self):
+        super(CustomAuroraCommandLine, self).register_nouns()
+        # You can even add new commands / sub-commands!
+        self.register_noun(YourStartUpdateProxy())
+        self.register_noun(YourDeployWorkflowCommand())
+
+Secondly, create a main entry point:
+
+    import sys
+
+    def proxy_main():
+      client = CustomAuroraCommandLine()
+      if len(sys.argv) == 1:
+        sys.argv.append("-h")
+      sys.exit(client.execute(sys.argv[1:]))
+
+Finally, you can wire everything up with a pants BUILD file in your project directory:
+
+    python_binary(
+      name='aurora',
+      entry_point='your_company.aurora.client:proxy_main',
+      dependencies=[
+        ':client_lib'
+      ]
+    )
+
+    python_library(
+      name='client_lib',
+      sources = [
+        'client.py',
+        'custom_plugin.py',
+        'custom_command.py',
+      ],
+      dependencies = [
+        # The Apache Aurora client
+        # Any other dependencies for your custom code
+      ],
+    )
+
+Using the same commands to build the client as above (but obviously pointing to this BUILD file
+instead), you will have a drop-in replacement aurora.pex file with your customizations.
+
+Running/Debugging
+------------------
+
+For manually testing client changes against a cluster, we use [Vagrant](https://www.vagrantup.com/).
+To start a virtual cluster, you need to install Vagrant, and then run `vagrant up` from the root of
+the aurora workspace. This will create a vagrant host named "devcluster", with a Mesos master, a set
+of Mesos agents, and an Aurora scheduler.
+
+If you have a change you would like to test in your local cluster, you'll rebuild the client:
+
+    vagrant ssh -c 'aurorabuild client'
+
+Once this completes, the `aurora` command will reflect your changes.
+
+
+Running/Debugging in PyCharm
+-----------------------------
+
+It's possible to use PyCharm to run and debug both the client and client tests in an IDE. In order
+to do this, first run:
+
+    build-support/python/make-pycharm-virtualenv
+
+This script will configure a virtualenv with all of our Python requirements. Once the script
+completes it will emit instructions for configuring PyCharm:
+
+    Your PyCharm environment is now set up.  You can open the project root
+    directory with PyCharm.
+
+    Once the project is loaded:
+      - open project settings
+      - click 'Project Interpreter'
+      - click the cog in the upper-right corner
+      - click 'Add Local'
+      - select 'build-support/python/pycharm.venv/bin/python'
+      - click 'OK'
+
+### Running/Debugging Tests
+After following these instructions, you should now be able to run/debug tests directly from the IDE
+by right-clicking on a test (or test class) and choosing to run or debug:
+
+[![Debug Client Test](../images/debug-client-test.png)](../images/debug-client-test.png)
+
+If you've set a breakpoint, you can see the run will now stop and let you debug:
+
+[![Debugging Client Test](../images/debugging-client-test.png)](../images/debugging-client-test.png)
+
+### Running/Debugging the Client
+Actually running and debugging the client is unfortunately a bit more complex. You'll need to create
+a Run configuration:
+
+* Go to Run → Edit Configurations
+* Click the + icon to add a new configuration.
+* Choose python and name the configuration 'client'.
+* Set the script path to `/your/path/to/aurora/src/main/python/apache/aurora/client/cli/client.py`
+* Set the script parameters to the command you want to run (e.g. `job status <job key>`)
+* Expand the Environment section and click the ellipsis to add a new environment variable
+* Click the + at the bottom to add a new variable named AURORA_CONFIG_ROOT whose value is the
+  path where your cluster configuration can be found. For example, to talk to the scheduler
+  running in the vagrant image, it would be set to `/your/path/to/aurora/examples/vagrant` (this
+  is the directory where our example clusters.json is found).
+* You should now be able to run and debug this configuration!
diff --git a/source/documentation/0.18.0/development/committers-guide.md b/source/documentation/0.18.0/development/committers-guide.md
new file mode 100644
index 0000000..5c377bc
--- /dev/null
+++ b/source/documentation/0.18.0/development/committers-guide.md
@@ -0,0 +1,102 @@
+Committer's Guide
+=================
+
+Information for official Apache Aurora committers.
+
+Setting up your email account
+-----------------------------
+Once your Apache ID has been set up you can configure your account and add ssh keys and setup an
+email forwarding address at
+
+    http://id.apache.org
+
+Additional instructions for setting up your new committer email can be found at
+
+    http://www.apache.org/dev/user-email.html
+
+The recommended setup is to configure all services (mailing lists, JIRA, ReviewBoard) to send
+emails to your @apache.org email address.
+
+
+Creating a gpg key for releases
+-------------------------------
+In order to create a release candidate you will need a gpg key published to an external key server
+and that key will need to be added to our KEYS file as well.
+
+1. Create a key:
+
+               gpg --gen-key
+
+2. Add your gpg key to the Apache Aurora KEYS file:
+
+               git clone https://gitbox.apache.org/repos/asf/aurora.git
+               (gpg --list-sigs <KEY ID> && gpg --armor --export <KEY ID>) >> KEYS
+               git add KEYS && git commit -m "Adding gpg key for <APACHE ID>"
+               ./rbt post -o -g
+
+3. Publish the key to an external key server:
+
+               gpg --keyserver pgp.mit.edu --send-keys <KEY ID>
+
+4. Publish the changes to the KEYS file to the Apache Aurora svn dist locations listed below:
+
+               https://dist.apache.org/repos/dist/dev/aurora/KEYS
+               https://dist.apache.org/repos/dist/release/aurora/KEYS
+
+5. Add your key to git config for use with the release scripts:
+
+               git config --global user.signingkey <KEY ID>
+
+
+Creating a release
+------------------
+The following will guide you through the steps to create a release candidate, vote, and finally an
+official Apache Aurora release. Before starting your gpg key should be in the KEYS file and you
+must have access to commit to the dist.a.o repositories.
+
+1. Ensure that all issues resolved for this release candidate are tagged with the correct Fix
+Version in JIRA; the changelog script will use this to generate the CHANGELOG in step #3.
+To assign the fix version:
+
+    * Look up the [previous release date](https://issues.apache.org/jira/browse/aurora/?selectedTab=com.atlassian.jira.jira-projects-plugin:versions-panel).
+    * Query all issues resolved after that release date: `project = AURORA AND status in (resolved, Closed) and fixVersion is empty and resolutiondate >= "YYYY/MM/DD"`
+    * In the upper right corner of the query result, select Tools > Bulk Edit.
+    * Select all issues > edit issue > set 'Change Fix Version/s' to the release version.
+    * Make sure to uncheck 'Send mail for this update' at the bottom.
+
+2. Prepare RELEASE-NOTES.md for the release. This just boils down to removing the "(Not yet
+released)" suffix from the impending release.
+
+3. Create a release candidate. This will automatically update the CHANGELOG and commit it, create a
+branch and update the current version within the trunk. To create a minor version update and publish
+it run
+
+               ./build-support/release/release-candidate -l m -p
+
+4. Update, if necessary, the draft email created from the `release-candidate` script in step #3 and
+send the [VOTE] email to the dev@ mailing list. You can verify the release signature and checksums
+by running
+
+               ./build-support/release/verify-release-candidate
+
+5. Wait for the vote to complete. If the vote fails, close the vote by replying to the initial [VOTE]
+email sent in step #4 by editing the subject to [RESULT][VOTE] ... and noting the failure reason
+(example [here](http://markmail.org/message/d4d6xtvj7vgwi76f)). You'll also need to manually revert
+the commits generated by the release candidate script that incremented the snapshot version and
+updated the changelog. Once that is done, address any issues, go back to step #1, and run
+again; this time use the -r flag to increment the release candidate version. This will
+automatically clean up the release candidate rc0 branch and source distribution.
+
+               ./build-support/release/release-candidate -l m -r 1 -p
+
+6. Once the vote has successfully passed, create the release
+
+**IMPORTANT: make sure to use the correct release at this final step (e.g.: `-r 1` if rc1 candidate
+has been voted for). Once the release tag is pushed it will be very hard to undo due to remote
+git pre-receive hook explicitly forbidding release tag manipulations.**
+
+               ./build-support/release/release
+
+7. Update the draft email created from the `release` script in step #6 to include the Apache IDs for
+all binding votes and send the [RESULT][VOTE] email to the dev@ mailing list.
+
diff --git a/source/documentation/0.18.0/development/db-migration.md b/source/documentation/0.18.0/development/db-migration.md
new file mode 100644
index 0000000..6e0f77f
--- /dev/null
+++ b/source/documentation/0.18.0/development/db-migration.md
@@ -0,0 +1,34 @@
+DB Migrations
+=============
+
+Changes to the DB schema should be made in the form of migrations. This ensures that all changes
+are applied correctly after a DB dump from a previous version is restored.
+
+DB migrations are managed through a system built on top of
+[MyBatis Migrations](http://www.mybatis.org/migrations/). The migrations are run automatically when
+a snapshot is restored, no manual interaction is required by cluster operators.
+
+Upgrades
+--------
+When adding or altering tables or changing data, in addition to making the change in
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql), a new
+migration class should be created under the org.apache.aurora.scheduler.storage.db.migration
+package. The class should implement the [MigrationScript](https://github.com/mybatis/migrations/blob/master/src/main/java/org/apache/ibatis/migration/MigrationScript.java)
+interface (see [V001_TestMigration](https://github.com/apache/aurora/blob/rel/0.18.0/src/test/java/org/apache/aurora/scheduler/storage/db/testmigration/V001_TestMigration.java)
+as an example). The upgrade and downgrade scripts are defined in this class. When restoring a
+snapshot the list of migrations on the classpath is compared to the list of applied changes in the
+DB. Any changes that have not yet been applied are executed and their downgrade script is stored
+alongside the changelog entry in the database to facilitate downgrades in the event of a rollback.
+
+Downgrades
+----------
+If, while running migrations, a rollback is detected, i.e. a change exists in the DB changelog that
+does not exist on the classpath, the downgrade script associated with each affected change is
+applied.
+
+Baselines
+---------
+After enough time has passed (at least 1 official release), it should be safe to baseline migrations
+if desired. This can be accomplished by ensuring the changes from migrations have been applied to
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql) and then
+removing the corresponding migration classes and adding a migration to remove the changelog entries.
\ No newline at end of file
diff --git a/source/documentation/0.18.0/development/design-documents.md b/source/documentation/0.18.0/development/design-documents.md
new file mode 100644
index 0000000..ee92a60
--- /dev/null
+++ b/source/documentation/0.18.0/development/design-documents.md
@@ -0,0 +1,23 @@
+Design Documents
+================
+
+Since Aurora's inception as an Apache project, larger feature additions to the
+code base have been discussed in the form of design documents. Design documents
+are living documents until a consensus has been reached to implement a feature
+in the proposed form.
+
+Current and past documents:
+
+* [Command Hooks for the Aurora Client](../design/command-hooks/)
+* [Dynamic Reservations](https://docs.google.com/document/d/19gV8Po6DIHO14tOC7Qouk8RnboY8UCfRTninwn_5-7c/edit)
+* [GPU Resources in Aurora](https://docs.google.com/document/d/1J9SIswRMpVKQpnlvJAMAJtKfPP7ZARFknuyXl-2aZ-M/edit)
+* [Health Checks for Updates](https://docs.google.com/document/d/1KOO0LC046k75TqQqJ4c0FQcVGbxvrn71E10wAjMorVY/edit)
+* [JobUpdateDiff thrift API](https://docs.google.com/document/d/1Fc_YhhV7fc4D9Xv6gJzpfooxbK4YWZcvzw6Bd3qVTL8/edit)
+* [REST API RFC](https://docs.google.com/document/d/11_lAsYIRlD5ETRzF2eSd3oa8LXAHYFD8rSetspYXaf4/edit)
+* [Revocable Mesos offers in Aurora](https://docs.google.com/document/d/1r1WCHgmPJp5wbrqSZLsgtxPNj3sULfHrSFmxp2GyPTo/edit)
+* [Supporting the Mesos Universal Containerizer](https://docs.google.com/document/d/111T09NBF2zjjl7HE95xglsDpRdKoZqhCRM5hHmOfTLA/edit?usp=sharing)
+* [Tier Management In Apache Aurora](https://docs.google.com/document/d/1erszT-HsWf1zCIfhbqHlsotHxWUvDyI2xUwNQQQxLgs/edit?usp=sharing)
+* [Ubiquitous Jobs](https://docs.google.com/document/d/12hr6GnUZU3mc7xsWRzMi3nQILGB-3vyUxvbG-6YmvdE/edit)
+* [Pluggable Scheduling](https://docs.google.com/document/d/1fVHLt9AF-YbOCVCDMQmi5DATVusn-tqY8DldKbjVEm0/edit)
+
+Design documents can be found in the Aurora issue tracker via the query [`project = AURORA AND text ~ "docs.google.com" ORDER BY created`](https://issues.apache.org/jira/browse/AURORA-1528?jql=project%20%3D%20AURORA%20AND%20text%20~%20%22docs.google.com%22%20ORDER%20BY%20created).
diff --git a/source/documentation/0.18.0/development/design/command-hooks.md b/source/documentation/0.18.0/development/design/command-hooks.md
new file mode 100644
index 0000000..3f3f70f
--- /dev/null
+++ b/source/documentation/0.18.0/development/design/command-hooks.md
@@ -0,0 +1,102 @@
+# Command Hooks for the Aurora Client
+
+## Introduction/Motivation
+
+We've got hooks in the client that surround API calls. These are
+pretty awkward, because they don't correlate with user actions. For
+example, suppose we wanted a policy that said users weren't allowed to
+kill all instances of a production job at once.
+
+Right now, all that we could hook would be the "killJob" api call. But
+kill (at least in newer versions of the client) normally runs in
+batches. If a user called killall, what we would see on the API level
+is a series of "killJob" calls, each of which specified a batch of
+instances. We wouldn't be able to distinguish between really killing
+all instances of a job (which is forbidden under this policy), and
+carefully killing in batches (which is permitted). In each case, the
+hook would just see a series of API calls, and couldn't find out what
+the actual command being executed was!
+
+For most policy enforcement, what we really want to be able to do is
+look at and vet the commands that a user is performing, not the API
+calls that the client uses to implement those commands.
+
+So I propose that we add a new kind of hooks, which surround noun/verb
+commands. A hook will register itself to handle a collection of (noun,
+verb) pairs. Whenever any of those noun/verb commands are invoked, the
+hooks methods will be called around the execution of the verb. A
+pre-hook will have the ability to reject a command, preventing the
+verb from being executed.
+
+## Registering Hooks
+
+These hooks will be registered via configuration plugins. A configuration plugin
+can register hooks using an API. Hooks registered this way are, effectively,
+hardwired into the client executable.
+
+The order of execution of hooks is unspecified: they may be called in
+any order. There is no way to guarantee that one hook will execute
+before some other hook.
+
+
+### Global Hooks
+
+Commands registered by the python call are called _global_ hooks,
+because they will run for all configurations, whether or not they
+specify any hooks in the configuration file.
+
+In the implementation, hooks are registered in the module
+`apache.aurora.client.cli.command_hooks`, using the class
+`GlobalCommandHookRegistry`. A global hook can be registered by calling
+`GlobalCommandHookRegistry.register_command_hook` in a configuration plugin.
+
+### The API
+
+    class CommandHook(object):
+      @property
+      def name(self):
+        """Returns a name for the hook."""
+
+      def get_nouns(self):
+        """Return the nouns that have verbs that should invoke this hook."""
+
+      def get_verbs(self, noun):
+        """Return the verbs for a particular noun that should invoke this hook."""
+
+      @abstractmethod
+      def pre_command(self, noun, verb, context, commandline):
+        """Execute a hook before invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        Returns: True if the command should be allowed to proceed; False if the command
+        should be rejected.
+        """
+
+      def post_command(self, noun, verb, context, commandline, result):
+        """Execute a hook after invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        * result: the result code returned by the verb.
+        Returns: nothing
+        """
+
+    class GlobalCommandHookRegistry(object):
+      @classmethod
+      def register_command_hook(cls, hook):
+        pass
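+
+As an illustrative sketch (names are hypothetical), a hook enforcing the motivating
+policy above could register itself for `job killall` and reject the command when the
+job key points at a production environment:
+
+    class ProdKillallHook(CommandHook):
+      """Reject attempts to kill all instances of a production job at once."""
+
+      @property
+      def name(self):
+        return "prod_killall"
+
+      def get_nouns(self):
+        return ["job"]
+
+      def get_verbs(self, noun):
+        return ["killall"] if noun == "job" else []
+
+      def pre_command(self, noun, verb, context, commandline):
+        # Returning False rejects the command before the verb runs.
+        # Naive check: look for a prod environment in the original argv.
+        return not any("/prod/" in arg for arg in commandline)
+
+      def post_command(self, noun, verb, context, commandline, result):
+        pass
+
+    GlobalCommandHookRegistry.register_command_hook(ProdKillallHook())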
+
+### Skipping Hooks
+
+To skip a hook, a user uses a command-line option, `--skip-hooks`. The option can either
+specify specific hooks to skip, or "all":
+
+* `aurora --skip-hooks=all job create east/bozo/devel/myjob` will create a job
+  without running any hooks.
+* `aurora --skip-hooks=test,iq create east/bozo/devel/myjob` will create a job,
+  and will skip only the hooks named "test" and "iq".
diff --git a/source/documentation/0.18.0/development/scheduler.md b/source/documentation/0.18.0/development/scheduler.md
new file mode 100644
index 0000000..fc0836a
--- /dev/null
+++ b/source/documentation/0.18.0/development/scheduler.md
@@ -0,0 +1,118 @@
+Developing the Aurora Scheduler
+===============================
+
+The Aurora scheduler is written in Java code and built with [Gradle](http://gradle.org).
+
+
+Prerequisite
+============
+
+When using Apache Aurora checked out from the source repository or the binary
+distribution, the Gradle wrapper and JavaScript dependencies are provided.
+However, you need to manually install them when using the source release
+downloads:
+
+1. Install Gradle following the instructions on the [Gradle web site](http://gradle.org)
+2. From the root directory of the Apache Aurora project generate the Gradle
+wrapper by running:
+
+    gradle wrapper
+
+
+Getting Started
+===============
+
+You will need Java 8 installed and on your `PATH` or unzipped somewhere with `JAVA_HOME` set. Then
+
+    ./gradlew tasks
+
+will bootstrap the build system and show available tasks. This can take a while the first time you
+run it but subsequent runs will be much faster due to cached artifacts.
+
+Running the Tests
+-----------------
+Aurora has a comprehensive unit test suite. To run the tests use
+
+    ./gradlew build
+
+Gradle will only re-run tests when their dependencies have changed. To force a re-run of all
+tests use
+
+    ./gradlew clean build
+
+Running the build with code quality checks
+------------------------------------------
+To speed up development iteration, the plain gradle commands will not run static analysis tools.
+However, you should run these checks before posting a review diff, and **always** run them before
+pushing a commit to origin/master.
+
+    ./gradlew build -Pq
+
+Running integration tests
+-------------------------
+To run the same tests that are run in the Apache Aurora continuous integration
+environment:
+
+    ./build-support/jenkins/build.sh
+
+In addition, there is an end-to-end test that runs a suite of Aurora commands
+using a virtual cluster:
+
+    ./src/test/sh/org/apache/aurora/e2e/test_end_to_end.sh
+
+Creating a bundle for deployment
+--------------------------------
+Gradle can create a zip file containing Aurora, all of its dependencies, and a launch script with
+
+    ./gradlew distZip
+
+or a tar file containing the same files with
+
+    ./gradlew distTar
+
+The output file will be written to `dist/distributions/aurora-scheduler.zip` or
+`dist/distributions/aurora-scheduler.tar`.
+
+
+
+Developing Aurora Java code
+===========================
+
+Setting up an IDE
+-----------------
+Gradle can generate project files for your IDE. To generate an IntelliJ IDEA project run
+
+    ./gradlew idea
+
+and import the generated `aurora.ipr` file.
+
+Adding or Upgrading a Dependency
+--------------------------------
+New dependencies can be added from Maven central by adding a `compile` dependency to `build.gradle`.
+For example, to add a dependency on `com.example`'s `example-lib` 1.0 add this block:
+
+    compile 'com.example:example-lib:1.0'
+
+NOTE: Anyone thinking about adding a new dependency should first familiarize themselves with the
+Apache Foundation's third-party licensing
+[policy](http://www.apache.org/legal/resolved.html#category-x).
+
+
+
+Developing the Aurora Build System
+==================================
+
+Bootstrapping Gradle
+--------------------
+The following files were autogenerated by `gradle wrapper` using gradle's
+[Wrapper](http://www.gradle.org/docs/current/dsl/org.gradle.api.tasks.wrapper.Wrapper.html) plugin and
+should not be modified directly:
+
+    ./gradlew
+    ./gradlew.bat
+    ./gradle/wrapper/gradle-wrapper.jar
+    ./gradle/wrapper/gradle-wrapper.properties
+
+To upgrade Gradle, unpack the new version somewhere, run `/path/to/new/gradle wrapper` in the
+repository root and commit the changed files.
+
diff --git a/source/documentation/0.18.0/development/thermos.md b/source/documentation/0.18.0/development/thermos.md
new file mode 100644
index 0000000..0efe130
--- /dev/null
+++ b/source/documentation/0.18.0/development/thermos.md
@@ -0,0 +1,126 @@
+The Python components of Aurora are built using [Pants](https://pantsbuild.github.io).
+
+
+Python Build Conventions
+========================
+The Python code is laid out according to the following conventions:
+
+1. 1 `BUILD` per 3rd level directory. For a list of current top-level packages run:
+
+        % find src/main/python -maxdepth 3 -mindepth 3 -type d |\
+        while read dname; do echo $dname |\
+            sed 's@src/main/python/\(.*\)/\(.*\)/\(.*\).*@\1.\2.\3@'; done
+
+2.  Each `BUILD` file exports 1
+    [`python_library`](https://pantsbuild.github.io/build_dictionary.html#bdict_python_library)
+    that provides a
+    [`setup_py`](https://pantsbuild.github.io/build_dictionary.html#setup_py)
+    containing each
+    [`python_binary`](https://pantsbuild.github.io/build_dictionary.html#python_binary)
+    in the `BUILD` file, named the same as the directory it's in so that it can be referenced
+    without a ':' character. The `sources` field in the `python_library` will almost always be
+    `rglobs('*.py')`.
+
+3.  Other BUILD files may only depend on this single public `python_library`
+    target. Any other target is considered a private implementation detail and
+    should be prefixed with an `_`.
+
+4.  `python_binary` targets are always named the same as the exported console script.
+
+5.  `python_binary` targets must have identical `dependencies` to the `python_library` exported
+    by the package and must use `entry_point`.
+
+    This means a PEX file generated by pants will contain exactly the same files that will be
+    available on the `PYTHONPATH` in the case of `pip install` of the corresponding library
+    target. This will help our migration off of Pants in the future.
+
+Annotated example - apache.thermos.runner
+-----------------------------------------
+
+    % find src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner/__init__.py
+    src/main/python/apache/thermos/runner/thermos_runner.py
+    src/main/python/apache/thermos/runner/BUILD
+    % cat src/main/python/apache/thermos/runner/BUILD
+    # License boilerplate omitted
+    import os
+
+
+    # Private target so that a setup_py can exist without a circular dependency. Only targets within
+    # this file should depend on this.
+    python_library(
+      name = '_runner',
+      # The target covers every python file under this directory and subdirectories.
+      sources = rglobs('*.py'),
+      dependencies = [
+        '3rdparty/python:twitter.common.app',
+        '3rdparty/python:twitter.common.log',
+        # Source dependencies are always referenced without a ':'.
+        'src/main/python/apache/thermos/common',
+        'src/main/python/apache/thermos/config',
+        'src/main/python/apache/thermos/core',
+      ],
+    )
+
+    # Binary target for thermos_runner.pex. Nothing should depend on this - it's only used as an
+    # argument to ./pants binary.
+    python_binary(
+      name = 'thermos_runner',
+      # Use entry_point, not source so the files used here are the same ones tests see.
+      entry_point = 'apache.thermos.bin.thermos_runner',
+      dependencies = [
+        # Notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+    )
+
+    # The public library that everyone importing the runner symbols uses.
+    # The test targets and any other dependent source code should depend on this.
+    python_library(
+      name = 'runner',
+      dependencies = [
+        # Again, notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+      # We always provide a setup_py. This will cause any dependee libraries to automatically
+      # reference this library in their requirements.txt rather than copy the source files into their
+      # sdist.
+      provides = setup_py(
+        # Conventionally named and versioned.
+        name = 'apache.thermos.runner',
+        version = open(os.path.join(get_buildroot(), '.auroraversion')).read().strip().upper(),
+      ).with_binaries({
+        # Every binary in this file should also be repeated here.
+        # Always use the dict-form of .with_binaries so that commands with dashes in their names are
+        # supported.
+        # The console script name is always the same as the PEX with .pex stripped.
+        'thermos_runner': ':thermos_runner',
+      }),
+    )
+
+
+
+Thermos Test resources
+======================
+
+The Aurora source repository and distributions contain several
+[binary files](../../src/test/resources/org/apache/thermos/root/checkpoints) to
+qualify the backwards-compatibility of thermos with checkpoint data. Since
+thermos persists state to disk (to be read by the thermos observer), it is important that we have
+tests that prevent regressions affecting the ability to parse previously-written data.
+
+The files included represent persisted checkpoints that exercise different
+features of thermos. The existing files should not be modified unless
+we are accepting backwards incompatibility, such as with a major release.
+
+It is not practical to write source code to generate these files on the fly,
+as source would be vulnerable to drift (e.g. due to refactoring) in ways
+that would undermine the goal of ensuring backwards compatibility.
+
+The most common reason to add a new checkpoint file would be to provide
+coverage for new thermos features that alter the data format. This is
+accomplished by writing and running a
+[job configuration](../../reference/configuration/) that exercises the feature, and
+copying the checkpoint file from the sandbox directory; by default this is
+`/var/run/thermos/checkpoints/<aurora task id>`.
diff --git a/source/documentation/0.18.0/development/thrift.md b/source/documentation/0.18.0/development/thrift.md
new file mode 100644
index 0000000..ddf6789
--- /dev/null
+++ b/source/documentation/0.18.0/development/thrift.md
@@ -0,0 +1,54 @@
+Thrift
+======
+
+Aurora uses [Apache Thrift](https://thrift.apache.org/) for representing structured data in
+client/server RPC protocol as well as for internal data storage. While Thrift is capable of
+correctly handling additions and renames of the existing members, field removals must be done
+carefully to ensure backwards compatibility and provide a predictable deprecation cycle. This
+document describes general guidelines for making Thrift schema changes to the existing fields in
+[api.thrift](https://github.com/apache/aurora/blob/rel/0.18.0/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+It is highly recommended to go through the
+[Thrift: The Missing Guide](http://diwakergupta.github.io/thrift-missing-guide/) first to refresh on
+basic Thrift schema concepts.
+
+Checklist
+---------
+Every existing Thrift schema modification is unique in its requirements and must be analyzed
+carefully to identify its scope and expected consequences. The following checklist may help in that
+analysis:
+* Is this a new field/struct? If yes, go ahead
+* Is this a pure field/struct rename without any type/structure change? If yes, go ahead and rename
+* Anything else, read further to make sure your change is properly planned
+
+Deprecation cycle
+-----------------
+Any time a breaking change (e.g.: field replacement or removal) is required, the following cycle
+must be followed:
+
+### vCurrent
+The change is applied in a way that still allows the scheduler/client at this version to
+communicate with the scheduler/client from vCurrent-1.
+* Do not remove or rename the old field
+* Add a new field as an eventual replacement of the old one and implement a dual read/write
+anywhere the old field is used. If a thrift struct is mapped in the DB store, make sure both columns
+are marked as `NOT NULL`
+* Check [storage.thrift](https://github.com/apache/aurora/blob/rel/0.18.0/api/src/main/thrift/org/apache/aurora/gen/storage.thrift) to see if
+the affected struct is stored in Aurora scheduler storage. If so, it's almost certainly also
+necessary to perform a [DB migration](../db-migration/).
+* Add a deprecation jira ticket into the vCurrent+1 release candidate
+* Add a TODO for the deprecated field mentioning the jira ticket
+
+### vCurrent+1
+Finalize the change by removing the deprecated fields from the Thrift schema.
+* Drop any dual read/write routines added in the previous version
+* Remove thrift backfilling in scheduler
+* Remove the deprecated Thrift field
+
+Testing
+-------
+It's always advisable to test your changes in the local vagrant environment to build more
+confidence that your change is backwards compatible. It's easy to simulate different
+client/scheduler versions by playing with `aurorabuild` command. See [this document](../../getting-started/vagrant/)
+for more.
+
diff --git a/source/documentation/0.18.0/development/ui.md b/source/documentation/0.18.0/development/ui.md
new file mode 100644
index 0000000..f41614e
--- /dev/null
+++ b/source/documentation/0.18.0/development/ui.md
@@ -0,0 +1,46 @@
+Developing the Aurora Scheduler UI
+==================================
+
+Installing bower (optional)
+----------------------------
+Third party JS libraries used in Aurora (located at 3rdparty/javascript/bower_components) are
+managed by bower, a JS dependency manager. Bower is only required if you plan to add, remove or
+update JS libraries. Bower can be installed using the following command:
+
+    npm install -g bower
+
+Bower depends on node.js and npm. The easiest way to install node on a Mac is via brew:
+
+    brew install node
+
+For more node.js installation options refer to https://github.com/joyent/node/wiki/Installation.
+
+More info on installing and using bower can be found at: http://bower.io/. Once installed, you can
+use the following commands to view and modify the bower repo at
+3rdparty/javascript/bower_components:
+
+    bower list
+    bower install <library name>
+    bower remove <library name>
+    bower update <library name>
+    bower help
+
+
+Faster Iteration in Vagrant
+---------------------------
+The scheduler serves UI assets from the classpath. For production deployments this means the assets
+are served from within a jar. However, for faster development iteration, the vagrant image is
+configured to add the `scheduler` subtree of `/vagrant/dist/resources/main` to the head of
+`CLASSPATH`. This path is configured as a shared filesystem to the path on the host system where
+your Aurora repository lives. This means that any updates under `dist/resources/main/scheduler` in
+your checkout will be reflected immediately in the UI served from within the vagrant image.
+
+The one caveat to this is that this path is under `dist` not `src`. This is because the assets must
+be processed by gradle before they can be served. So, unfortunately, you cannot just save your local
+changes and see them reflected in the UI, you must first run `./gradlew processResources`. This is
+less than ideal, but better than having to restart the scheduler after every change. Additionally,
+gradle makes this process somewhat easier with the use of the `--continuous` flag. If you run:
+`./gradlew processResources --continuous` gradle will monitor the filesystem for changes and run the
+task automatically as necessary. This doesn't quite provide hot-reload capabilities, but it does
+allow for <5s from save to changes being visible in the UI with no further action required on the
+part of the developer.
diff --git a/source/documentation/0.18.0/features/constraints.md b/source/documentation/0.18.0/features/constraints.md
new file mode 100644
index 0000000..4ade848
--- /dev/null
+++ b/source/documentation/0.18.0/features/constraints.md
@@ -0,0 +1,126 @@
+Scheduling Constraints
+======================
+
+By default, Aurora will pick any random agent with sufficient resources
+in order to schedule a task. This scheduling choice can be further
+restricted with the help of constraints.
+
+
+Mesos Attributes
+----------------
+
+Data centers are often organized with hierarchical failure domains.  Common failure domains
+include hosts, racks, rows, and PDUs.  If you have this information available, it is wise to tag
+the Mesos agent with them as
+[attributes](https://mesos.apache.org/documentation/attributes-resources/).
+
+The Mesos agent `--attributes` command line argument can be used to mark agents with
+static key/value pairs, so called attributes (not to be confused with `--resources`, which are
+dynamic and accounted).
+
+For example, consider the host `cluster1-aaa-03-sr2` and its following attributes (given in
+key:value format): `host:cluster1-aaa-03-sr2` and `rack:aaa`.
+
+Aurora makes these attributes available for matching with scheduling constraints.
+
+
+Limit Constraints
+-----------------
+
+Limit constraints allow you to control machine diversity. The below
+constraint ensures that no more than two instances of your job may run on a single host.
+Think of this as a "group by" limit.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'host': 'limit:2',
+      },
+      ...
+    )
+
+
+Likewise, you can use constraints to control rack diversity, e.g. at
+most one task per rack:
+
+    constraints = {
+      'rack': 'limit:1',
+    }
+
+Use these constraints sparingly as they can dramatically reduce Tasks' schedulability.
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+
+Value Constraints
+-----------------
+
+Value constraints can be used to express that a certain attribute with a certain value
+should be present on a Mesos agent. For example, the following job would only be
+scheduled on nodes that claim to have an `SSD` as their disk.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'disk': 'SSD',
+      },
+      ...
+    )
+
+
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+Running stateful services
+-------------------------
+
+Aurora is best suited to run stateless applications, but it also accommodates stateful services
+like databases, or services that otherwise need to always run on the same machines.
+
+### Dedicated attribute
+
+Most Mesos attributes are arbitrary and available for custom use.  There is one exception,
+though: the `dedicated` attribute.  Aurora treats this attribute specially: machines marked with it
+will only run matching jobs, and matching jobs will only be scheduled on these machines.
+
+
+#### Syntax
+The dedicated attribute has semantic meaning. The format is `$role(/.*)?`. When a job is created,
+the scheduler requires that the `$role` component matches the `role` field in the job
+configuration, and will reject the job creation otherwise.  The remainder of the attribute is
+free-form. We've developed the idiom of formatting this attribute as `$role/$job`, but do not
+enforce this. For example: a job `devcluster/www-data/prod/hello` with a dedicated constraint set as
+`www-data/web.multi` will have its tasks scheduled only on Mesos agents configured with:
+`--attributes=dedicated:www-data/web.multi`.
+
+A wildcard (`*`) may be used for the role portion of the dedicated attribute, which will allow any
+owner to elect for a job to run on the host(s). For example: tasks from both
+`devcluster/www-data/prod/hello` and `devcluster/vagrant/test/hello` with a dedicated constraint
+formatted as `*/web.multi` will be scheduled only on Mesos agents configured with
+`--attributes=dedicated:*/web.multi`. This may be useful when assembling a virtual cluster of
+machines sharing the same set of traits or requirements.
+
+##### Example
+Consider the following agent command line:
+
+    mesos-slave --attributes="dedicated:db_team/redis" ...
+
+And this job configuration:
+
+    Service(
+      name = 'redis',
+      role = 'db_team',
+      constraints = {
+        'dedicated': 'db_team/redis'
+      },
+      ...
+    )
+
+The job configuration is indicating that it should only be scheduled on agents with the attribute
+`dedicated:db_team/redis`.  Additionally, Aurora will prevent any tasks that do _not_ have that
+constraint from running on those agents.
+
diff --git a/source/documentation/0.18.0/features/containers.md b/source/documentation/0.18.0/features/containers.md
new file mode 100644
index 0000000..5bf1601
--- /dev/null
+++ b/source/documentation/0.18.0/features/containers.md
@@ -0,0 +1,130 @@
+Containers
+==========
+
+Aurora supports several containerizers, notably the Mesos containerizer and the Docker
+containerizer. The Mesos containerizer uses native OS features directly to provide isolation between
+containers, while the Docker containerizer delegates container management to the Docker engine.
+
+The support for launching container images via both containerizers has to be
+[enabled by a cluster operator](../../operations/configuration/#containers).
+
+Mesos Containerizer
+-------------------
+
+The Mesos containerizer is the native Mesos containerizer solution. It allows tasks to be
+run with an array of [pluggable isolators](../resource-isolation/) and can launch tasks using
+[Docker](https://github.com/docker/docker/blob/master/image/spec/v1.md) images,
+[AppC](https://github.com/appc/spec/blob/master/SPEC.md) images, or directly on the agent host
+filesystem.
+
+The following example (available in our [Vagrant environment](../../getting-started/vagrant/))
+launches a hello world example within a `debian/jessie` Docker image:
+
+    $ cat /vagrant/examples/jobs/hello_docker_image.aurora
+    hello_loop = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = Task(
+      processes = [hello_loop],
+      resources = Resources(cpu=1, ram=1*MB, disk=8*MB)
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker_image',
+        task = task,
+        container = Mesos(image=DockerImage(name='debian', tag='jessie'))
+      )
+    ]
+
+Docker and AppC images are designated using an appropriate `image` property of the `Mesos`
+configuration object. If either `container` or `image` is left unspecified, the host filesystem
+will be used. Further details of how to specify images can be found in the
+[Reference Documentation](../../reference/configuration/#mesos-object).
+
+By default, Aurora launches processes as a Linux user whose name matches the job's role (e.g.
+`www-data` in the example above). This user has to exist on the host filesystem. If it does not
+exist within the container image, it will be created automatically. Otherwise, the user and its
+primary group have to exist in the image with matching uid/gid.
+
+For more information on the Mesos containerizer filesystem, namespace, and isolator features, visit
+[Mesos Containerizer](http://mesos.apache.org/documentation/latest/mesos-containerizer/) and
+[Mesos Container Images](http://mesos.apache.org/documentation/latest/container-image/).
+
+
+Docker Containerizer
+--------------------
+
+The Docker containerizer launches container images using the Docker engine. It may often provide
+more advanced features than the native Mesos containerizer, but has to be installed separately to
+Mesos on each agent host.
+
+Starting with the 0.17.0 release, `image` can be specified with a `{{docker.image[name][tag]}}` binder so that
+the tag can be resolved to a concrete image digest. This ensures that the job always uses the same image
+across restarts, even if the version identified by the tag has been updated, guaranteeing that only job
+updates can mutate configuration.
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/hello_docker_engine.aurora
+    hello_loop = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = Task(
+      processes = [hello_loop],
+      resources = Resources(cpu=1, ram=1*MB, disk=8*MB)
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker',
+        task = task,
+        container = Docker(image = 'python:2.7')
+      ), Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker_engine_binding',
+        task = task,
+        container = Docker(image = '{{docker.image[library/python][2.7]}}')
+      )
+    ]
+
+Note that this feature requires a v2 Docker registry. If using a private Docker registry, its URL
+must be specified in the `clusters.json` configuration file under the key `docker_registry`.
+If not specified `docker_registry` defaults to `https://registry-1.docker.io` (Docker Hub).
+
+Example:
+
+    # clusters.json
+    [{
+      "name": "devcluster",
+      ...
+      "docker_registry": "https://registry.example.com"
+    }]
+
+Details of how to use Docker via the Docker engine can be found in the
+[Reference Documentation](../../reference/configuration/#docker-object). Please note that in order to
+correctly execute processes inside a job, the Docker container must have Python 2.7 and potentially
+further Mesos dependencies installed. This limitation does not hold for Docker containers used via
+the Mesos containerizer.
+
+For more information on launching Docker containers through the Docker containerizer, visit
+[Docker Containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/).
diff --git a/source/documentation/0.18.0/features/cron-jobs.md b/source/documentation/0.18.0/features/cron-jobs.md
new file mode 100644
index 0000000..bcc94dc
--- /dev/null
+++ b/source/documentation/0.18.0/features/cron-jobs.md
@@ -0,0 +1,124 @@
+# Cron Jobs
+
+Aurora supports execution of scheduled jobs on a Mesos cluster using cron-style syntax.
+
+- [Overview](#overview)
+- [Collision Policies](#collision-policies)
+- [Failure recovery](#failure-recovery)
+- [Interacting with cron jobs via the Aurora CLI](#interacting-with-cron-jobs-via-the-aurora-cli)
+	- [cron schedule](#cron-schedule)
+	- [cron deschedule](#cron-deschedule)
+	- [cron start](#cron-start)
+	- [job killall, job restart, job kill](#job-killall-job-restart-job-kill)
+- [Technical Note About Syntax](#technical-note-about-syntax)
+- [Caveats](#caveats)
+	- [Failovers](#failovers)
+	- [Collision policy is best-effort](#collision-policy-is-best-effort)
+	- [Timezone Configuration](#timezone-configuration)
+
+## Overview
+
+A job is identified as a cron job by the presence of a
+`cron_schedule` attribute containing a cron-style schedule in the
+[`Job`](../../reference/configuration/#job-objects) object. Examples of cron schedules
+include "every 5 minutes" (`*/5 * * * *`), "Fridays at 17:00" (`* 17 * * FRI`), and
+"the 1st and 15th day of the month at 03:00" (`0 3 1,15 *`).
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/cron_hello_world.aurora
+    # A cron job that runs every 5 minutes.
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]
+
+## Collision Policies
+
+The `cron_collision_policy` field specifies the scheduler's behavior when a new cron job is
+triggered while an older run hasn't finished. The scheduler has two policies available:
+
+* `KILL_EXISTING`: The default policy - on a collision the old instances are killed and instances
+with the current configuration are started.
+* `CANCEL_NEW`: On a collision the new run is cancelled.
+
+Note that the use of `CANCEL_NEW` is likely a code smell - interrupted cron jobs should be able
+to recover their progress on a subsequent invocation, otherwise they risk having their work queue
+grow faster than they can process it.
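+
+For illustration, the collision policy is selected directly in the job
+configuration; a minimal sketch, reusing the hello-world cron job from above:
+
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        cron_collision_policy = 'CANCEL_NEW',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]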
+
+## Failure recovery
+
+Unlike with services, which Aurora will always re-execute regardless of exit status, instances of
+cron jobs retry according to the `max_task_failures` attribute of the
+[Task](../../reference/configuration/#task-object) object. To get "run-until-success" semantics,
+set `max_task_failures` to `-1`.
+
+## Interacting with cron jobs via the Aurora CLI
+
+Most interaction with cron jobs takes place using the `cron` subcommand. See `aurora cron -h`
+for up-to-date usage instructions.
+
+### cron schedule
+Schedules a new cron job on the Aurora cluster for later runs or replaces the existing cron template
+with a new one. Only future runs will be affected; any existing active tasks are left intact.
+
+    $ aurora cron schedule devcluster/www-data/test/cron_hello_world /vagrant/examples/jobs/cron_hello_world.aurora
+
+### cron deschedule
+Deschedules a cron job, preventing future runs but allowing current runs to complete.
+
+    $ aurora cron deschedule devcluster/www-data/test/cron_hello_world
+
+### cron start
+Start a cron job immediately, outside of its normal cron schedule.
+
+    $ aurora cron start devcluster/www-data/test/cron_hello_world
+
+### job killall, job restart, job kill
+Cron jobs create instances running on the cluster that you can interact with like normal Aurora
+tasks with `job kill` and `job restart`.
+
+
+## Technical Note About Syntax
+
+`cron_schedule` uses a restricted subset of BSD crontab syntax. While the
+execution engine currently uses Quartz, the schedule parsing is custom and supports a subset of
+FreeBSD [crontab(5)](http://www.freebsd.org/cgi/man.cgi?crontab(5)) syntax. See
+[the source](https://github.com/apache/aurora/blob/master/src/main/java/org/apache/aurora/scheduler/cron/CrontabEntry.java#L106-L124)
+for details.
+
+
+## Caveats
+
+### Failovers
+No failover recovery. Aurora does not record the latest minute it fired
+triggers for across failovers. Therefore it's possible to miss triggers
+on failover. Note that this behavior may change in the future.
+
+It's necessary to sync time between schedulers with something like `ntpd`.
+Clock skew could cause double or missed triggers in the case of a failover.
+
+### Collision policy is best-effort
+Aurora aims to always have *at least one copy* of a given instance running at a time - it's
+an AP system, meaning it chooses Availability and Partition Tolerance at the expense of
+Consistency.
+
+If your collision policy was `CANCEL_NEW` and a task has terminated but
+Aurora has not noticed this yet, Aurora will go ahead and create your new
+task.
+
+If your collision policy was `KILL_EXISTING` and a task was marked `LOST`
+but not yet GCed, Aurora will go ahead and create your new task without
+attempting to kill the old one (outside the GC interval).
+
+### Timezone Configuration
+The cron timezone is configured independently of the JVM timezone with the `-cron_timezone` flag and
+defaults to UTC.
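+
+For example, assuming the flag accepts a standard timezone identifier, a
+scheduler started with `-cron_timezone=America/Los_Angeles` would interpret all
+cron schedules in Pacific time.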
diff --git a/source/documentation/0.18.0/features/custom-executors.md b/source/documentation/0.18.0/features/custom-executors.md
new file mode 100644
index 0000000..1357c1e
--- /dev/null
+++ b/source/documentation/0.18.0/features/custom-executors.md
@@ -0,0 +1,153 @@
+Custom Executors
+================
+
+If the need arises to use a Mesos executor other than the Thermos executor, the scheduler can be
+configured to utilize a custom executor by specifying the `-custom_executor_config` flag.
+The flag must be set to the path of a valid executor configuration file.
+
+The configuration file must be a valid **JSON array**. It must contain, at minimum,
+one executor configuration including the name, command and resources fields, and
+must be pointed to by the `-custom_executor_config` flag when the scheduler is
+started.
+
+### Array Entry
+
+Property                 | Description
+-----------------------  | ---------------------------------
+executor (required)      | Description of executor.
+task_prefix (required)   | Prefix given to tasks launched with this executor's configuration.
+volume_mounts (optional) | Volumes to be mounted in container running executor.
+
+#### executor
+
+Property                 | Description
+-----------------------  | ---------------------------------
+name (required)          | Name of the executor.
+command (required)       | How to run the executor.
+resources (required)     | Overhead to use for each executor instance.
+
+#### command
+
+Property                 | Description
+-----------------------  | ---------------------------------
+value (required)         | The command to execute.
+arguments (optional)     | A list of arguments to pass to the command.
+uris (optional)          | List of resources to download into the task sandbox.
+shell (optional)         | Run executor via shell.
+
+A note on the command property (from [mesos.proto](https://github.com/apache/mesos/blob/master/include/mesos/mesos.proto)):
+
+```
+1) If 'shell == true', the command will be launched via shell
+   (i.e., /bin/sh -c 'value'). The 'value' specified will be
+   treated as the shell command. The 'arguments' will be ignored.
+2) If 'shell == false', the command will be launched by passing
+   arguments to an executable. The 'value' specified will be
+   treated as the filename of the executable. The 'arguments'
+   will be treated as the arguments to the executable. This is
+   similar to how POSIX exec families launch processes (i.e.,
+   execlp(value, arguments(0), arguments(1), ...)).
+```
+
+##### uris (list)
+* Follows the [Mesos Fetcher schema](http://mesos.apache.org/documentation/latest/fetcher/)
+
+Property                 | Description
+-----------------------  | ---------------------------------
+value (required)         | Path to the resource needed in the sandbox.
+executable (optional)    | Change resource to be executable via chmod.
+extract (optional)       | Extract files from packed or compressed archives into the sandbox.
+cache (optional)         | Use caching mechanism provided by Mesos for resources.
+
+#### resources (list)
+
+Property             | Description
+-------------------  | ---------------------------------
+name (required)      | Name of the resource: cpus or mem.
+type (required)      | Type of resource. Should always be SCALAR.
+scalar (required)    | Value in float for cpus or int for mem (in MBs)
+
+#### volume_mounts (list)
+
+Property                     | Description
+---------------------------  | ---------------------------------
+host_path (required)         | Host path to mount inside the container.
+container_path (required)    | Path inside the container where `host_path` will be mounted.
+mode (required)              | Mode in which to mount the volume, Read-Write (RW) or Read-Only (RO).
+
+A sample configuration is as follows:
+
+```json
+[
+    {
+      "executor": {
+        "name": "myExecutor",
+        "command": {
+          "value": "myExecutor.a",
+          "shell": "false",
+          "arguments": [
+            "localhost:2181",
+            "-verbose",
+            "-config myConfiguration.config"
+          ],
+          "uris": [
+            {
+              "value": "/dist/myExecutor.a",
+              "executable": true,
+              "extract": false,
+              "cache": true
+            },
+            {
+              "value": "/home/user/myConfiguration.config",
+              "executable": false,
+              "extract": false,
+              "cache": false
+            }
+          ]
+        },
+        "resources": [
+          {
+            "name": "cpus",
+            "type": "SCALAR",
+            "scalar": {
+              "value": 1.00
+            }
+          },
+          {
+            "name": "mem",
+            "type": "SCALAR",
+            "scalar": {
+              "value": 512
+            }
+          }
+        ]
+      },
+      "volume_mounts": [
+        {
+          "mode": "RO",
+          "container_path": "/path/on/container",
+          "host_path": "/path/to/host/directory"
+        },
+        {
+          "mode": "RW",
+          "container_path": "/container",
+          "host_path": "/host"
+        }
+      ],
+      "task_prefix": "my-executor-"
+    }
+]
+```
+
+It should be noted that if you do not use Thermos or a Thermos-based executor, links in the scheduler's
+Web UI for tasks will not work (at least for the time being).
+Some information about launched tasks can still be accessed via the Mesos Web UI or via the Aurora Client.
+
+### Using a custom executor
+
+It is not currently possible to create a job that runs on a custom executor using the default
+Aurora client. To allow the scheduler to pick the correct executor, the `JobConfiguration.TaskConfig.ExecutorConfig.name`
+field must be set to match the name used in the custom executor configuration blob (e.g. to run a job using myExecutor,
+`JobConfiguration.TaskConfig.ExecutorConfig.name` must be set to `myExecutor`). While support for modifying
+this field in Pystachio is being worked on, the easiest way to launch jobs with custom executors is to use
+an existing custom client such as [gorealis](https://github.com/rdelval/gorealis).
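+
+As a rough sketch of what such a client does (the module path, field values, and
+surrounding job-creation code here are illustrative assumptions, not a complete
+working example):
+
+    from gen.apache.aurora.api.ttypes import ExecutorConfig, TaskConfig
+
+    executor_config = ExecutorConfig(
+        name = 'myExecutor',  # Must match the name in the executor configuration file.
+        data = '{"opaque": "executor payload"}')
+
+    task_config = TaskConfig(executorConfig = executor_config)
+    # ... populate the remaining TaskConfig fields and submit the job via the
+    # scheduler's Thrift API.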
diff --git a/source/documentation/0.18.0/features/job-updates.md b/source/documentation/0.18.0/features/job-updates.md
new file mode 100644
index 0000000..727ef9b
--- /dev/null
+++ b/source/documentation/0.18.0/features/job-updates.md
@@ -0,0 +1,110 @@
+Aurora Job Updates
+==================
+
+`Job` configurations can be updated at any point in their lifecycle.
+Usually updates are done incrementally using a process called a *rolling
+upgrade*, in which Tasks are upgraded in small groups, one group at a
+time.  Updates are done using various Aurora Client commands.
+
+
+Rolling Job Updates
+-------------------
+
+There are several sub-commands to manage job updates:
+
+    aurora update start <job key> <configuration file>
+    aurora update info <job key>
+    aurora update pause <job key>
+    aurora update resume <job key>
+    aurora update abort <job key>
+    aurora update list <cluster>
+
+When you `start` a job update, the command will return once it has sent the
+instructions to the scheduler.  At that point, you may view detailed
+progress for the update with the `info` subcommand, in addition to viewing
+graphical progress in the web browser.  You may also get a full listing of
+in-progress updates in a cluster with `list`.
+
+Once an update has been started, you can `pause` to keep the update but halt
+progress.  This can be useful for doing things like debugging a partially-updated
+job to determine whether you would like to proceed.  You can `resume` to
+proceed.
+
+You may `abort` a job update regardless of the state it is in. This will
+instruct the scheduler to completely abandon the job update and leave the job
+in the current (possibly partially-updated) state.
+
+For a configuration update, the Aurora Scheduler calculates required changes
+by examining the current job config state and the new desired job config.
+It then starts a *rolling batched update process* by going through every batch
+and performing these operations, in order:
+
+- If an instance is not present in the scheduler but is present in
+  the new config, then the instance is created.
+- If an instance is present in both the scheduler and the new config, then
+  the scheduler diffs both task configs. If it detects any changes, it
+  performs an instance update by killing the old config instance and adding
+  the new config instance.
+- If an instance is present in the scheduler but isn't in the new config,
+  then that instance is killed.
+
+The Aurora Scheduler continues through the instance list until all tasks are
+updated and in `RUNNING`. If the scheduler determines the update is not going
+well (based on the criteria specified in the UpdateConfig), it cancels the update.
+
+Update cancellation runs a procedure similar to the update sequence described
+above, but in reverse order. New instance configs are swapped
+with old instance configs and batch updates proceed backwards
+from the point where the update failed. E.g., (0,1,2) (3,4,5) (6,7,
+8-FAIL) results in a rollback in order (8,7,6) (5,4,3) (2,1,0).
+
+For details on how to control a job update, please see the
+[UpdateConfig](../../reference/configuration/#updateconfig-objects) configuration object.
+
+
+Coordinated Job Updates
+------------------------
+
+Some Aurora services may benefit from having more control over updates by explicitly
+acknowledging ("heartbeating") job update progress. This may be helpful for mission-critical
+service updates where explicit job health monitoring is vital during the entire job update
+lifecycle. Such job updates would rely on an external service (or a custom client) periodically
+pulsing an active coordinated job update via a
+[pulseJobUpdate RPC](https://github.com/apache/aurora/blob/rel/0.18.0/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+A coordinated update is defined by setting a positive
+[pulse_interval_secs](../../reference/configuration/#updateconfig-objects) value in the job
+configuration file. If no pulses are received within the specified interval, the update will be
+blocked. A blocked
+update is unable to continue rolling forward (or rolling back) but retains its active status.
+It may only be unblocked by a fresh `pulseJobUpdate` call.
+
+NOTE: A coordinated update starts in `ROLL_FORWARD_AWAITING_PULSE` state and will not make any
+progress until the first pulse arrives. However, a paused update (`ROLL_FORWARD_PAUSED` or
+`ROLL_BACK_PAUSED`) is still considered active and upon resuming will immediately make progress
+provided the pulse interval has not expired.
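+
+A minimal sketch of opting a job into coordinated updates (only the relevant
+field is shown; the interval value is arbitrary):
+
+    update_config = UpdateConfig(
+      pulse_interval_secs = 60  # The update blocks if no pulse arrives within 60 seconds.
+    )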
+
+
+Canary Deployments
+------------------
+
+Canary deployments are a pattern for rolling out updates to a subset of job instances,
+in order to test different code versions alongside the actual production job.
+It is a risk-mitigation strategy for job owners and commonly used in a form where
+job instance 0 runs with a different configuration than the instances 1-N.
+
+For example, consider a job with 4 instances that each
+request 1 core of cpu, 1 GB of RAM, and 1 GB of disk space as specified
+in the configuration file `hello_world.aurora`. If you want to
+update it so it requests 2 GB of RAM instead of 1. You can create a new
+configuration file to do that called `new_hello_world.aurora` and
+issue
+
+    aurora update start <job_key_value>/0-1 new_hello_world.aurora
+
+This results in instances 0 and 1 having 1 cpu, 2 GB of RAM, and 1 GB of disk space,
+while instances 2 and 3 have 1 cpu, 1 GB of RAM, and 1 GB of disk space. If instance 3
+dies and restarts, it restarts with 1 cpu, 1 GB RAM, and 1 GB disk space.
+
+So that means there are two simultaneous task configurations for the same job
+at the same time, just valid for different ranges of instances. While this isn't a recommended
+pattern, it is valid and supported by the Aurora scheduler.
diff --git a/source/documentation/0.18.0/features/mesos-fetcher.md b/source/documentation/0.18.0/features/mesos-fetcher.md
new file mode 100644
index 0000000..967a905
--- /dev/null
+++ b/source/documentation/0.18.0/features/mesos-fetcher.md
@@ -0,0 +1,46 @@
+Mesos Fetcher
+=============
+
+Mesos has support for downloading resources into the sandbox through the
+use of the [Mesos Fetcher](http://mesos.apache.org/documentation/latest/fetcher/)
+
+Aurora supports passing URIs to the Mesos Fetcher dynamically by including
+a list of URIs in job submissions.
+
+How to use
+----------
+The scheduler flag `-enable_mesos_fetcher` must be set to true.
+
+Currently only the scheduler side of this feature has been implemented,
+so a modification to the existing client or a custom Thrift client is required
+to make use of this feature.
+
+If using a custom Thrift client, the list of URIs must be included in TaskConfig
+as the `mesosFetcherUris` field.
+
+Each Mesos Fetcher URI has the following data members:
+
+|Property | Description|
+|---------|------|
+|value (required)  |Path to the resource needed in the sandbox.|
+|extract (optional)|Extract files from packed or compressed archives into the sandbox.|
+|cache (optional) | Use caching mechanism provided by Mesos for resources.|
+
+Note that this structure is very similar to the one provided for downloading
+resources needed for a [custom executor](../../operations/configuration/).
+
+This is because both features use the Mesos fetcher to retrieve resources into
+the sandbox. However, the custom executor feature uses a static set of URIs
+configured on the server side, while the Mesos Fetcher feature uses a dynamic set
+of URIs provided at the time of job submission.
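+
+As a rough sketch (assuming Aurora's generated Python Thrift bindings and a
+`MesosFetcherURI` struct mirroring the table above), a custom client might
+populate the field like this:
+
+    from gen.apache.aurora.api.ttypes import MesosFetcherURI, TaskConfig
+
+    task_config = TaskConfig(
+        mesosFetcherUris = {
+            MesosFetcherURI(
+                value = 'https://example.com/archive.tar.gz',
+                extract = True,  # Unpack the archive into the sandbox.
+                cache = False)   # Do not use the Mesos fetcher cache.
+        })
+    # ... populate the remaining TaskConfig fields before submitting the job.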
+
+Security Implications
+---------------------
+There are security implications that must be taken into account when enabling this feature.
+**Enabling this feature may potentially enable any job submitting user to perform a privilege escalation.**
+
+Until a more thorough solution is created, one step that has been taken to mitigate this issue
+is to statically mark every user submitted URI as non-executable. This is in contrast to the set of URIs
+set in the custom executor feature which may mark any URI as executable.
+
+If the need arises to mark a downloaded URI as executable, please consider using the custom executor feature.
\ No newline at end of file
diff --git a/source/documentation/0.18.0/features/multitenancy.md b/source/documentation/0.18.0/features/multitenancy.md
new file mode 100644
index 0000000..c2ed26b
--- /dev/null
+++ b/source/documentation/0.18.0/features/multitenancy.md
@@ -0,0 +1,81 @@
+Multitenancy
+============
+
+Aurora is a multi-tenant system that can run jobs of multiple clients/tenants.
+Going beyond the [resource isolation on an individual host](../resource-isolation/), it is
+crucial to prevent those jobs from stepping on each other's toes.
+
+
+Job Namespaces
+--------------
+
+The namespace for jobs in Aurora follows a hierarchical structure. This is meant to make it easier
+to differentiate between different jobs. A job key consists of four parts. The four parts are
+`<cluster>/<role>/<environment>/<jobname>` in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts.
+* Environment names are namespaces.
+* Jobname is the custom name of your job.
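+
+For example, the job key `devcluster/www-data/test/hello_world` (an illustrative
+name) refers to the job `hello_world`, run by the role `www-data` in the `test`
+environment of the `devcluster` cluster.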
+
+Role names correspond to user accounts. They are used for
+[authentication](../../operations/security/), as the linux user used to run jobs, and for the
+assignment of [quota](#preemption). If you don't know what accounts are available, contact your
+sysadmin.
+
+The environment component in the job key serves as a namespace. The values for
+environment are validated in the client and the scheduler so as to allow any of `devel`, `test`,
+`production`, and any value matching the regular expression `staging[0-9]*`.
+
+None of the values imply any difference in the scheduling behavior. Conventionally, the
+"environment" is set so as to indicate a certain level of stability in the behavior of the job
+by ensuring that an appropriate level of testing has been performed on the application code. e.g.
+in the case of a typical Job, releases may progress through the following phases in order of
+increasing level of stability: `devel`, `test`, `staging`, `production`.
+
+
+Configuration Tiers
+-------------------
+
+Tier is a predefined bundle of task configuration options. Aurora schedules tasks and assigns them
+resources based on their tier assignment. The default scheduler tier configuration allows for
+3 tiers:
+
+ - `revocable`: The `revocable` tier requires the task to run with [revocable](../resource-isolation/#oversubscription)
+ resources.
+ - `preemptible`: Setting the task’s tier to `preemptible` allows for the possibility of that task
+ being [preempted](#preemption) by other tasks when cluster is running low on resources.
+ - `preferred`: The `preferred` tier prevents the task from using [revocable](../resource-isolation/#oversubscription)
+ resources and from being [preempted](#preemption).
+
+Since it is possible that a cluster is configured with a custom tier configuration, users should
+consult their cluster administrator to be informed of the tiers supported by the cluster. Attempts
+to schedule jobs with an unsupported tier will be rejected by the scheduler.
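+
+A tier is selected in the job configuration; a minimal sketch (other job fields
+elided):
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      tier = 'preferred',
+      ...
+    )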
+
+
+Preemption
+----------
+
+In order to guarantee that important production jobs are always running, Aurora supports
+preemption.
+
+Consider a pending job that is a candidate for scheduling, but cannot be scheduled because of a
+resource shortage. Active tasks can become victims of preemption if:
+
+ - both candidate and victim are owned by the same role and the
+   [priority](../../reference/configuration/#job-objects) of a victim is lower than the
+   [priority](../../reference/configuration/#job-objects) of the candidate.
+ - OR a victim is a `preemptible` or `revocable` [tier](#configuration-tiers) task and the candidate
+   is a `preferred` [tier](#configuration-tiers) task.
+
+In other words, tasks from `preferred` [tier](../../reference/configuration/#job-objects) jobs may
+preempt tasks from any `preemptible` or `revocable` job. However, a `preferred` task may only be
+preempted by tasks from `preferred` jobs in the same role with higher [priority](../../reference/configuration/#job-objects).
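+
+As a sketch (only the relevant fields are shown; both jobs are hypothetical), the
+second job below may preempt the first when resources are scarce, because both
+are owned by the same role and the candidate has the higher priority:
+
+    Job(name = 'batch_report', role = 'www-data', priority = 0, ...)
+    Job(name = 'frontend', role = 'www-data', priority = 10, ...)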
+
+Aurora requires resource quotas for [production non-dedicated jobs](../../reference/configuration/#job-objects).
+Quota is enforced at the job role level and, when set, defines a non-preemptible pool of compute resources within
+that role. All job types (service, adhoc or cron) require role resource quota unless a job has
+[dedicated constraint set](../constraints/#dedicated-attribute).
+
+To grant quota to a particular role in production, an operator can use the command
+`aurora_admin set_quota`.
diff --git a/source/documentation/0.18.0/features/resource-isolation.md b/source/documentation/0.18.0/features/resource-isolation.md
new file mode 100644
index 0000000..a5294f6
--- /dev/null
+++ b/source/documentation/0.18.0/features/resource-isolation.md
@@ -0,0 +1,181 @@
+Resources Isolation and Sizing
+==============================
+
+This document assumes Aurora and Mesos have been configured
+using our [recommended resource isolation settings](../../operations/configuration/#resource-isolation).
+
+- [Isolation](#isolation)
+- [Sizing](#sizing)
+- [Oversubscription](#oversubscription)
+
+
+Isolation
+---------
+
+Aurora is a multi-tenant system; a single software instance runs on a
+server, serving multiple clients/tenants. To share resources among
+tenants, it leverages Mesos for isolation of:
+
+* CPU
+* GPU
+* memory
+* disk space
+* ports
+
+CPU is a soft limit, and handled differently from memory and disk space.
+Too low a CPU value results in throttling your application and
+slowing it down. Memory and disk space are both hard limits; when your
+application goes over these values, it's killed.
+
+### CPU Isolation
+
+Mesos can be configured to use a quota based CPU scheduler (the *Completely*
+*Fair Scheduler*) to provide consistent and predictable performance.
+This is effectively a guarantee of resources -- you receive at least what
+you requested, but also no more than you've requested.
+
+The scheduler gives applications a CPU quota for every 100 ms interval.
+When an application uses its quota for an interval, it is throttled for
+the rest of the 100 ms. Usage resets for each interval and unused
+quota does not carry over.
+
+For example, an application specifying 4.0 CPU has access to 400 ms of
+CPU time every 100 ms. This CPU quota can be used in different ways,
+depending on the application and available resources. Consider the
+scenarios shown in this diagram.
+
+![CPU Availability](../images/CPUavailability.png)
+
+* *Scenario A*: the application can use up to 4 cores continuously for
+every 100 ms interval. It is never throttled and starts processing
+new requests immediately.
+
+* *Scenario B* : the application uses up to 8 cores (depending on
+availability) but is throttled after 50 ms. The CPU quota resets at the
+start of each new 100 ms interval.
+
+* *Scenario C* : is like Scenario A, but there is a garbage collection
+event in the second interval that consumes all CPU quota. The
+application throttles for the remaining 75 ms of that interval and
+cannot service requests until the next interval. In this example, the
+garbage collection finished in one interval but, depending on how much
+garbage needs collecting, it may take more than one interval and further
+delay service of requests.
+
+*Technical Note*: Mesos considers logical cores, also known as
+hyperthreading or SMT cores, as the unit of CPU.
+
+### Memory Isolation
+
+Mesos uses dedicated memory allocation. Your application always has
+access to the amount of memory specified in your configuration. The
+application's memory use is defined as the sum of the resident set size
+(RSS) of all processes in a shard. Each shard is considered
+independently.
+
+In other words, say you specified a memory size of 10GB. Each shard
+would receive 10GB of memory. If an individual shard's memory demands
+exceed 10GB, that shard is killed, but the other shards continue
+working.
+
+*Technical note*: Total memory size is not enforced at allocation time,
+so your application can request more than its allocation without getting
+an ENOMEM. However, it will be killed shortly after.
+
+### Disk Space
+
+Disk space used by your application is defined as the sum of the files'
+disk space in your application's directory, including the `stdout` and
+`stderr` logged from your application. Each shard is considered
+independently. You should use off-node storage for your application's
+data whenever possible.
+
+In other words, say you specified disk space size of 100MB. Each shard
+would receive 100MB of disk space. If an individual shard's disk space
+demands exceed 100MB, that shard is killed, but the other shards
+continue working.
+
+After your application finishes running, its allocated disk space is
+reclaimed. Thus, your job's final action should move any disk content
+that you want to keep, such as logs, to your home file system or other
+less transitory storage. Disk reclamation takes place an undefined
+period after the application finish time; until then, the disk contents
+are still available but you shouldn't count on them being so.
+
+*Technical note* : Disk space is not enforced at write so your
+application can write above its quota without getting an ENOSPC, but it
+will be killed shortly after. This is subject to change.
+
+### GPU Isolation
+
+GPU isolation will be supported for Nvidia devices starting from Mesos 1.0.
+Access to the allocated units will be exclusive with no sharing between tasks
+allowed (e.g. no fractional GPU allocation). For more details, see the
+[Mesos design document](https://docs.google.com/document/d/10GJ1A80x4nIEo8kfdeo9B11PIbS1xJrrB4Z373Ifkpo/edit#heading=h.w84lz7p4eexl)
+and the [Mesos agent configuration](http://mesos.apache.org/documentation/latest/configuration/).
+
+### Other Resources
+
+Other resources, such as network bandwidth, do not have any performance
+guarantees. For some resources, such as memory bandwidth, there are no
+practical sharing methods so some application combinations collocated on
+the same host may cause contention.
+
+
+Sizing
+-------
+
+### CPU Sizing
+
+To correctly size Aurora-run Mesos tasks, specify a per-shard CPU value
+that lets the task run at its desired performance when at peak load
+distributed across all shards. Include reserve capacity of at least 50%,
+possibly more, depending on how critical your service is (or how
+confident you are about your original estimate :-)), ideally by
+increasing the number of shards to also improve resiliency. When running
+your application, observe its CPU stats over time. If consistently at or
+near your quota during peak load, you should consider increasing either
+per-shard CPU or the number of shards.
+
+### Memory Sizing
+
+Size for your application's peak requirement. Observe the per-instance
+memory statistics over time, as memory requirements can vary over
+different periods. Remember that if your application exceeds its memory
+value, it will be killed, so you should also add a safety margin of
+around 10-20%. If you have the ability to do so, you may also want to
+put alerts on the per-instance memory.
+
+### Disk Space Sizing
+
+Size for your application's peak requirement. Rotate and discard log
+files as needed to stay within your quota. When running a Java process,
+add the maximum size of the Java heap to your disk space requirement, in
+order to account for an out of memory error dumping the heap
+into the application's sandbox space.
+
+### GPU Sizing
+
+GPU is highly dependent on your application requirements and is only limited
+by the number of physical GPU units available on a target box.
+
+
+Oversubscription
+----------------
+
+Mesos supports [oversubscription of machine resources](http://mesos.apache.org/documentation/latest/oversubscription/)
+via the concept of revocable tasks. In contrast to non-revocable tasks, revocable tasks are best-effort.
+Mesos reserves the right to throttle or even kill them if they might affect existing high-priority
+user-facing services.
+
+As of today, the only revocable resources supported by Aurora are CPU and RAM. A job can
+opt-in to use those by specifying the `revocable` [Configuration Tier](../../features/multitenancy/#configuration-tiers).
+A revocable job will only be scheduled using revocable resources, even if there are plenty of
+non-revocable resources available.
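+
+For illustration, opting in might look like the sketch below (the job name and
+values are made up; the `tier` field is documented in the Configuration
+Reference):
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'prod',
+        name = 'best_effort_service',
+        tier = 'revocable',   # schedule only on revocable CPU/RAM offers
+        task = task)          # assumes a Task object defined elsewhere
+    ]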
+
+The Aurora scheduler must be [configured to receive revocable offers](../../operations/configuration/#resource-isolation)
+from Mesos and accept revocable jobs. If not configured properly, revocable tasks will never get
+assigned to hosts and will stay in `PENDING`.
+
+For details on how to mark a job as being revocable, see the
+[Configuration Reference](../../reference/configuration/).
diff --git a/source/documentation/0.18.0/features/service-discovery.md b/source/documentation/0.18.0/features/service-discovery.md
new file mode 100644
index 0000000..5a97eaf
--- /dev/null
+++ b/source/documentation/0.18.0/features/service-discovery.md
@@ -0,0 +1,42 @@
+Service Discovery
+=================
+
+It is possible for the Aurora executor to announce tasks into ServerSets for
+the purpose of service discovery.  ServerSets use the Zookeeper [group membership pattern](http://zookeeper.apache.org/doc/trunk/recipes.html#sc_outOfTheBox)
+of which there are several reference implementations:
+
+  - [C++](https://github.com/apache/mesos/blob/master/src/zookeeper/group.cpp)
+  - [Java](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/zookeeper/ServerSetImpl.java#L221)
+  - [Python](https://github.com/twitter/commons/blob/master/src/python/twitter/common/zookeeper/serverset/serverset.py#L51)
+
+These can also be used natively in Finagle using the [ZookeeperServerSetCluster](https://github.com/twitter/finagle/blob/master/finagle-serversets/src/main/scala/com/twitter/finagle/zookeeper/ZookeeperServerSetCluster.scala).
+
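+As an illustrative sketch, a job opts in to announcing by attaching an `Announcer`
+to its `Job` object (the names and values below are made up for this example):
+
+    hello_service = Service(
+      cluster = 'devcluster',
+      role = 'www-data',
+      environment = 'prod',
+      name = 'hello',
+      task = task,                                  # assumes a Task defined elsewhere
+      announce = Announcer(primary_port = 'http'))  # announce into a ServerSet
+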
+For more information about how to configure announcing, see the [Configuration Reference](../../reference/configuration/).
+
+Using Mesos DiscoveryInfo
+-------------------------
+Experimental support for populating DiscoveryInfo in Mesos is introduced in Aurora. This can be used to build
+a custom service discovery system that does not rely on ZooKeeper. Please see the `Service Discovery` section in the
+[Mesos Framework Development guide](http://mesos.apache.org/documentation/latest/app-framework-development-guide/) for
+an explanation of the protobuf message in Mesos.
+
+To use this feature, please enable the `--populate_discovery_info` flag on the scheduler. All jobs started by the scheduler
+afterwards will have their portmap populated in Mesos and discoverable via the `/state` endpoint on the Mesos master and agent.
+
+### Using Mesos DNS
+One example is [Mesos-DNS](https://github.com/mesosphere/mesos-dns), which is able to generate multiple DNS
+records. With the current implementation, the example job with key `devcluster/vagrant/test/http-example` generates at
+least the following:
+
+1. An A record for `http_example.test.vagrant.aurora.mesos` (which only includes the IP address);
+2. A [SRV record](https://en.wikipedia.org/wiki/SRV_record) for
+   `_http_example.test.vagrant._tcp.aurora.mesos`, which includes the IP address and every port. This should only
+   be used if the service has one port.
+3. A SRV record `_{port-name}._http_example.test.vagrant._tcp.aurora.mesos` for each port name
+   defined. This should be used when the service has multiple ports.
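+
+For illustration only, a client could resolve one of these records with a
+third-party DNS library such as `dnspython` (not something Aurora or Mesos-DNS
+provides; the record name is taken from the example above):
+
+    import dns.resolver  # third-party package: pip install dnspython
+
+    # Look up host/port pairs for the example job's single announced port.
+    for srv in dns.resolver.resolve('_http_example.test.vagrant._tcp.aurora.mesos.', 'SRV'):
+        print(srv.target.to_text(), srv.port)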
+
+Things to note:
+
+1. The domain part (".mesos" in the above example) can be configured in [Mesos DNS](http://mesosphere.github.io/mesos-dns/docs/configuration-parameters.html);
+2. Right now, the portmap and port aliases in the announcer object are not reflected in DiscoveryInfo, and are therefore not visible in
+   Mesos DNS records either. This is because they are only resolved in the Thermos executor.
diff --git a/source/documentation/0.18.0/features/services.md b/source/documentation/0.18.0/features/services.md
new file mode 100644
index 0000000..91889d2
--- /dev/null
+++ b/source/documentation/0.18.0/features/services.md
@@ -0,0 +1,116 @@
+Long-running Services
+=====================
+
+Jobs that are always restarted on completion, whether successful or unsuccessful,
+are called services. This is useful for long-running processes
+such as webservices that should always be running, unless stopped explicitly.
+
+
+Service Specification
+---------------------
+
+A job is identified as a service by the presence of the flag
+`service=True` in the [`Job`](../../reference/configuration/#job-objects) object.
+The `Service` alias can be used as shorthand for `Job` with `service=True`.
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/hello_world.aurora
+    hello = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = SequentialTask(
+      processes = [hello],
+      resources = Resources(cpu = 1.0, ram = 128*MB, disk = 128*MB)
+    )
+
+    jobs = [
+      Service(
+        task = task,
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'prod',
+        name = 'hello'
+      )
+    ]
+
+
+Jobs without the service bit set only restart up to `max_task_failures` times and only if they
+terminated unsuccessfully either due to human error or machine failure (see the
+[`Job`](../../reference/configuration/#job-objects) object for details).
+
+
+Ports
+-----
+
+In order to be useful, most services have to bind to one or more ports. Aurora enables this
+use case via the [`thermos.ports` namespace](../../reference/configuration/#thermos-namespace), which
+allows you to request arbitrarily named ports:
+
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[http]}}'
+    )
+
+
+When this process is included in a job, the job will be allocated a port, and the command line
+will be replaced with something like:
+
+    ./run_nginx.sh -port 42816
+
+Where 42816 happens to be the allocated port.
+
+For details on how to enable clients to discover this dynamically assigned port, see our
+[Service Discovery](../service-discovery/) documentation.
+
+
+Health Checking
+---------------
+
+Typically, the Thermos executor monitors processes within a task only by liveness of the forked
+process. In addition to that, Aurora has support for rudimentary health checking: either via HTTP
+or via custom shell scripts.
+
+For example, simply by requesting a `health` port, a process can request to be health checked
+via repeated calls to the `/health` endpoint:
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[health]}}'
+    )
+
+Please see the
+[configuration reference](../../reference/configuration/#healthcheckconfig-objects)
+for configuration options for this feature.
+
+Starting with the 0.17.0 release, job updates rely only on task health-checks by introducing
+a `min_consecutive_successes` parameter on the HealthCheckConfig object. This parameter represents
+the number of successful health checks needed before a task is moved into the `RUNNING` state. Tasks
+that do not have enough successful health checks within the first `n` attempts, are moved to the
+`FAILED` state, where `n = ceil(initial_interval_secs/interval_secs) + max_consecutive_failures +
+min_consecutive_successes`. In order to accommodate variability during task warm up, `initial_interval_secs`
+will act as a grace period. Any health-check failures during the first `m` attempts are ignored and
+do not count towards `max_consecutive_failures`, where `m = ceil(initial_interval_secs/interval_secs)`.
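+
+As a quick worked example of those formulas (the values below are made up):
+
+    import math
+
+    initial_interval_secs = 10
+    interval_secs = 2
+    max_consecutive_failures = 2
+    min_consecutive_successes = 1
+
+    m = math.ceil(initial_interval_secs / interval_secs)          # 5 grace-period attempts
+    n = m + max_consecutive_failures + min_consecutive_successes  # FAILED after 8 attempts without enough successes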
+
+As [job updates](../job-updates/) are based only on health-checks, it is not necessary to set
+`watch_secs` to the worst-case update time; it can instead be set to 0. The scheduler considers a
+task that is in the `RUNNING` state to be healthy and proceeds to update the next batch of instances.
+For details on how to control health checks, please see the
+[HealthCheckConfig](../../reference/configuration/#healthcheckconfig-objects) configuration object.
+Existing jobs that do not configure a health-check can fall back to using `watch_secs` to
+monitor a task before considering it healthy.
+
+You can pause health checking by touching a file inside of your sandbox, named `.healthchecksnooze`.
+As long as that file is present, health checks will be disabled, enabling users to gather core
+dumps or other performance measurements without worrying about Aurora's health check killing
+their process.
+
+WARNING: Remember to remove this file when you are done; otherwise your instance will have permanently
+disabled health checks.
diff --git a/source/documentation/0.18.0/features/sla-metrics.md b/source/documentation/0.18.0/features/sla-metrics.md
new file mode 100644
index 0000000..327c153
--- /dev/null
+++ b/source/documentation/0.18.0/features/sla-metrics.md
@@ -0,0 +1,215 @@
+Aurora SLA Measurement
+======================
+
+- [Overview](#overview)
+- [Metric Details](#metric-details)
+  - [Platform Uptime](#platform-uptime)
+  - [Job Uptime](#job-uptime)
+  - [Median Time To Assigned (MTTA)](#median-time-to-assigned-\(mtta\))
+  - [Median Time To Starting (MTTS)](#median-time-to-starting-\(mtts\))
+  - [Median Time To Running (MTTR)](#median-time-to-running-\(mttr\))
+- [Limitations](#limitations)
+
+## Overview
+
+The primary goal of the feature is collection and monitoring of Aurora job SLA (Service Level
+Agreement) metrics that define a contractual relationship between the Aurora/Mesos platform
+and hosted services.
+
+The Aurora SLA feature is by default only enabled for service (non-cron)
+production jobs (`"production=True"` in your `.aurora` config). It can be enabled for
+non-production services by an operator via the scheduler command line flag `-sla_non_prod_metrics`.
+
+Counters that track SLA measurements are computed periodically within the scheduler.
+The individual instance metrics are refreshed every minute (configurable via
+`sla_stat_refresh_interval`). The instance counters are subsequently aggregated by
+relevant grouping types before being exported to the scheduler `/vars` endpoint (when using `vagrant`,
+that would be `http://192.168.33.7:8081/vars`).
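+
+For example, one way to pull just the SLA counters out of `/vars` is a small
+script like the sketch below (standard library only; the URL is the Vagrant
+address mentioned above):
+
+    import urllib.request
+
+    with urllib.request.urlopen('http://192.168.33.7:8081/vars') as resp:
+        for line in resp.read().decode().splitlines():
+            if line.startswith('sla_'):
+                print(line)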
+
+
+## Metric Details
+
+### Platform Uptime
+
+*Aggregate amount of time a job spends in a non-runnable state due to platform unavailability
+or scheduling delays. This metric tracks Aurora/Mesos uptime performance and reflects on any
+system-caused downtime events (tasks LOST or DRAINED). Any user-initiated task kills/restarts
+will not degrade this metric.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_platform_uptime_percent`
+* Per cluster - `sla_cluster_platform_uptime_percent`
+
+**Units:** percent
+
+A fault in the task environment may cause Aurora/Mesos to have different views of the task state
+or to lose track of the task's existence. In such cases, the service task is marked as LOST and
+rescheduled by Aurora. For example, this may happen when the task stays in ASSIGNED or STARTING
+for too long or the Mesos agent becomes unhealthy (or disappears completely). The time between
+task entering LOST and its replacement reaching RUNNING state is counted towards platform downtime.
+
+Another example of a platform downtime event is administrator-requested task rescheduling. This
+happens during planned Mesos agent maintenance when all agent tasks are marked as DRAINED and
+rescheduled elsewhere.
+
+To accurately calculate Platform Uptime, we must separate platform incurred downtime from user
+actions that put a service instance in a non-operational state. It is simpler to isolate
+user-incurred downtime and treat all other downtime as platform incurred.
+
+Currently, a user can cause a healthy service (task) downtime in only two ways: via `killTasks`
+or `restartShards` RPCs. For both, their affected tasks leave an audit state transition trail
+relevant to uptime calculations. By applying a special "SLA meaning" to exposed task state
+transition records, we can build a deterministic downtime trace for every given service instance.
+
+A task going through a state transition carries one of three possible SLA meanings
+(see [SlaAlgorithm.java](https://github.com/apache/aurora/blob/rel/0.18.0/src/main/java/org/apache/aurora/scheduler/sla/SlaAlgorithm.java) for
+sla-to-task-state mapping):
+
+* Task is UP: starts a period where the task is considered to be up and running from the Aurora
+  platform standpoint.
+
+* Task is DOWN: starts a period where the task cannot reach the UP state for some
+  non-user-related reason. Counts towards instance downtime.
+
+* Task is REMOVED from SLA: starts a period where the task is not expected to be UP due to
+  user initiated action or failure. We ignore this period for the uptime calculation purposes.
+
+This metric is recalculated over the last sampling period (last minute) to account for
+any UP/DOWN/REMOVED events. It ignores any UP/DOWN events not immediately adjacent to the
+sampling interval as well as adjacent REMOVED events.
+
+### Job Uptime
+
+*Percentage of the job instances considered to be in RUNNING state for the specified duration
+relative to request time. This is a purely application-side metric that considers the aggregate
+uptime of all RUNNING instances. Any user- or platform-initiated restarts directly affect
+this metric.*
+
+**Collection scope:** We currently expose job uptime values at 5 pre-defined
+percentiles (50th, 75th, 90th, 95th, and 99th):
+
+* `sla_<job_key>_job_uptime_50_00_sec`
+* `sla_<job_key>_job_uptime_75_00_sec`
+* `sla_<job_key>_job_uptime_90_00_sec`
+* `sla_<job_key>_job_uptime_95_00_sec`
+* `sla_<job_key>_job_uptime_99_00_sec`
+
+**Units:** seconds
+
+You can also get customized real-time stats from the Aurora client. See `aurora sla -h` for
+more details.
+
+### Median Time To Assigned (MTTA)
+
+*Median time a job spends waiting for its tasks to be assigned to a host. This is a combined
+metric that helps track the dependency of scheduling performance on the requested resources
+(user scope) as well as the internal scheduler bin-packing algorithm efficiency (platform scope).*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtta_ms`
+* Per cluster - `sla_cluster_mtta_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.18.0/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mtta_ms`
+    * `sla_cpu_medium_mtta_ms`
+    * `sla_cpu_large_mtta_ms`
+    * `sla_cpu_xlarge_mtta_ms`
+    * `sla_cpu_xxlarge_mtta_ms`
+  * By RAM:
+    * `sla_ram_small_mtta_ms`
+    * `sla_ram_medium_mtta_ms`
+    * `sla_ram_large_mtta_ms`
+    * `sla_ram_xlarge_mtta_ms`
+    * `sla_ram_xxlarge_mtta_ms`
+  * By DISK:
+    * `sla_disk_small_mtta_ms`
+    * `sla_disk_medium_mtta_ms`
+    * `sla_disk_large_mtta_ms`
+    * `sla_disk_xlarge_mtta_ms`
+    * `sla_disk_xxlarge_mtta_ms`
+
+**Units:** milliseconds
+
+MTTA only considers instances that have already reached ASSIGNED state and ignores those
+that are still PENDING. This ensures straggler instances (e.g. with unreasonable resource
+constraints) do not affect metric curves.
+
+### Median Time To Starting (MTTS)
+
+*Median time a job waits for its tasks to reach STARTING state. This is a comprehensive metric
+reflecting on the overall time it takes for Aurora/Mesos to start initializing the sandbox
+for a task.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtts_ms`
+* Per cluster - `sla_cluster_mtts_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.18.0/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mtts_ms`
+    * `sla_cpu_medium_mtts_ms`
+    * `sla_cpu_large_mtts_ms`
+    * `sla_cpu_xlarge_mtts_ms`
+    * `sla_cpu_xxlarge_mtts_ms`
+  * By RAM:
+    * `sla_ram_small_mtts_ms`
+    * `sla_ram_medium_mtts_ms`
+    * `sla_ram_large_mtts_ms`
+    * `sla_ram_xlarge_mtts_ms`
+    * `sla_ram_xxlarge_mtts_ms`
+  * By DISK:
+    * `sla_disk_small_mtts_ms`
+    * `sla_disk_medium_mtts_ms`
+    * `sla_disk_large_mtts_ms`
+    * `sla_disk_xlarge_mtts_ms`
+    * `sla_disk_xxlarge_mtts_ms`
+
+**Units:** milliseconds
+
+MTTS only considers instances in STARTING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+### Median Time To Running (MTTR)
+
+*Median time a job waits for its tasks to reach RUNNING state. This is a comprehensive metric
+reflecting on the overall time it takes for Aurora/Mesos to start executing user content.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mttr_ms`
+* Per cluster - `sla_cluster_mttr_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.18.0/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mttr_ms`
+    * `sla_cpu_medium_mttr_ms`
+    * `sla_cpu_large_mttr_ms`
+    * `sla_cpu_xlarge_mttr_ms`
+    * `sla_cpu_xxlarge_mttr_ms`
+  * By RAM:
+    * `sla_ram_small_mttr_ms`
+    * `sla_ram_medium_mttr_ms`
+    * `sla_ram_large_mttr_ms`
+    * `sla_ram_xlarge_mttr_ms`
+    * `sla_ram_xxlarge_mttr_ms`
+  * By DISK:
+    * `sla_disk_small_mttr_ms`
+    * `sla_disk_medium_mttr_ms`
+    * `sla_disk_large_mttr_ms`
+    * `sla_disk_xlarge_mttr_ms`
+    * `sla_disk_xxlarge_mttr_ms`
+
+**Units:** milliseconds
+
+MTTR only considers instances in RUNNING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+## Limitations
+
+* The availability of Aurora SLA metrics is bound by the scheduler availability.
+
+* All metrics are calculated at a pre-defined interval (currently set at 1 minute).
+  Scheduler restarts may result in missed collections.
diff --git a/source/documentation/0.18.0/features/webhooks.md b/source/documentation/0.18.0/features/webhooks.md
new file mode 100644
index 0000000..a060975
--- /dev/null
+++ b/source/documentation/0.18.0/features/webhooks.md
@@ -0,0 +1,80 @@
+Webhooks
+========
+
+Aurora has an optional feature which allows the operator to specify a file to configure an HTTP webhook
+to receive task state change events. It can be enabled with a scheduler flag, e.g.
+`-webhook_config=/path/to/webhook.json`. At this point, webhooks are still considered *experimental*.
+
+Below is a sample configuration:
+
+```json
+{
+  "headers": {
+    "Content-Type": "application/vnd.kafka.json.v1+json",
+    "Producer-Type": "reliable"
+  },
+  "targetURL": "http://localhost:5000/",
+  "timeoutMsec": 5
+}
+```
+
+And an example of a response that you will get back:
+
+```json
+{
+    "task":
+    {
+        "cachedHashCode":0,
+        "assignedTask": {
+            "cachedHashCode":0,
+            "taskId":"vagrant-test-http_example-8-a6cf7ec5-d793-49c7-b10f-0e14ab80bfff",
+            "task": {
+                "cachedHashCode":-1819348376,
+                "job": {
+                    "cachedHashCode":803049425,
+                    "role":"vagrant",
+                    "environment":"test",
+                    "name":"http_example"
+                    },
+                "owner": {
+                    "cachedHashCode":226895216,
+                    "user":"vagrant"
+                    },
+                "isService":true,
+                "numCpus":0.1,
+                "ramMb":16,
+                "diskMb":8,
+                "priority":0,
+                "maxTaskFailures":1,
+                "production":false,
+                "resources":[
+                    {"cachedHashCode":729800451,"setField":"NUM_CPUS","value":0.1},
+                    {"cachedHashCode":552899914,"setField":"RAM_MB","value":16},
+                    {"cachedHashCode":-1547868317,"setField":"DISK_MB","value":8},
+                    {"cachedHashCode":1957328227,"setField":"NAMED_PORT","value":"http"},
+                    {"cachedHashCode":1954229436,"setField":"NAMED_PORT","value":"tcp"}
+                    ],
+                "constraints":[],
+                "requestedPorts":["http","tcp"],
+                "taskLinks":{"http":"http://%host%:%port:http%"},
+                "contactEmail":"vagrant@localhost",
+                "executorConfig": {
+                    "cachedHashCode":-1194797325,
+                    "name":"AuroraExecutor",
+                    "data": "{\"environment\": \"test\", \"health_check_config\": {\"initial_interval_secs\": 5.0, \"health_checker\": { \"http\": {\"expected_response_code\": 0, \"endpoint\": \"/health\", \"expected_response\": \"ok\"}}, \"max_consecutive_failures\": 0, \"timeout_secs\": 1.0, \"interval_secs\": 1.0}, \"name\": \"http_example\", \"service\": true, \"max_task_failures\": 1, \"cron_collision_policy\": \"KILL_EXISTING\", \"enable_hooks\": false, \"cluster\": \"devcluster\", \"task\": {\"processes\": [{\"daemon\": false, \"name\": \"echo_ports\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"echo \\\"tcp port: {{thermos.ports[tcp]}}; http port: {{thermos.ports[http]}}; alias: {{thermos.ports[alias]}}\\\"\", \"final\": false}, {\"daemon\": false, \"name\": \"stage_server\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"cp /vagrant/src/test/sh/org/apache/aurora/e2e/http_example.py .\", \"final\": false}, {\"daemon\": false, \"name\": \"run_server\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"python http_example.py {{thermos.ports[http]}}\", \"final\": false}], \"name\": \"http_example\", \"finalization_wait\": 30, \"max_failures\": 1, \"max_concurrency\": 0, \"resources\": {\"disk\": 8388608, \"ram\": 16777216, \"cpu\": 0.1}, \"constraints\": [{\"order\": [\"echo_ports\", \"stage_server\", \"run_server\"]}]}, \"production\": false, \"role\": \"vagrant\", \"contact\": \"vagrant@localhost\", \"announce\": {\"primary_port\": \"http\", \"portmap\": {\"alias\": \"http\"}}, \"lifecycle\": {\"http\": {\"graceful_shutdown_endpoint\": \"/quitquitquit\", \"port\": \"health\", \"shutdown_endpoint\": \"/abortabortabort\"}}, \"priority\": 0}"},
+                    "metadata":[],
+                    "container":{
+                        "cachedHashCode":-1955376216,
+                        "setField":"MESOS",
+                        "value":{"cachedHashCode":31}}
+                    },
+                    "assignedPorts":{},
+                    "instanceId":8
+        },
+        "status":"PENDING",
+        "failureCount":0,
+        "taskEvents":[
+            {"cachedHashCode":0,"timestamp":1464992060258,"status":"PENDING","scheduler":"aurora"}]
+        },
+        "oldState":{}}
+```
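+
+A minimal sketch of a receiver that could accept these POSTed events, using only
+the Python standard library (the port matches the `targetURL` in the sample
+configuration above):
+
+```python
+from http.server import BaseHTTPRequestHandler, HTTPServer
+import json
+
+class WebhookHandler(BaseHTTPRequestHandler):
+    def do_POST(self):
+        body = self.rfile.read(int(self.headers.get('Content-Length', 0)))
+        event = json.loads(body)
+        # Print the task id and its new status from the state change event.
+        print(event['task']['assignedTask']['taskId'], event['task']['status'])
+        self.send_response(200)
+        self.end_headers()
+
+if __name__ == '__main__':
+    HTTPServer(('0.0.0.0', 5000), WebhookHandler).serve_forever()
+```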
diff --git a/source/documentation/0.18.0/getting-started/overview.md b/source/documentation/0.18.0/getting-started/overview.md
new file mode 100644
index 0000000..84c5916
--- /dev/null
+++ b/source/documentation/0.18.0/getting-started/overview.md
@@ -0,0 +1,112 @@
+Aurora System Overview
+======================
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+
+Components
+----------
+
+It is important to have an understanding of the components that make up
+a functioning Aurora cluster.
+
+![Aurora Components](../images/components.png)
+
+* **Aurora scheduler**
+  The scheduler is your primary interface to the work you run in your cluster.  You will
+  instruct it to run jobs, and it will manage them in Mesos for you.  You will also frequently use
+  the scheduler's read-only web interface as a heads-up display for what's running in your cluster.
+
+* **Aurora client**
+  The client (`aurora` command) is a command line tool that exposes primitives that you can use to
+  interact with the scheduler. The client operates on jobs, which are identified by job keys.
+
+  Aurora also provides an admin client (`aurora_admin` command) that contains commands built for
+  cluster administrators.  You can use this tool to do things like manage user quotas and manage
+  graceful maintenance on machines in the cluster.
+
+* **Aurora executor**
+  The executor (a.k.a. Thermos executor) is responsible for carrying out the workloads described in
+  the Aurora DSL (`.aurora` files).  The executor is what actually executes user processes.  It will
+  also perform health checking of tasks and register tasks in ZooKeeper for the purposes of dynamic
+  service discovery.
+
+* **Aurora observer**
+  The observer provides browser-based access to the status of individual tasks executing on worker
+  machines.  It gives insight into the processes executing, and facilitates browsing of task sandbox
+  directories.
+
+* **ZooKeeper**
+  [ZooKeeper](http://zookeeper.apache.org) is a distributed consensus system.  In an Aurora cluster
+  it is used for reliable election of the leading Aurora scheduler and Mesos master.  It is also
+  used as a vehicle for service discovery, see [Service Discovery](../../features/service-discovery/)
+
+* **Mesos master**
+  The master is responsible for tracking worker machines and performing accounting of their
+  resources.  The scheduler interfaces with the master to control the cluster.
+
+* **Mesos agent**
+  The agent receives work assigned by the scheduler and executes them.  It interfaces with Linux
+  isolation systems like cgroups, namespaces and Docker to manage the resource consumption of tasks.
+  When a user task is launched, the agent will launch the executor (in the context of a Linux cgroup
+  or Docker container depending upon the environment), which will in turn fork user processes.
+
+  In earlier versions of Mesos and Aurora, the Mesos agent was known as the Mesos slave.
+
+
+Jobs, Tasks and Processes
+--------------------------
+
+Aurora is a Mesos framework used to schedule *jobs* onto Mesos. Mesos
+cares about individual *tasks*, but typical jobs consist of dozens or
+hundreds of task replicas. Aurora provides a layer on top of Mesos with
+its `Job` abstraction. An Aurora `Job` consists of a task template and
+instructions for creating near-identical replicas of that task (modulo
+things like "instance id" or specific port numbers which may differ from
+machine to machine).
+
+How many tasks make up a Job is complicated. On a basic level, a Job consists of
+one task template and instructions for creating near-identical replicas of that task
+(otherwise referred to as "instances" or "shards").
+
+A task can merely be a single *process* corresponding to a single
+command line, such as `python2.7 my_script.py`. However, a task can also
+consist of many separate processes, which all run within a single
+sandbox. For example, running multiple cooperating agents together,
+such as `logrotate`, `installer`, master, or agent processes. This is
+where Thermos comes in. While Aurora provides a `Job` abstraction on
+top of Mesos `Tasks`, Thermos provides a `Process` abstraction
+underneath Mesos `Task`s and serves as part of the Aurora framework's
+executor.
+
+You define `Job`s, `Task`s, and `Process`es in a configuration file.
+Configuration files are written in Python, and make use of the
+[Pystachio](https://github.com/wickman/pystachio) templating language,
+along with specific Aurora, Mesos, and Thermos commands and methods.
+The configuration files typically end with a `.aurora` extension.
+
+Summary:
+
+* Aurora manages jobs made of tasks.
+* Mesos manages tasks made of processes.
+* Thermos manages processes.
+* All of the above is defined in `.aurora` configuration files.
+
+![Aurora hierarchy](../images/aurora_hierarchy.png)
+
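+A minimal sketch of how these layers appear in a `.aurora` file (all names and
+values here are illustrative):
+
+    hello = Process(name = 'hello', cmdline = 'echo hello')   # a Thermos Process
+
+    hello_task = Task(                                         # a Mesos task built from Processes
+      name = 'hello',
+      processes = [hello],
+      resources = Resources(cpu = 0.1, ram = 16*MB, disk = 16*MB))
+
+    jobs = [Job(                                               # an Aurora Job: replicas of the task
+      cluster = 'devcluster', role = 'www-data', environment = 'devel',
+      name = 'hello', instances = 2, task = hello_task)]
+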
+Each `Task` has a *sandbox* created when the `Task` starts and garbage
+collected when it finishes. All of a `Task`'s processes run in its
+sandbox, so processes can share state by using a shared current working
+directory.
+
+The sandbox garbage collection policy considers many factors, most
+importantly age and size. It makes a best-effort attempt to keep
+sandboxes around as long as possible post-task in order for service
+owners to inspect data and logs, should the `Task` have completed
+abnormally. But you can't design your applications assuming sandboxes
+will be around forever; instead, build log saving or other
+checkpointing mechanisms directly into your application or into your
+`Job` description.
+
diff --git a/source/documentation/0.18.0/getting-started/tutorial.md b/source/documentation/0.18.0/getting-started/tutorial.md
new file mode 100644
index 0000000..1f037fc
--- /dev/null
+++ b/source/documentation/0.18.0/getting-started/tutorial.md
@@ -0,0 +1,258 @@
+# Aurora Tutorial
+
+This tutorial shows how to use the Aurora scheduler to run (and "`printf-debug`")
+a hello world program on Mesos. This is the recommended document for new Aurora users
+to start getting up to speed on the system.
+
+- [Prerequisite](#setup-install-aurora)
+- [The Script](#the-script)
+- [Aurora Configuration](#aurora-configuration)
+- [Creating the Job](#creating-the-job)
+- [Watching the Job Run](#watching-the-job-run)
+- [Cleanup](#cleanup)
+- [Next Steps](#next-steps)
+
+
+## Prerequisite
+
+This tutorial assumes you are running [Aurora locally using Vagrant](../vagrant/).
+However, in general the instructions are also applicable to any other
+[Aurora installation](../../operations/installation/).
+
+Unless otherwise stated, all commands are to be run from the root of the aurora
+repository clone.
+
+
+## The Script
+
+Our "hello world" application is a simple Python script that loops
+forever, displaying the time every few seconds. Copy the code below and
+put it in a file named `hello_world.py` in the root of your Aurora repository clone
+(Note: this directory is the same as `/vagrant` inside the Vagrant VMs).
+
+The script has an intentional bug, which we will explain later on.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+import time
+
+def main():
+  SLEEP_DELAY = 10
+  # Python experts - ignore this blatant bug.
+  for i in xrang(100):
+    print("Hello world! The time is now: %s. Sleeping for %d secs" % (
+      time.asctime(), SLEEP_DELAY))
+    time.sleep(SLEEP_DELAY)
+
+if __name__ == "__main__":
+  main()
+```
+
+## Aurora Configuration
+
+Once we have our script/program, we need to create a *configuration
+file* that tells Aurora how to manage and launch our Job. Save the below
+code in the file `hello_world.aurora`.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+pkg_path = '/vagrant/hello_world.py'
+
+# we use a trick here to make the configuration change with
+# the contents of the file, for simplicity.  in a normal setting, packages would be
+# versioned, and the version number would be changed in the configuration.
+import hashlib
+with open(pkg_path, 'rb') as f:
+  pkg_checksum = hashlib.md5(f.read()).hexdigest()
+
+# copy hello_world.py into the local sandbox
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s . && echo %s && chmod +x hello_world.py' % (pkg_path, pkg_checksum))
+
+# run the script
+hello_world = Process(
+  name = 'hello_world',
+  cmdline = 'python -u hello_world.py')
+
+# describe the task
+hello_world_task = SequentialTask(
+  processes = [install, hello_world],
+  resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB))
+
+jobs = [
+  Service(cluster = 'devcluster',
+          environment = 'devel',
+          role = 'www-data',
+          name = 'hello_world',
+          task = hello_world_task)
+]
+```
+
+There is a lot going on in that configuration file:
+
+1. From a "big picture" viewpoint, it first defines two
+Processes. Then it defines a Task that runs the two Processes in the
+order specified in the Task definition, as well as specifying what
+computational and memory resources are available for them.  Finally,
+it defines a Job that will schedule the Task on available and suitable
+machines. This Job is the sole member of a list of Jobs; you can
+specify more than one Job in a config file.
+
+2. At the Process level, it specifies how to get your code into the
+local sandbox in which it will run. It then specifies how the code is
+actually run once the second Process starts.
+
+For more about Aurora configuration files, see the [Configuration
+Tutorial](../../reference/configuration-tutorial/) and the [Configuration
+Reference](../../reference/configuration/) (preferably after finishing this
+tutorial).
+
+
+## Creating the Job
+
+We're ready to launch our job! To do so, we use the Aurora Client to
+issue a Job creation request to the Aurora scheduler.
+
+Many Aurora Client commands take a *job key* argument, which uniquely
+identifies a Job. A job key consists of four parts, each separated by a
+"/". The four parts are  `<cluster>/<role>/<environment>/<jobname>`
+in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts existing on the agent machines. If you
+don't know what accounts are available, contact your sysadmin.
+* Environment names are namespaces; you can count on `test`, `devel`,
+`staging` and `prod` existing.
+* Jobname is the custom name of your job.
+
+When comparing two job keys, if any of the four parts is different from
+its counterpart in the other key, then the two job keys identify two separate
+jobs. If all four values are identical, the job keys identify the same job.
+
+The `clusters.json` [client configuration](../../reference/client-cluster-configuration/)
+for the Aurora scheduler defines the available cluster names.
+For Vagrant, from the top-level of your Aurora repository clone, do:
+
+    $ vagrant ssh
+
+Followed by:
+
+    vagrant@aurora:~$ cat /etc/aurora/clusters.json
+
+You'll see something like the following. The `name` value shown here corresponds to a job key's cluster value.
+
+```javascript
+[{
+  "name": "devcluster",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler",
+  "auth_mechanism": "UNAUTHENTICATED",
+  "slave_run_directory": "latest",
+  "slave_root": "/var/lib/mesos"
+}]
+```
+
+The Aurora Client command that actually runs our Job is `aurora job create`. It creates a Job as
+specified by its job key and configuration file arguments and runs it.
+
+    aurora job create <cluster>/<role>/<environment>/<jobname> <config_file>
+
+Or for our example:
+
+    aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+After entering our virtual machine using `vagrant ssh`, this returns:
+
+    vagrant@aurora:~$ aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Creating job hello_world
+     INFO] Checking status of devcluster/www-data/devel/hello_world
+    Job create succeeded: job url=http://aurora.local:8081/scheduler/www-data/devel/hello_world
+
+
+## Watching the Job Run
+
+Now that our job is running, let's see what it's doing. Access the
+scheduler web interface at `http://$scheduler_hostname:$scheduler_port/scheduler`,
+or, when using `vagrant`, at `http://192.168.33.7:8081/scheduler`.
+First we see what Jobs are scheduled:
+
+![Scheduled Jobs](../images/ScheduledJobs.png)
+
+Click on your user name, which in this case was `www-data`, and we see the Jobs associated
+with that role:
+
+![Role Jobs](../images/RoleJobs.png)
+
+If you click on your `hello_world` Job, you'll see:
+
+![hello_world Job](../images/HelloWorldJob.png)
+
+Oops, looks like our first job didn't quite work! The task is temporarily throttled for
+having failed on every attempt of the Aurora scheduler to run it. We have to figure out
+what is going wrong.
+
+On the Completed tasks tab, we see all past attempts of the Aurora scheduler to run our job.
+
+![Completed tasks tab](../images/CompletedTasks.png)
+
+We can navigate to the Task page of a failed run by clicking on the host link.
+
+![Task page](../images/TaskBreakdown.png)
+
+Once there, we see that the `hello_world` process failed. The Task page
+captures the standard error and standard output streams and makes them available.
+Clicking through to `stderr` on the failed `hello_world` process, we see what happened.
+
+![stderr page](../images/stderr.png)
+
+It looks like we made a typo in our Python script. We wanted `xrange`,
+not `xrang`. Edit the `hello_world.py` script to use the correct function
+and save it as `hello_world_v2.py`. Then update `pkg_path` in the `hello_world.aurora`
+configuration to point at the new file.
+
+In order to try again, we can now instruct the scheduler to update our job:
+
+    vagrant@aurora:~$ aurora update start devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Starting update for: hello_world
+    Job update has started. View your update progress at http://aurora.local:8081/scheduler/www-data/devel/hello_world/update/8ef38017-e60f-400d-a2f2-b5a8b724e95b
+
+This time, the task comes up.
+
+![Running Job](../images/RunningJob.png)
+
+By again clicking on the host, we inspect the Task page, and see that the
+`hello_world` process is running.
+
+![Running Task page](../images/runningtask.png)
+
+We then inspect the output by clicking on `stdout` and see our process'
+output:
+
+![stdout page](../images/stdout.png)
+
+## Cleanup
+
+Now that we're done, we kill the job using the Aurora client:
+
+    vagrant@aurora:~$ aurora job killall devcluster/www-data/devel/hello_world
+     INFO] Killing tasks for job: devcluster/www-data/devel/hello_world
+     INFO] Instances to be killed: [0]
+    Successfully killed instances [0]
+    Job killall succeeded
+
+The job page now shows the `hello_world` tasks as completed.
+
+![Killed Task page](../images/killedtask.png)
+
+## Next Steps
+
+Now that you've finished this Tutorial, you should read or do the following:
+
+- [The Aurora Configuration Tutorial](../../reference/configuration-tutorial/), which provides more examples
+  and best practices for writing Aurora configurations. You should also look at
+  the [Aurora Configuration Reference](../../reference/configuration/).
+- Explore the Aurora Client - use `aurora -h`, and read the
+  [Aurora Client Commands](../../reference/client-commands/) document.
diff --git a/source/documentation/0.18.0/getting-started/vagrant.md b/source/documentation/0.18.0/getting-started/vagrant.md
new file mode 100644
index 0000000..26b9a80
--- /dev/null
+++ b/source/documentation/0.18.0/getting-started/vagrant.md
@@ -0,0 +1,154 @@
+A local Cluster with Vagrant
+============================
+
+This document shows you how to configure a complete cluster using a virtual machine. This setup
+replicates a real cluster in your development machine as closely as possible. After you complete
+the steps outlined here, you will be ready to create and run your first Aurora job.
+
+The following sections describe these steps in detail:
+
+1. [Overview](#overview)
+1. [Install VirtualBox and Vagrant](#install-virtualbox-and-vagrant)
+1. [Clone the Aurora repository](#clone-the-aurora-repository)
+1. [Start the local cluster](#start-the-local-cluster)
+1. [Log onto the VM](#log-onto-the-vm)
+1. [Run your first job](#run-your-first-job)
+1. [Rebuild components](#rebuild-components)
+1. [Shut down or delete your local cluster](#shut-down-or-delete-your-local-cluster)
+1. [Troubleshooting](#troubleshooting)
+
+
+Overview
+--------
+
+The Aurora distribution includes a set of scripts that enable you to create a local cluster in
+your development machine. These scripts use [Vagrant](https://www.vagrantup.com/) and
+[VirtualBox](https://www.virtualbox.org/) to run and configure a virtual machine. Once the
+virtual machine is running, the scripts install and initialize Aurora and any required components
+to create the local cluster.
+
+
+Install VirtualBox and Vagrant
+------------------------------
+
+First, download and install [VirtualBox](https://www.virtualbox.org/) on your development machine.
+
+Then download and install [Vagrant](https://www.vagrantup.com/). To verify that the installation
+was successful, open a terminal window and type the `vagrant` command. You should see a list of
+common commands for this tool.
+
+
+Clone the Aurora repository
+---------------------------
+
+To obtain the Aurora source distribution, clone its Git repository using the following command:
+
+     git clone git://git.apache.org/aurora.git
+
+
+Start the local cluster
+-----------------------
+
+Now change into the `aurora/` directory, which contains the Aurora source code and
+other scripts and tools:
+
+     cd aurora/
+
+To start the local cluster, type the following command:
+
+     vagrant up
+
+This command uses the configuration scripts in the Aurora distribution to:
+
+* Download a Linux system image.
+* Start a virtual machine (VM) and configure it.
+* Install the required build tools on the VM.
+* Install Aurora's requirements (like [Mesos](http://mesos.apache.org/) and
+[Zookeeper](http://zookeeper.apache.org/)) on the VM.
+* Build and install Aurora from source on the VM.
+* Start Aurora's services on the VM.
+
+This process takes several minutes to complete.
+
+You may notice a warning that guest additions in the VM don't match your version of VirtualBox.
+This should generally be harmless, but you may wish to install a vagrant plugin to take care of
+mismatches like this for you:
+
+     vagrant plugin install vagrant-vbguest
+
+With this plugin installed, whenever you `vagrant up` the plugin will upgrade the guest additions
+for you when a version mis-match is detected. You can read more about the plugin
+[here](https://github.com/dotless-de/vagrant-vbguest).
+
+To verify that Aurora is running on the cluster, visit the following URLs:
+
+* Scheduler - http://192.168.33.7:8081
+* Observer - http://192.168.33.7:1338
+* Mesos Master - http://192.168.33.7:5050
+* Mesos Agent - http://192.168.33.7:5051
+
+
+Log onto the VM
+---------------
+
+To SSH into the VM, run the following command in your development machine:
+
+     vagrant ssh
+
+To verify that Aurora is installed in the VM, type the `aurora` command. You should see a list
+of arguments and possible commands.
+
+The `/vagrant` directory on the VM is mapped to the `aurora/` local directory
+from which you started the cluster. You can edit files inside this directory in your development
+machine and access them from the VM under `/vagrant`.
+
+A pre-installed `clusters.json` file refers to your local cluster as `devcluster`, which you
+will use in client commands.
+
+
+Run your first job
+------------------
+
+Now that your cluster is up and running, you are ready to define and run your first job in Aurora.
+For more information, see the [Aurora Tutorial](../tutorial/).
+
+
+Rebuild components
+------------------
+
+If you are changing Aurora code and would like to rebuild a component, you can use the `aurorabuild`
+command on the VM to build and restart a component.  This is considerably faster than destroying
+and rebuilding your VM.
+
+`aurorabuild` accepts a list of components to build and update. To get a list of supported
+components, invoke the `aurorabuild` command with no arguments. For example, to rebuild the client:
+
+     vagrant ssh -c 'aurorabuild client'
+
+
+Shut down or delete your local cluster
+--------------------------------------
+
+To shut down your local cluster, run the `vagrant halt` command in your development machine. To
+start it again, run the `vagrant up` command.
+
+Once you are finished with your local cluster, or if you would otherwise like to start from scratch,
+you can use the command `vagrant destroy` to turn off and delete the virtual file system.
+
+
+Troubleshooting
+---------------
+
+Most Vagrant-related problems can be fixed by the following steps:
+
+* Destroying the vagrant environment with `vagrant destroy`
+* Killing any orphaned VMs (see AURORA-499) with `virtualbox` UI or `VBoxManage` command line tool
+* Cleaning the repository of build artifacts and other intermediate output with `git clean -fdx`
+* Bringing up the vagrant environment with `vagrant up`
+
+If that still doesn't solve your problem, make sure to inspect the log files:
+
+* Scheduler: `/var/log/upstart/aurora-scheduler.log`
+* Observer: `/var/log/upstart/aurora-thermos-observer.log`
+* Mesos Master: `/var/log/mesos/mesos-master.INFO` (also see `.WARNING` and `.ERROR`)
+* Mesos Agent: `/var/log/mesos/mesos-slave.INFO` (also see `.WARNING` and `.ERROR`)
diff --git a/publish/documentation/latest/images/CPUavailability.png b/source/documentation/0.18.0/images/CPUavailability.png
similarity index 100%
copy from publish/documentation/latest/images/CPUavailability.png
copy to source/documentation/0.18.0/images/CPUavailability.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/CompletedTasks.png b/source/documentation/0.18.0/images/CompletedTasks.png
new file mode 100644
index 0000000..343e111
--- /dev/null
+++ b/source/documentation/0.18.0/images/CompletedTasks.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/HelloWorldJob.png b/source/documentation/0.18.0/images/HelloWorldJob.png
new file mode 100644
index 0000000..4977227
--- /dev/null
+++ b/source/documentation/0.18.0/images/HelloWorldJob.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/RoleJobs.png b/source/documentation/0.18.0/images/RoleJobs.png
new file mode 100644
index 0000000..95ffa49
--- /dev/null
+++ b/source/documentation/0.18.0/images/RoleJobs.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/RunningJob.png b/source/documentation/0.18.0/images/RunningJob.png
new file mode 100644
index 0000000..4e7a045
--- /dev/null
+++ b/source/documentation/0.18.0/images/RunningJob.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/ScheduledJobs.png b/source/documentation/0.18.0/images/ScheduledJobs.png
new file mode 100644
index 0000000..0be4731
--- /dev/null
+++ b/source/documentation/0.18.0/images/ScheduledJobs.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/TaskBreakdown.png b/source/documentation/0.18.0/images/TaskBreakdown.png
new file mode 100644
index 0000000..fab702c
--- /dev/null
+++ b/source/documentation/0.18.0/images/TaskBreakdown.png
Binary files differ
diff --git a/publish/documentation/latest/images/aurora_hierarchy.png b/source/documentation/0.18.0/images/aurora_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/aurora_hierarchy.png
copy to source/documentation/0.18.0/images/aurora_hierarchy.png
Binary files differ
diff --git a/publish/assets/img/aurora_logo.png b/source/documentation/0.18.0/images/aurora_logo.png
similarity index 100%
copy from publish/assets/img/aurora_logo.png
copy to source/documentation/0.18.0/images/aurora_logo.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/components.odg b/source/documentation/0.18.0/images/components.odg
new file mode 100644
index 0000000..a0f3b28
--- /dev/null
+++ b/source/documentation/0.18.0/images/components.odg
Binary files differ
diff --git a/source/documentation/0.18.0/images/components.png b/source/documentation/0.18.0/images/components.png
new file mode 100644
index 0000000..e8228c0
--- /dev/null
+++ b/source/documentation/0.18.0/images/components.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/debug-client-test.png b/source/documentation/0.18.0/images/debug-client-test.png
new file mode 100644
index 0000000..8b67a10
--- /dev/null
+++ b/source/documentation/0.18.0/images/debug-client-test.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/debugging-client-test.png b/source/documentation/0.18.0/images/debugging-client-test.png
new file mode 100644
index 0000000..df7b7e3
--- /dev/null
+++ b/source/documentation/0.18.0/images/debugging-client-test.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/killedtask.png b/source/documentation/0.18.0/images/killedtask.png
new file mode 100644
index 0000000..bb1bc92
--- /dev/null
+++ b/source/documentation/0.18.0/images/killedtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/lifeofatask.png b/source/documentation/0.18.0/images/lifeofatask.png
similarity index 100%
copy from publish/documentation/latest/images/lifeofatask.png
copy to source/documentation/0.18.0/images/lifeofatask.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png b/source/documentation/0.18.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
new file mode 100644
index 0000000..ea9866e
--- /dev/null
+++ b/source/documentation/0.18.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png b/source/documentation/0.18.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
new file mode 100644
index 0000000..c9539df
--- /dev/null
+++ b/source/documentation/0.18.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png b/source/documentation/0.18.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
new file mode 100644
index 0000000..99674c7
--- /dev/null
+++ b/source/documentation/0.18.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/presentations/02_28_2015_apache_aurora_thumb.png b/source/documentation/0.18.0/images/presentations/02_28_2015_apache_aurora_thumb.png
new file mode 100644
index 0000000..6e0e1fb
--- /dev/null
+++ b/source/documentation/0.18.0/images/presentations/02_28_2015_apache_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png b/source/documentation/0.18.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
new file mode 100644
index 0000000..f81a128
--- /dev/null
+++ b/source/documentation/0.18.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png b/source/documentation/0.18.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
new file mode 100644
index 0000000..6ebce80
--- /dev/null
+++ b/source/documentation/0.18.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png b/source/documentation/0.18.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
new file mode 100644
index 0000000..f8a3db1
--- /dev/null
+++ b/source/documentation/0.18.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/presentations/08_21_2014_past_present_future_thumb.png b/source/documentation/0.18.0/images/presentations/08_21_2014_past_present_future_thumb.png
new file mode 100644
index 0000000..deaebe7
--- /dev/null
+++ b/source/documentation/0.18.0/images/presentations/08_21_2014_past_present_future_thumb.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png b/source/documentation/0.18.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
new file mode 100644
index 0000000..61ea6d4
--- /dev/null
+++ b/source/documentation/0.18.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png b/source/documentation/0.18.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png
new file mode 100644
index 0000000..9f1f4af
--- /dev/null
+++ b/source/documentation/0.18.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png b/source/documentation/0.18.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
new file mode 100644
index 0000000..a8bb199
--- /dev/null
+++ b/source/documentation/0.18.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png b/source/documentation/0.18.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
new file mode 100644
index 0000000..ac5c6dc
--- /dev/null
+++ b/source/documentation/0.18.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/runningtask.png b/source/documentation/0.18.0/images/runningtask.png
new file mode 100644
index 0000000..9e7e31a
--- /dev/null
+++ b/source/documentation/0.18.0/images/runningtask.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/stderr.png b/source/documentation/0.18.0/images/stderr.png
new file mode 100644
index 0000000..6076a84
--- /dev/null
+++ b/source/documentation/0.18.0/images/stderr.png
Binary files differ
diff --git a/source/documentation/0.18.0/images/stdout.png b/source/documentation/0.18.0/images/stdout.png
new file mode 100644
index 0000000..d614097
--- /dev/null
+++ b/source/documentation/0.18.0/images/stdout.png
Binary files differ
diff --git a/publish/documentation/latest/images/storage_hierarchy.png b/source/documentation/0.18.0/images/storage_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/storage_hierarchy.png
copy to source/documentation/0.18.0/images/storage_hierarchy.png
Binary files differ
diff --git a/source/documentation/0.18.0/index.html.md b/source/documentation/0.18.0/index.html.md
new file mode 100644
index 0000000..fc414c4
--- /dev/null
+++ b/source/documentation/0.18.0/index.html.md
@@ -0,0 +1,79 @@
+## Introduction
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+We encourage you to ask questions on the [Aurora user list](http://aurora.apache.org/community/) or
+the `#aurora` IRC channel on `irc.freenode.net`.
+
+
+## Getting Started
+Information for everyone new to Apache Aurora.
+
+ * [Aurora System Overview](getting-started/overview/)
+ * [Hello World Tutorial](getting-started/tutorial/)
+ * [Local cluster with Vagrant](getting-started/vagrant/)
+
+## Features
+Description of important Aurora features.
+
+ * [Containers](features/containers/)
+ * [Cron Jobs](features/cron-jobs/)
+ * [Custom Executors](features/custom-executors/)
+ * [Job Updates](features/job-updates/)
+ * [Multitenancy](features/multitenancy/)
+ * [Resource Isolation](features/resource-isolation/)
+ * [Scheduling Constraints](features/constraints/)
+ * [Services](features/services/)
+ * [Service Discovery](features/service-discovery/)
+ * [SLA Metrics](features/sla-metrics/)
+ * [Webhooks](features/webhooks/)
+
+## Operators
+For those that wish to manage and fine-tune an Aurora cluster.
+
+ * [Installation](operations/installation/)
+ * [Configuration](operations/configuration/)
+ * [Upgrades](operations/upgrades/)
+ * [Troubleshooting](operations/troubleshooting/)
+ * [Monitoring](operations/monitoring/)
+ * [Security](operations/security/)
+ * [Storage](operations/storage/)
+ * [Backup](operations/backup-restore/)
+
+## Reference
+The complete reference of commands, configuration options, and scheduler internals.
+
+ * [Task lifecycle](reference/task-lifecycle/)
+ * Configuration (`.aurora` files)
+    - [Configuration Reference](reference/configuration/)
+    - [Configuration Tutorial](reference/configuration-tutorial/)
+    - [Configuration Best Practices](reference/configuration-best-practices/)
+    - [Configuration Templating](reference/configuration-templating/)
+ * Aurora Client
+    - [Client Commands](reference/client-commands/)
+    - [Client Hooks](reference/client-hooks/)
+    - [Client Cluster Configuration](reference/client-cluster-configuration/)
+ * [Scheduler Configuration](reference/scheduler-configuration/)
+ * [Observer Configuration](reference/observer-configuration/)
+ * [Endpoints](reference/scheduler-endpoints/)
+
+## Additional Resources
+ * [Tools integrating with Aurora](additional-resources/tools/)
+ * [Presentation videos and slides](additional-resources/presentations/)
+
+## Developers
+All the information you need to start modifying Aurora and contributing back to the project.
+
+ * [Contributing to the project](contributing/)
+ * [Committer's Guide](development/committers-guide/)
+ * [Design Documents](development/design-documents/)
+ * Developing the Aurora components:
+     - [Client](development/client/)
+     - [Scheduler](development/scheduler/)
+     - [Scheduler UI](development/ui/)
+     - [Thermos](development/thermos/)
+     - [Thrift structures](development/thrift/)
+
+
diff --git a/source/documentation/0.18.0/operations/backup-restore.md b/source/documentation/0.18.0/operations/backup-restore.md
new file mode 100644
index 0000000..b9e7d3a
--- /dev/null
+++ b/source/documentation/0.18.0/operations/backup-restore.md
@@ -0,0 +1,91 @@
+# Recovering from a Scheduler Backup
+
+**Be sure to read the entire page before attempting to restore from a backup, as it may have
+unintended consequences.**
+
+## Summary
+
+The restoration procedure replaces the existing (possibly corrupted) Mesos replicated log with an
+earlier, backed up, version and requires all schedulers to be taken down temporarily while
+restoring. Once completed, the scheduler state resets to what it was when the backup was created.
+This means any jobs/tasks created or updated after the backup are unknown to the scheduler and will
+be killed shortly after the cluster restarts. All other tasks continue operating as normal.
+
+Usually, it is a bad idea to restore a backup that is not extremely recent (i.e. older than a few
+hours). This is because the scheduler will expect the cluster to look exactly as the backup does,
+so any tasks that have been rescheduled since the backup was taken will be killed.
+
+The instructions below have been verified in the [Vagrant environment](../../getting-started/vagrant/) and,
+with minor syntax/path changes, should be applicable to any Aurora cluster.
+
+## Preparation
+
+Follow these steps to prepare the cluster for restoring from a backup:
+
+* Stop all scheduler instances
+
+* Consider blocking external traffic on the port defined by `-http_port` for all schedulers to
+prevent users from interacting with the scheduler during the restoration process. This will aid
+troubleshooting by reducing scheduler log noise and prevent users from making changes that would
+be erased once the backup snapshot is restored.
+
+* Configure `aurora_admin` access to run all commands listed in the
+  [Restore from backup](#restore-from-backup) section locally on the leading scheduler:
+  * Make sure the [clusters.json](../../reference/client-cluster-configuration/) file is configured to
+    access the scheduler directly: set the `scheduler_uri` setting and remove `zk`. Since the leader can
+    get re-elected during the restore steps, consider doing this on all scheduler replicas.
+  * Depending on your particular security approach, you will need to either turn off scheduler
+    authorization by removing the scheduler's `-http_authentication_mechanism` flag or make sure that
+    direct scheduler access is properly authorized. E.g., in the case of Kerberos you will need to edit
+    the `/etc/hosts` file to map your local IP to the scheduler URL configured in the keytabs:
+
+        <local_ip> <scheduler_domain_in_keytabs>
+
+* The next steps are required to put the scheduler into a partially disabled state, where it is still
+able to accept storage recovery requests but unable to schedule or change task states. This may be
+accomplished by updating the following scheduler configuration options (a combined example follows
+this list):
+  * Set `-mesos_master_address` to a non-existent ZooKeeper address. This will prevent the scheduler
+    from registering with Mesos. E.g.: `-mesos_master_address=zk://localhost:1111/mesos/master`
+  * Set `-max_registration_delay` to a sufficiently long interval to prevent a registration timeout
+    and, as a result, scheduler suicide. E.g.: `-max_registration_delay=360mins`
+  * Make sure the `-reconciliation_initial_delay` option is set high enough (e.g. `365days`) to
+    prevent accidental task GC. This is important, as the scheduler will attempt to reconcile the
+    cluster state and will kill all tasks when restarted with an empty Mesos replicated log.
+
+* Restart all schedulers
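+
+Putting the preparation flags together (before the restart in the final step), the partially disabled
+scheduler described above might be started with flags along these lines, using the example values
+from the list:
+
+    -mesos_master_address=zk://localhost:1111/mesos/master
+    -max_registration_delay=360mins
+    -reconciliation_initial_delay=365days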
+
+## Cleanup and re-initialize Mesos replicated log
+
+Get rid of the corrupted files and re-initialize Mesos replicated log:
+
+* Stop schedulers
+* Delete all files under `-native_log_file_path` on all schedulers
+* Initialize Mesos replica's log file: `sudo mesos-log initialize --path=<-native_log_file_path>`
+* Start schedulers
+
+## Restore from backup
+
+At this point the scheduler is ready to rehydrate from the backup:
+
+* Identify the leading scheduler by:
+  * examining the `scheduler_lifecycle_LEADER_AWAITING_REGISTRATION` metric at the scheduler
+    `/vars` endpoint. The leader will report `1`; all other replicas report `0`.
+  * examining scheduler logs
+  * or examining Zookeeper registration under the path defined by `-zk_endpoints`
+    and `-serverset_path`
+
+* Locate the desired backup file, copy it to the leading scheduler's `-backup_dir` folder and stage
+the recovery by running the following command on the leader:
+`aurora_admin scheduler_stage_recovery --bypass-leader-redirect <cluster> scheduler-backup-<yyyy-MM-dd-HH-mm>`
+
+* At this point, the recovery snapshot is staged and available for manual verification/modification
+via `aurora_admin scheduler_print_recovery_tasks --bypass-leader-redirect` and
+`scheduler_delete_recovery_tasks --bypass-leader-redirect` commands.
+See `aurora_admin help <command>` for usage details.
+
+* Commit the recovery. This instructs the scheduler to overwrite the existing Mesos replicated log with
+the provided backup snapshot and initiate a mandatory failover:
+`aurora_admin scheduler_commit_recovery --bypass-leader-redirect <cluster>`
+
+## Cleanup
+Undo any modification done during [Preparation](#preparation) sequence.
diff --git a/source/documentation/0.18.0/operations/configuration.md b/source/documentation/0.18.0/operations/configuration.md
new file mode 100644
index 0000000..34dc5dc
--- /dev/null
+++ b/source/documentation/0.18.0/operations/configuration.md
@@ -0,0 +1,339 @@
+# Scheduler Configuration
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+Examples are available under `examples/scheduler/`. For a list of available Aurora flags and their
+documentation, see [Scheduler Configuration Reference](../../reference/scheduler-configuration/).
+
+
+## A Note on Configuration
+Like Mesos, Aurora uses command-line flags for runtime configuration. As such, the Aurora
+"configuration file" is typically a `scheduler.sh` shell script of the following form:
+
+    #!/bin/bash
+    AURORA_HOME=/usr/local/aurora-scheduler
+
+    # Flags controlling the JVM.
+    JAVA_OPTS=(
+      -Xmx2g
+      -Xms2g
+      # GC tuning, etc.
+    )
+
+    # Flags controlling the scheduler.
+    AURORA_FLAGS=(
+      # Port for client RPCs and the web UI
+      -http_port=8081
+      # Log configuration, etc.
+    )
+
+    # Environment variables controlling libmesos
+    export JAVA_HOME=...
+    export GLOG_v=1
+    export LIBPROCESS_PORT=8083
+    export LIBPROCESS_IP=192.168.33.7
+
+    JAVA_OPTS="${JAVA_OPTS[*]}" exec "$AURORA_HOME/bin/aurora-scheduler" "${AURORA_FLAGS[@]}"
+
+That way Aurora's current flags are visible in `ps` and in the `/vars` admin endpoint.
+
+
+## JVM Configuration
+
+JVM settings are dependent on your environment and cluster size. They might require
+custom tuning. As a starting point, we recommend:
+
+* Ensure the initial (`-Xms`) and maximum (`-Xmx`) heap sizes are identical to prevent heap resizing
+  at runtime.
+* Either `-XX:+UseConcMarkSweepGC` or `-XX:+UseG1GC -XX:+UseStringDeduplication` is a
+  sane default for the garbage collector.
+* `-Djava.net.preferIPv4Stack=true` makes sense in most cases as well.
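+
+As an illustration, a `JAVA_OPTS` block reflecting these recommendations might look like the
+following sketch (heap sizes are placeholders and depend on your cluster):
+
+    JAVA_OPTS=(
+      # Identical initial and maximum heap sizes to avoid resizing at runtime.
+      -Xms2g
+      -Xmx2g
+      # G1 with string deduplication as a sane garbage collector default.
+      -XX:+UseG1GC
+      -XX:+UseStringDeduplication
+      -Djava.net.preferIPv4Stack=true
+    )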
+
+
+## Network Configuration
+
+By default, Aurora binds to all interfaces and auto-discovers its hostname. To reduce ambiguity,
+it helps to hardcode them:
+
+    -http_port=8081
+    -ip=192.168.33.7
+    -hostname="aurora1.us-east1.example.org"
+
+Two environment variables control the IP and port used for communication with the Mesos master
+and for the replicated log used by Aurora:
+
+    export LIBPROCESS_PORT=8083
+    export LIBPROCESS_IP=192.168.33.7
+
+It is important that those can be reached from all Mesos master and Aurora scheduler instances.
+
+
+## Replicated Log Configuration
+
+Aurora schedulers use ZooKeeper to discover log replicas and elect a leader. Only one scheduler is
+leader at a given time - the other schedulers follow log writes and prepare to take over as leader
+but do not communicate with the Mesos master. Either 3 or 5 schedulers are recommended in a
+production deployment, depending on failure tolerance, and they must have persistent storage.
+
+Below is a summary of scheduler storage configuration flags that either don't have default values
+or require attention before deploying in a production environment.
+
+### `-native_log_quorum_size`
+Defines the Mesos replicated log quorum size. In a cluster with `N` schedulers, the flag
+`-native_log_quorum_size` should be set to `floor(N/2) + 1`. So in a cluster with 1 scheduler
+it should be set to `1`, in a cluster with 3 it should be set to `2`, and in a cluster of 5 it
+should be set to `3`.
+
+  Number of schedulers (N) | `-native_log_quorum_size` setting (`floor(N/2) + 1`)
+  ------------------------ | -----------------------------------------------------
+  1                        | 1
+  3                        | 2
+  5                        | 3
+  7                        | 4
+
+*Incorrectly setting this flag will cause data corruption to occur!*
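+
+For example, each scheduler in a three-scheduler cluster would be started with:
+
+    -native_log_quorum_size=2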
+
+### `-native_log_file_path`
+Location of the Mesos replicated log files. For optimal and consistent performance, consider
+allocating a dedicated disk (preferably SSD) for the replicated log. Ensure that this disk is not
+used by anything else (e.g. no process logging) and in particular that it is a real disk
+and not just a partition.
+
+Even when a dedicated disk is used, switching the Linux kernel I/O scheduler from `CFQ` to `deadline`
+can further help with storage performance in Aurora ([see this ticket for details](https://issues.apache.org/jira/browse/AURORA-1211)).
+
+### `-native_log_zk_group_path`
+ZooKeeper path used for Mesos replicated log quorum discovery.
+
+See [code](https://github.com/apache/aurora/blob/rel/0.18.0/src/main/java/org/apache/aurora/scheduler/log/mesos/MesosLogStreamModule.java) for
+other available Mesos replicated log configuration options and default values.
+
+### Changing the Quorum Size
+Special care needs to be taken when changing the size of the Aurora scheduler quorum.
+Since Aurora uses a Mesos replicated log, similar steps need to be followed as when
+[changing the Mesos quorum size](http://mesos.apache.org/documentation/latest/operational-guide).
+
+As a preparation, increase `-native_log_quorum_size` on each existing scheduler and restart them.
+When updating from 3 to 5 schedulers, the quorum size would grow from 2 to 3.
+
+When starting the new schedulers, use the `-native_log_quorum_size` set to the new value. Failing to
+first increase the quorum size on running schedulers can in some cases result in corruption
+or truncating of the replicated log used by Aurora. In that case, see the documentation on
+[recovering from backup](../backup-restore/).
+
+
+## Backup Configuration
+
+Configuration options for the Aurora scheduler backup manager.
+
+* `-backup_interval`: The interval on which the scheduler writes local storage backups.
+   The default is every hour.
+* `-backup_dir`: Directory to write backups to. As stated above, this should not be co-located on the
+   same disk as the replicated log.
+* `-max_saved_backups`: Maximum number of backups to retain before deleting the oldest backup(s).
+
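+A hypothetical combination of these flags (values are placeholders, not recommendations) might look
+like:
+
+    -backup_interval=1hrs
+    -backup_dir=/var/lib/aurora/backups
+    -max_saved_backups=48
+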
+
+## Resource Isolation
+
+For proper CPU, memory, and disk isolation as mentioned in our [end-user documentation](../../features/resource-isolation/),
+we recommend adding the following isolators to the `--isolation` flag of the Mesos agent:
+
+* `cgroups/cpu`
+* `cgroups/mem`
+* `disk/du`
+
+In addition, we recommend setting the following [agent flags](http://mesos.apache.org/documentation/latest/configuration/) (a combined example follows this list):
+
+* `--cgroups_limit_swap` to enable memory limits on both memory and swap instead of just memory.
+  Alternatively, you could disable swap on your agent hosts.
+* `--cgroups_enable_cfs` to enable hard limits on CPU resources via the CFS bandwidth limiting
+  feature.
+* `--enforce_container_disk_quota` to enable disk quota enforcement for containers.
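+
+Taken together, a minimal sketch of the corresponding Mesos agent flags might look like this (merge
+the isolators into whatever `--isolation` value you already use):
+
+    --isolation=cgroups/cpu,cgroups/mem,disk/du
+    --cgroups_limit_swap=true
+    --cgroups_enable_cfs=true
+    --enforce_container_disk_quota=true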
+
+To enable the optional GPU support in Mesos, please see the GPU related flags in the
+[Mesos configuration](http://mesos.apache.org/documentation/latest/configuration/).
+To enable the corresponding feature in Aurora, you have to start the scheduler with the
+flag
+
+    -allow_gpu_resource=true
+
+If you want to use revocable resources, first follow the
+[Mesos oversubscription documentation](http://mesos.apache.org/documentation/latest/oversubscription/)
+and then set this Aurora scheduler flag to allow receiving revocable Mesos offers:
+
+    -receive_revocable_resources=true
+
+Both CPUs and RAM are supported as revocable resources. The former is enabled by default;
+the latter needs to be enabled via:
+
+    -enable_revocable_ram=true
+
+Unless you want to use the [default](https://github.com/apache/aurora/blob/rel/0.18.0/src/main/resources/org/apache/aurora/scheduler/tiers.json)
+tier configuration, you will also have to specify a file path:
+
+    -tier_config=path/to/tiers/config.json
+
+
+## Multi-Framework Setup
+
+Aurora holds onto Mesos offers in order to provide efficient scheduling and
+[preemption](../../features/multitenancy/#preemption). This is problematic in multi-framework
+environments as Aurora might starve other frameworks.
+
+At the cost of increased scheduling latency, Aurora can be configured to be more cooperative:
+
+* Lowering `-min_offer_hold_time` (e.g. to `1mins`) can ensure unused offers are returned back to
+  Mesos more frequently.
+* Increasing `-offer_filter_duration` (e.g. to `30secs`) will instruct Mesos
+  not to re-offer rejected resources for the given duration.
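+
+Combined, a more cooperative configuration based on the example values above might look like:
+
+    -min_offer_hold_time=1mins
+    -offer_filter_duration=30secs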
+
+Setting a [minimum amount of resources](http://mesos.apache.org/documentation/latest/quota/) for
+each Mesos role can furthermore help to ensure no framework is starved entirely.
+
+
+## Containers
+
+Both the Mesos and Docker containerizers require configuration of the Mesos agent.
+
+### Mesos Containerizer
+
+The minimal agent configuration requires enabling Docker and Appc image support for the Mesos
+containerizer:
+
+    --containerizers=mesos
+    --image_providers=appc,docker
+    --isolation=filesystem/linux,docker/runtime  # as an addition to your other isolators
+
+Further details can be found in the corresponding [Mesos documentation](http://mesos.apache.org/documentation/latest/container-image/).
+
+### Docker Containerizer
+
+The [Docker containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/)
+requires that the Docker engine be installed on each agent host. In addition, it must be enabled on the
+Mesos agents by launching them with the option:
+
+    --containerizers=mesos,docker
+
+If you would like to run a container with a read-only filesystem, it may also be necessary to use
+the scheduler flag `-thermos_home_in_sandbox` in order to set HOME to the sandbox
+before the executor runs. This makes sure that the executor/runner PEX extraction happens
+inside the sandbox instead of the container filesystem root.
+
+If you would like to supply your own parameters to `docker run` when launching jobs in docker
+containers, you may use the following flags:
+
+    -allow_docker_parameters
+    -default_docker_parameters
+
+`-allow_docker_parameters` controls whether or not users may pass their own configuration parameters
+through the job configuration files. If set to `false` (the default), the scheduler will reject
+jobs with custom parameters. *NOTE*: this setting should be used with caution as it allows any job
+owner to specify any parameters they wish, including those that may introduce security concerns
+(`privileged=true`, for example).
+
+`-default_docker_parameters` allows a cluster operator to specify a universal set of parameters that
+should be used for every container that does not have parameters explicitly configured at the job
+level. The argument accepts a multimap format:
+
+    -default_docker_parameters="read-only=true,tmpfs=/tmp,tmpfs=/run"
+
+### Common Options
+
+The following Aurora options work for both containerizers.
+
+The scheduler flag `-global_container_mounts` allows mounting paths from the host (i.e. the agent machine)
+into all containers on that host. The format is a comma-separated list of `host_path:container_path[:mode]`
+tuples. For example `-global_container_mounts=/opt/secret_keys_dir:/mnt/secret_keys_dir:ro` mounts
+`/opt/secret_keys_dir` from the agents into all launched containers. Valid modes are `ro` and `rw`.
+
+
+## Thermos Process Logs
+
+### Log destination
+By default, Thermos will write process stdout/stderr to log files in the sandbox. Process object
+configuration allows specifying alternate log file destinations like streamed stdout/stderr or
+suppression of all log output. Default behavior can be configured for the entire cluster with the
+following flag (through the `-thermos_executor_flags` argument to the Aurora scheduler):
+
+    --runner-logger-destination=both
+
+The `both` configuration will write logs to files and also stream them to the parent stdout/stderr.
+
+See [Configuration Reference](../../reference/configuration/#logger) for all destination options.
+
+### Log rotation
+By default, Thermos will not rotate the stdout/stderr logs from child processes and they will grow
+without bound. An individual user may change this behavior via configuration on the Process object,
+but it may also be desirable to change the default configuration for the entire cluster.
+In order to enable rotation by default, the following flags can be applied to Thermos (through the
+`-thermos_executor_flags` argument to the Aurora scheduler):
+
+    --runner-logger-mode=rotate
+    --runner-rotate-log-size-mb=100
+    --runner-rotate-log-backups=10
+
+In the above example, each instance of the Thermos runner will rotate stderr/stdout logs once they
+reach 100 MiB in size and keep a maximum of 10 backups. If a user has provided a custom setting for
+their process, it will override these default settings.
+
+
+## Thermos Executor Wrapper
+
+If you need to do computation before starting the Thermos executor (for example, setting a different
+`--announcer-hostname` parameter for every executor), then the Thermos executor should be invoked
+inside a wrapper script. In such a case, the Aurora scheduler should be started with
+`-thermos_executor_path` pointing to the wrapper script and `-thermos_executor_resources` set to a
+comma separated string of all the resources that should be copied into the sandbox (including the
+original Thermos executor). Ensure the wrapper script does not access resources outside of the
+sandbox, as when the script is run from within a Docker container those resources may not exist.
+
+For example, to wrap the executor inside a simple wrapper, the scheduler would be started like this:
+`-thermos_executor_path=/path/to/wrapper.sh -thermos_executor_resources=/usr/share/aurora/bin/thermos_executor.pex`
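+
+A minimal wrapper along these lines is sketched below; the hostname lookup is purely illustrative
+and should be replaced with whatever computation you actually need:
+
+    #!/bin/bash
+    # Hypothetical wrapper.sh: derive a per-host announcer hostname, then exec the real executor.
+    ANNOUNCE_HOST=$(hostname -f)
+    exec /usr/share/aurora/bin/thermos_executor.pex \
+      --announcer-hostname="${ANNOUNCE_HOST}" \
+      "$@"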
+
+## Custom Executors
+
+The scheduler can be configured to utilize a custom executor by specifying the `-custom_executor_config` flag.
+The flag must be set to the path of a valid executor configuration file.
+
+For more information on this feature please see the custom executors [documentation](../../features/custom-executors/).
+
+## A note on increasing executor overhead
+
+Increasing executor overhead on an existing cluster, whether it be for custom executors or for Thermos,
+will result in degraded preemption performance until all tasks that began life under the previous,
+lower-overhead executor configuration have been preempted or restarted.
+
+## Controlling MTTA via Update Affinity
+
+When there is high resource contention in your cluster you may experience noticeably elevated job update
+times, as well as high task churn across the cluster. This is due to Aurora's first-fit scheduling
+algorithm. To alleviate this, you can enable update affinity where the Scheduler will make a best-effort
+attempt to reuse the same agent for the updated task (so long as the resources for the job are not being
+increased).
+
+To enable this in the Scheduler, you can set the following options:
+
+    --enable_update_affinity=true
+    --update_affinity_reservation_hold_time=3mins
+
+You will need to tune the hold time to match the behavior you see in your cluster. If you have extremely
+high update throughput, you might have to extend it as processing updates could easily add significant
+delays between scheduling attempts. You may also have to tune scheduling parameters to achieve the
+throughput you need in your cluster. Some relevant settings (with defaults) are:
+
+    --max_schedule_attempts_per_sec=40
+    --initial_schedule_penalty=1secs
+    --max_schedule_penalty=1mins
+    --scheduling_max_batch_size=3
+    --max_tasks_per_schedule_attempt=5
+
+There are metrics exposed by the Scheduler which can provide guidance on where the bottleneck is.
+Example metrics to look at:
+
+    - schedule_attempts_blocks (if this number is greater than 0, then task throughput is hitting
+                                limits controlled by --max_schedule_attempts_per_sec)
+    - scheduled_task_penalty_* (metrics around scheduling penalties for tasks, if the numbers here are high
+                                then you could have high contention for resources)
+
+Most likely you'll run into limits with the number of update instances that can be processed per minute
+before you run into any other limits. So if your total work done per minute starts to exceed 2k instances,
+you may need to extend the `update_affinity_reservation_hold_time`.
diff --git a/source/documentation/0.18.0/operations/installation.md b/source/documentation/0.18.0/operations/installation.md
new file mode 100644
index 0000000..12db004
--- /dev/null
+++ b/source/documentation/0.18.0/operations/installation.md
@@ -0,0 +1,256 @@
+# Installing Aurora
+
+Source and binary distributions can be found on our
+[downloads](https://aurora.apache.org/downloads/) page.  Installing from binary packages is
+recommended for most users.
+
+- [Installing the scheduler](#installing-the-scheduler)
+- [Installing worker components](#installing-worker-components)
+- [Installing the client](#installing-the-client)
+- [Installing Mesos](#installing-mesos)
+- [Troubleshooting](#troubleshooting)
+
+If our binary packages don't suit you, our package build toolchain makes it easy to build your
+own packages. See the [instructions](https://github.com/apache/aurora-packaging) to learn how.
+
+
+## Machine profiles
+
+Given that many of these components communicate over the network, there are numerous ways you could
+assemble them to create an Aurora cluster.  The simplest way is to think in terms of three machine
+profiles:
+
+### Coordinator
+**Components**: ZooKeeper, Aurora scheduler, Mesos master
+
+A small number of machines (typically 3 or 5) responsible for cluster orchestration.  In most cases
+it is fine to co-locate these components in anything but very large clusters (> 1000 machines).
+Beyond that point, operators will likely want to manage these services on separate machines.
+In particular, you will want to use separate ZooKeeper ensembles for leader election and
+service discovery. Otherwise a service discovery error or outage can take down the entire cluster.
+
+In practice, 5 coordinators have been shown to reliably manage clusters with tens of thousands of
+machines.
+
+### Worker
+**Components**: Aurora executor, Aurora observer, Mesos agent
+
+The bulk of the cluster, where services will actually run.
+
+### Client
+**Components**: Aurora client, Aurora admin client
+
+Any machines that users submit jobs from.
+
+
+## Installing the scheduler
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        sudo start mesos-master
+
+2. Install ZooKeeper
+
+        sudo apt-get install -y zookeeperd
+
+3. Install the Aurora scheduler
+
+        sudo add-apt-repository -y ppa:openjdk-r/ppa
+        sudo apt-get update
+        sudo apt-get install -y openjdk-8-jre-headless wget
+
+        sudo update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-scheduler_0.17.0_amd64.deb
+        sudo dpkg -i aurora-scheduler_0.17.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-master
+
+2. Install ZooKeeper
+
+        sudo rpm -Uvh https://archive.cloudera.com/cdh4/one-click-install/redhat/6/x86_64/cloudera-cdh-4-0.x86_64.rpm
+        sudo yum install -y java-1.8.0-openjdk-headless zookeeper-server
+
+        sudo service zookeeper-server init
+        sudo systemctl start zookeeper-server
+
+3. Install the Aurora scheduler
+
+        sudo yum install -y wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-scheduler-0.17.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-scheduler-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Finalizing
+By default, the scheduler will start in an uninitialized mode.  This is because external
+coordination is necessary to be certain operator error does not result in a quorum of schedulers
+starting up and believing their databases are empty when in fact they should be re-joining a
+cluster.
+
+Because of this, a fresh install of the scheduler will need intervention to start up.  First,
+stop the scheduler service.
+
+* Ubuntu: `sudo stop aurora-scheduler`
+* CentOS: `sudo systemctl stop aurora`
+
+Now initialize the database:
+
+    sudo -u aurora mkdir -p /var/lib/aurora/scheduler/db
+    sudo -u aurora mesos-log initialize --path=/var/lib/aurora/scheduler/db
+
+Now you can start the scheduler back up.
+
+* Ubuntu: `sudo start aurora-scheduler`
+* CentOS: `sudo systemctl start aurora`
+
+
+## Installing worker components
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        sudo start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo apt-get install -y python2.7 wget
+
+        # NOTE: This appears to be a missing dependency of the mesos deb package and is needed
+        # for the python mesos native bindings.
+        sudo apt-get -y install libcurl4-nss-dev
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-executor_0.17.0_amd64.deb
+        sudo dpkg -i aurora-executor_0.17.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo yum install -y python2 wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-executor-0.17.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-executor-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Worker Configuration
+The executor typically does not require configuration.  Command line arguments can be passed to the
+executor via the `-thermos_executor_flags` scheduler argument.
+
+The observer needs to be configured to look at the correct Mesos directory in order to find task
+sandboxes. You should first find the Mesos working directory by looking for the Mesos agent
+`--work_dir` flag. You should see something like:
+
+        ps -eocmd | grep "mesos-slave" | grep -v grep | tr ' ' '\n' | grep "\--work_dir"
+        --work_dir=/var/lib/mesos
+
+If the flag is not set, you can view the default value like so:
+
+        mesos-slave --help
+        Usage: mesos-slave [options]
+
+          ...
+          --work_dir=VALUE      Directory path to place framework work directories
+                                (default: /tmp/mesos)
+          ...
+
+The value you find for `--work_dir`, `/var/lib/mesos` in this example, should match the Aurora
+observer value for `--mesos-root`.  You can look for that setting in a similar way on a worker
+node by grepping for `thermos_observer` and `--mesos-root`.  If the flag is not set, you can view
+the default value like so:
+
+        thermos_observer -h
+        Options:
+          ...
+          --mesos-root=MESOS_ROOT
+                                The mesos root directory to search for Thermos
+                                executor sandboxes [default: /var/lib/mesos]
+          ...
+
+In this case the default is `/var/lib/mesos` and we have a match. If there is no match, you can
+either adjust the Mesos agent start script(s) and restart the agent(s) or else adjust the
+Aurora observer start scripts and restart the observers.  To adjust the Aurora observer:
+
+#### Ubuntu Trusty
+
+    sudo sh -c 'echo "MESOS_ROOT=/tmp/mesos" >> /etc/default/thermos'
+
+#### CentOS 7
+
+Make an edit to add the `--mesos-root` flag resulting in something like:
+
+    grep -A5 OBSERVER_ARGS /etc/sysconfig/thermos
+    OBSERVER_ARGS=(
+      --port=1338
+      --mesos-root=/tmp/mesos
+      --log_to_disk=NONE
+      --log_to_stderr=google:INFO
+    )
+
+
+## Installing the client
+### Ubuntu Trusty
+
+    sudo apt-get install -y python2.7 wget
+
+    wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-tools_0.17.0_amd64.deb
+    sudo dpkg -i aurora-tools_0.17.0_amd64.deb
+
+### CentOS 7
+
+    sudo yum install -y python2 wget
+
+    wget -c https://apache.bintray.com/aurora/centos-7/aurora-tools-0.17.0-1.el7.centos.aurora.x86_64.rpm
+    sudo yum install -y aurora-tools-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Mac OS X
+
+    brew upgrade
+    brew install aurora-cli
+
+### Client Configuration
+Client configuration lives in a JSON file that describes the clusters available and how to reach
+them.  By default this file is at `/etc/aurora/clusters.json`.
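+
+As a rough sketch, a single-cluster file resembling the Vagrant environment's configuration might
+look like the following (all values are illustrative and must match your own cluster):
+
+    [{
+      "name": "devcluster",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler",
+      "auth_mechanism": "UNAUTHENTICATED",
+      "slave_run_directory": "latest",
+      "slave_root": "/var/lib/mesos"
+    }]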
+
+Jobs may be submitted to the scheduler using the client, and are described with
+[job configurations](../../reference/configuration/) expressed in `.aurora` files.  Typically you will
+maintain a single job configuration file to describe one or more deployment environments (e.g.
+dev, test, prod) for a production job.
+
+
+## Installing Mesos
+Mesos uses a single package for the Mesos master and agent.  As a result, the package dependencies
+are identical for both.
+
+### Mesos on Ubuntu Trusty
+
+    sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF
+    DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]')
+    CODENAME=$(lsb_release -cs)
+
+    echo "deb http://repos.mesosphere.io/${DISTRO} ${CODENAME} main" | \
+      sudo tee /etc/apt/sources.list.d/mesosphere.list
+    sudo apt-get -y update
+
+    # Use `apt-cache showpkg mesos | grep [version]` to find the exact version.
+    sudo apt-get -y install mesos=1.1.0-2.0.107.ubuntu1404
+
+### Mesos on CentOS 7
+
+    sudo rpm -Uvh https://repos.mesosphere.io/el/7/noarch/RPMS/mesosphere-el-repo-7-1.noarch.rpm
+    sudo yum -y install mesos-1.1.0
+
+
+## Troubleshooting
+
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions in our [Troubleshooting guide](../troubleshooting/) to help get you moving.
diff --git a/source/documentation/0.18.0/operations/monitoring.md b/source/documentation/0.18.0/operations/monitoring.md
new file mode 100644
index 0000000..2ceda62
--- /dev/null
+++ b/source/documentation/0.18.0/operations/monitoring.md
@@ -0,0 +1,181 @@
+# Monitoring your Aurora cluster
+
+Before you start running important services in your Aurora cluster, it's important to set up
+monitoring and alerting of Aurora itself.  Most of your monitoring can be against the scheduler,
+since it will give you a global view of what's going on.
+
+## Reading stats
+The scheduler exposes a *lot* of instrumentation data via its HTTP interface. You can get a quick
+peek at the first few of these in our vagrant image:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars | head'
+    async_tasks_completed 1004
+    attribute_store_fetch_all_events 15
+    attribute_store_fetch_all_events_per_sec 0.0
+    attribute_store_fetch_all_nanos_per_event 0.0
+    attribute_store_fetch_all_nanos_total 3048285
+    attribute_store_fetch_all_nanos_total_per_sec 0.0
+    attribute_store_fetch_one_events 3391
+    attribute_store_fetch_one_events_per_sec 0.0
+    attribute_store_fetch_one_nanos_per_event 0.0
+    attribute_store_fetch_one_nanos_total 454690753
+
+These values are served as `Content-Type: text/plain`, with each line containing a space-separated metric
+name and value. Values may be integers, doubles, or strings (note: strings are static, others
+may be dynamic).
+
+If your monitoring infrastructure prefers JSON, the scheduler exports that as well:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars.json | python -mjson.tool | head'
+    {
+        "async_tasks_completed": 1009,
+        "attribute_store_fetch_all_events": 15,
+        "attribute_store_fetch_all_events_per_sec": 0.0,
+        "attribute_store_fetch_all_nanos_per_event": 0.0,
+        "attribute_store_fetch_all_nanos_total": 3048285,
+        "attribute_store_fetch_all_nanos_total_per_sec": 0.0,
+        "attribute_store_fetch_one_events": 3409,
+        "attribute_store_fetch_one_events_per_sec": 0.0,
+        "attribute_store_fetch_one_nanos_per_event": 0.0,
+
+This will be the same data as above, served with `Content-Type: application/json`.
+
+## Viewing live stat samples on the scheduler
+The scheduler uses the Twitter commons stats library, which keeps an internal time-series database
+of exported variables - nearly everything in `/vars` is available for instant graphing.  This is
+useful for debugging, but is not a replacement for an external monitoring system.
+
+You can view these graphs on a scheduler at `/graphview`.  It supports some composition and
+aggregation of values, which can be invaluable when triaging a problem.  For example, if you have
+the scheduler running in vagrant, check out these links:
+
+* [simple graph](http://192.168.33.7:8081/graphview?query=jvm_uptime_secs)
+* [complex composition](http://192.168.33.7:8081/graphview?query=rate\(scheduler_log_native_append_nanos_total\)%2Frate\(scheduler_log_native_append_events\)%2F1e6)
+
+### Counters and gauges
+Among numeric stats, there are two fundamental types of stats exported: _counters_ and _gauges_.
+Counters are guaranteed to be monotonically-increasing for the lifetime of a process, while gauges
+may decrease in value.  Aurora uses counters to represent things like the number of times an event
+has occurred, and gauges to capture things like the current length of a queue.  Counters are a
+natural fit for accurate composition into [rate ratios](http://en.wikipedia.org/wiki/Rate_ratio)
+(useful for sample-resistant latency calculation), while gauges are not.
+
+# Alerting
+
+## Quickstart
+If you are looking for just bare-minimum alerting to get something in place quickly, set up alerting
+on `framework_registered` and `task_store_LOST`. These will give you a decent picture of overall
+health.
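+
+For example, both values can be pulled from the `/vars` endpoint described above (host and port are
+illustrative):
+
+    curl -s localhost:8081/vars | grep -E '^(framework_registered|task_store_LOST) '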
+
+## A note on thresholds
+One of the most difficult things in monitoring is choosing alert thresholds. With many of these
+stats, there is no value we can offer as a threshold that will be guaranteed to work for you. It
+will depend on the size of your cluster, number of jobs, churn of tasks in the cluster, etc. We
+recommend you start with a strict value after viewing a small amount of collected data, and then
+adjust thresholds as you see fit. Feel free to ask us if you would like to validate that your alerts
+and thresholds make sense.
+
+## Important stats
+
+### `jvm_uptime_secs`
+Type: integer counter
+
+The number of seconds the JVM process has been running. Comes from
+[RuntimeMXBean#getUptime()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/RuntimeMXBean.html#getUptime\(\))
+
+Detecting resets (decreasing values) on this stat will tell you that the scheduler is failing to
+stay alive.
+
+Look at the scheduler logs to identify the reason the scheduler is exiting.
+
+### `system_load_avg`
+Type: double gauge
+
+The current load average of the system for the last minute. Comes from
+[OperatingSystemMXBean#getSystemLoadAverage()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/OperatingSystemMXBean.html?is-external=true#getSystemLoadAverage\(\)).
+
+A high sustained value suggests that the scheduler machine may be over-utilized.
+
+Use standard unix tools like `top` and `ps` to track down the offending process(es).
+
+### `process_cpu_cores_utilized`
+Type: double gauge
+
+The current number of CPU cores in use by the JVM process. This should not exceed the number of
+logical CPU cores on the machine. Derived from
+[OperatingSystemMXBean#getProcessCpuTime()](http://docs.oracle.com/javase/7/docs/jre/api/management/extension/com/sun/management/OperatingSystemMXBean.html)
+
+A high sustained value indicates that the scheduler is overworked. Due to current internal design
+limitations, if this value is sustained at `1`, there is a good chance the scheduler is under water.
+
+There are two main inputs that tend to drive this figure: task scheduling attempts and status
+updates from Mesos.  You may see activity in the scheduler logs to give an indication of where
+time is being spent.  Beyond that, it really takes good familiarity with the code to effectively
+triage this.  We suggest engaging with an Aurora developer.
+
+### `task_store_LOST`
+Type: integer gauge
+
+The number of tasks stored in the scheduler that are in the `LOST` state, and have been rescheduled.
+
+If this value is increasing at a high rate, it is a sign of trouble.
+
+There are many sources of `LOST` tasks in Mesos: the scheduler, master, agent, and executor can all
+trigger this.  The first step is to look in the scheduler logs for `LOST` to identify where the
+state changes are originating.
+
+### `scheduler_resource_offers`
+Type: integer counter
+
+The number of resource offers that the scheduler has received.
+
+For a healthy scheduler, this value must be increasing over time.
+
+Assuming the scheduler is up and otherwise healthy, you will want to check if the master thinks it
+is sending offers. You should also look at the master's web interface to see if it has a large
+number of outstanding offers that are waiting to be returned.
+
+### `framework_registered`
+Type: binary integer counter
+
+Will be `1` for the leading scheduler that is registered with the Mesos master, and `0` for passive
+schedulers.
+
+A sustained period without a `1` (or where `sum() != 1`) warrants investigation.
+
+If there is no leading scheduler, look in the scheduler and master logs for why.  If there are
+multiple schedulers claiming leadership, this suggests a split brain and warrants filing a critical
+bug.
+
+### `rate(scheduler_log_native_append_nanos_total)/rate(scheduler_log_native_append_events)`
+Type: rate ratio of integer counters
+
+This composes two counters to compute a windowed figure for the latency of replicated log writes.
+
+A hike in this value suggests disk bandwidth contention.
+
+Look in scheduler logs for any reported oddness with saving to the replicated log. Also use
+standard tools like `vmstat` and `iotop` to identify whether the disk has become slow or
+over-utilized. We suggest using a dedicated disk for the replicated log to mitigate this.
+
+### `timed_out_tasks`
+Type: integer counter
+
+Tracks the number of times the scheduler has given up while waiting
+(for `-transient_task_state_timeout`) to hear back about a task that is in a transient state
+(e.g. `ASSIGNED`, `KILLING`), and has moved to `LOST` before rescheduling.
+
+This value is currently known to increase occasionally when the scheduler fails over
+([AURORA-740](https://issues.apache.org/jira/browse/AURORA-740)). However, any large spike in this
+value warrants investigation.
+
+The scheduler will log when it times out a task. You should trace the task ID of the timed out
+task into the master, agent, and/or executors to determine where the message was dropped.
+
+### `http_500_responses_events`
+Type: integer counter
+
+The total number of HTTP 500 status responses sent by the scheduler. Includes API and asset serving.
+
+An increase warrants investigation.
+
+Look in the scheduler logs to identify why the scheduler returned a 500; there should be a stack trace.
diff --git a/source/documentation/0.18.0/operations/security.md b/source/documentation/0.18.0/operations/security.md
new file mode 100644
index 0000000..1fdf10f
--- /dev/null
+++ b/source/documentation/0.18.0/operations/security.md
@@ -0,0 +1,362 @@
+Securing your Aurora Cluster
+============================
+
+Aurora integrates with [Apache Shiro](http://shiro.apache.org/) to provide security
+controls for its API. In addition to providing some useful features out of the box, Shiro
+also allows Aurora cluster administrators to adapt the security system to their organization’s
+existing infrastructure. The announcer in the Aurora thermos executor also supports security
+controls for talking to ZooKeeper.
+
+
+- [Enabling Security](#enabling-security)
+- [Authentication](#authentication)
+	- [HTTP Basic Authentication](#http-basic-authentication)
+		- [Server Configuration](#server-configuration)
+		- [Client Configuration](#client-configuration)
+	- [HTTP SPNEGO Authentication (Kerberos)](#http-spnego-authentication-kerberos)
+		- [Server Configuration](#server-configuration-1)
+		- [Client Configuration](#client-configuration-1)
+- [Authorization](#authorization)
+	- [Using an INI file to define security controls](#using-an-ini-file-to-define-security-controls)
+		- [Caveats](#caveats)
+- [Implementing a Custom Realm](#implementing-a-custom-realm)
+	- [Packaging a realm module](#packaging-a-realm-module)
+- [Announcer Authentication](#announcer-authentication)
+    - [ZooKeeper authentication configuration](#zookeeper-authentication-configuration)
+    - [Executor settings](#executor-settings)
+- [Scheduler HTTPS](#scheduler-https)
+- [Known Issues](#known-issues)
+
+# Enabling Security
+
+There are two major components of security:
+[authentication and authorization](http://en.wikipedia.org/wiki/Authentication#Authorization).  A
+cluster administrator may choose the approach used for each, and may also implement custom
+mechanisms for either.  Later sections describe the options available. To enable authentication
+for the announcer, see [Announcer Authentication](#announcer-authentication).
+
+
+# Authentication
+
+At a minimum, the scheduler must be configured with instructions for how to process authentication
+credentials.  There are currently two built-in authentication schemes -
+[HTTP Basic Authentication](http://en.wikipedia.org/wiki/Basic_access_authentication), and
+[SPNEGO](http://en.wikipedia.org/wiki/SPNEGO) (Kerberos).
+
+## HTTP Basic Authentication
+
+Basic Authentication is a very quick way to add *some* security.  It is supported
+by all major browsers and HTTP client libraries with minimal work.  However,
+before relying on Basic Authentication you should be aware of the [security
+considerations](http://tools.ietf.org/html/rfc2617#section-4).
+
+### Server Configuration
+
+At a minimum you need to set 3 command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_realm_modules=INI_AUTHNZ
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+[users]
+sally = apple, admin
+
+[roles]
+admin = *
+```
+
+The details of the security.ini file are explained below. Note that this file contains plaintext,
+unhashed passwords.
+
+### Client Configuration
+
+To configure the client for HTTP Basic authentication, add an entry to `~/.netrc` with your credentials:
+
+```
+% cat ~/.netrc
+# ...
+
+machine aurora.example.com
+login sally
+password apple
+
+# ...
+```
+
+No changes are required to `clusters.json`.
+
+## HTTP SPNEGO Authentication (Kerberos)
+
+### Server Configuration
+At a minimum you need to set 5 command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=NEGOTIATE
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+% cat path/to/security.ini
+[users]
+sally = _, admin
+
+[roles]
+admin = *
+```
+
+What's going on here? First, Aurora must be configured to request Kerberos credentials when presented with an
+unauthenticated request. This is achieved by setting
+
+```
+-http_authentication_mechanism=NEGOTIATE
+```
+
+Next, a Realm module must be configured to **authenticate** the current request using the Kerberos
+credentials that were requested. Aurora ships with a realm module that can do this
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN[,...]
+```
+
+The Kerberos5Realm requires a keytab file and a server principal name. The principal name will usually
+be in the form `HTTP/aurora.example.com@EXAMPLE.COM`.
+
+```
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+```
+
+The Kerberos5 realm module is authentication-only. For scheduler security to work you must also
+enable a realm module that provides an Authorizer implementation. For example, to do this using the
+IniShiroRealmModule:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+```
+
+You can then configure authorization using a security.ini file as described below
+(the password field is ignored). You must configure the realm module with the path to this file:
+
+```
+-shiro_ini_path=path/to/security.ini
+```
+
+### Client Configuration
+To use Kerberos on the client-side you must build Kerberos-enabled client binaries. Do this with
+
+```
+./pants binary src/main/python/apache/aurora/kerberos:kaurora
+./pants binary src/main/python/apache/aurora/kerberos:kaurora_admin
+```
+
+You must also configure each cluster where you've enabled Kerberos on the scheduler
+to use Kerberos authentication. Do this by setting `auth_mechanism` to `KERBEROS`
+in `clusters.json`.
+
+```
+% cat ~/.aurora/clusters.json
+{
+    "devcluser": {
+        "auth_mechanism": "KERBEROS",
+        ...
+    },
+    ...
+}
+```
+
+# Authorization
+Given a means to authenticate the entity a client claims they are, we need to define what privileges they have.
+
+## Using an INI file to define security controls
+
+The simplest security configuration for Aurora is an INI file on the scheduler.  For small
+clusters, or clusters where the users and access controls change relatively infrequently, this is
+likely the preferred approach.  However you may want to avoid this approach if access permissions
+are rapidly changing, or if your access control information already exists in another system.
+
+You can enable INI-based configuration with the following scheduler command line arguments:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_ini_path=path/to/security.ini
+```
+
+*Note:* As the argument name reveals, this is using Shiro’s
+[IniRealm](http://shiro.apache.org/configuration.html#Configuration-INIConfiguration) behind
+the scenes.
+
+The INI file will contain two sections - users and roles.  Here’s an example for what might
+be in security.ini:
+
+```
+[users]
+sally = apple, admin
+jim = 123456, accounting
+becky = letmein, webapp
+larry = 654321,accounting
+steve = password
+
+[roles]
+admin = *
+accounting = thrift.AuroraAdmin:setQuota
+webapp = thrift.AuroraSchedulerManager:*:webapp
+```
+
+The users section defines user credentials and the role(s) they are members of.  These lines
+are of the format `<user> = <password>[, <role>...]`.  As you probably noticed, the passwords are
+in plaintext and as a result read access to this file should be restricted.
+
+In this configuration, each user has different privileges for actions in the cluster because
+of the roles they are a part of:
+
+* admin is granted all privileges
+* accounting may adjust the amount of resource quota for any role
+* webapp represents a collection of jobs that make up a service, and its members may create and modify any jobs owned by it
+
+### Caveats
+You might find documentation on the Internet suggesting there are additional sections in `shiro.ini`,
+like `[main]` and `[urls]`. These are not supported by Aurora as it uses a different mechanism to configure
+those parts of Shiro. Think of Aurora's `security.ini` as a subset with only `[users]` and `[roles]` sections.
+
+## Implementing Delegated Authorization
+
+It is possible to leverage Shiro's `runAs` feature by implementing a custom Servlet Filter that provides
+the capability and passing its fully qualified class name to the command line argument
+`-shiro_after_auth_filter`. The filter is registered in the same filter chain as the Shiro auth filters
+and is placed after them, which ensures that it is invoked only after the Shiro filters have had a
+chance to authenticate the request.
+
+# Implementing a Custom Realm
+
+Since Aurora’s security is backed by [Apache Shiro](https://shiro.apache.org), you can implement a
+custom [Realm](http://shiro.apache.org/realm.html) to define organization-specific security behavior.
+
+In addition to using Shiro's standard APIs to implement a Realm you can link against Aurora to
+access the type-safe Permissions Aurora uses. See the Javadoc for `org.apache.aurora.scheduler.spi`
+for more information.
+
+## Packaging a realm module
+Package your custom Realm(s) with a Guice module that exposes a `Set<Realm>` multibinding.
+
+```java
+package com.example;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.multibindings.Multibinder;
+import org.apache.shiro.realm.Realm;
+
+public class MyRealmModule extends AbstractModule {
+  @Override
+  public void configure() {
+    Realm myRealm = new MyRealm();
+
+    Multibinder.newSetBinder(binder(), Realm.class).addBinding().toInstance(myRealm);
+  }
+
+  static class MyRealm implements Realm {
+    // Realm implementation.
+  }
+}
+```
+
+To use your module in the scheduler, include it as a realm module based on its fully-qualified
+class name:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ,com.example.MyRealmModule
+```
+
+
+# Announcer Authentication
+The Thermos executor can be configured to authenticate with ZooKeeper and include
+an [ACL](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl)
+on the nodes it creates, which will specify
+the privileges of clients to perform different actions on these nodes.  This
+feature is enabled by specifying an ACL configuration file to the executor with the
+`--announcer-zookeeper-auth-config` command line argument.
+
+When this feature is _not_ enabled, nodes created by the executor will have 'world/all' permission
+(`ZOO_OPEN_ACL_UNSAFE`).  In most production environments, operators should specify an ACL and
+limit access.
+
+## ZooKeeper Authentication Configuration
+The configuration file must be formatted as JSON with the following schema:
+
+```json
+{
+  "auth": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>"
+    }
+  ],
+  "acl": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>",
+      "permissions": {
+        "read": <bool>,
+        "write": <bool>,
+        "create": <bool>,
+        "delete": <bool>,
+        "admin": <bool>
+      }
+    }
+  ]
+}
+```
+
+The `scheme`
+defines the encoding of the credential field.  Note that these fields are passed directly to
+ZooKeeper (except in the case of _digest_ scheme, where the executor will hash and encode
+the credential appropriately before passing it to ZooKeeper). In addition to `acl`, a list of
+authentication credentials must be provided in `auth` to use for the connection.
+
+All properties of the `permissions` object will default to False if not provided.
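+
+As an illustration, a configuration using the _digest_ scheme might look like the sketch below
+(the `user:password` credential is a placeholder):
+
+```json
+{
+  "auth": [
+    {"scheme": "digest", "credential": "user:password"}
+  ],
+  "acl": [
+    {
+      "scheme": "digest",
+      "credential": "user:password",
+      "permissions": {"read": true, "write": true, "create": true, "delete": true, "admin": true}
+    }
+  ]
+}
+```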
+
+## Executor settings
+To enable the executor to authenticate against ZK, `--announcer-zookeeper-auth-config` should be
+set to the configuration file.
+
+
+# Scheduler HTTPS
+
+The Aurora scheduler does not provide native HTTPS support ([AURORA-343](https://issues.apache.org/jira/browse/AURORA-343)).
+It is therefore recommended to deploy it behind an HTTPS capable reverse proxy such as nginx or Apache2.
+
+A simple setup is to launch both the reverse proxy and the Aurora scheduler on the same port, but
+bind the reverse proxy to the public IP of the host and the scheduler to localhost:
+
+    -ip=127.0.0.1
+    -http_port=8081
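+
+A corresponding nginx server block might then look roughly like the following sketch (the
+certificate paths and the public IP are placeholders):
+
+    server {
+      listen <public_ip>:8081 ssl;
+      ssl_certificate     /path/to/cert.pem;
+      ssl_certificate_key /path/to/key.pem;
+
+      location / {
+        proxy_pass http://127.0.0.1:8081;
+      }
+    }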
+
+If your clients connect to the scheduler via [`proxy_url`](../../reference/scheduler-configuration/),
+you can update it to `https`. If you use the ZooKeeper based discovery instead, the scheduler
+needs to be launched via
+
+    -serverset_endpoint_name=https
+
+in order to announce its HTTPS support within ZooKeeper.
+
+
+# Known Issues
+
+While the APIs and SPIs we ship with are stable as of 0.8.0, we are aware of several incremental
+improvements. Please follow, vote, or send patches.
+
+Relevant tickets:
+* [AURORA-1248](https://issues.apache.org/jira/browse/AURORA-1248): Client retries 4xx errors
+* [AURORA-1279](https://issues.apache.org/jira/browse/AURORA-1279): Remove kerberos-specific build targets
+* [AURORA-1293](https://issues.apache.org/jira/browse/AURORA-1291): Consider defining a JSON format in place of INI
+* [AURORA-1179](https://issues.apache.org/jira/browse/AURORA-1179): Supported hashed passwords in security.ini
+* [AURORA-1295](https://issues.apache.org/jira/browse/AURORA-1295): Support security for the ReadOnlyScheduler service
diff --git a/source/documentation/0.18.0/operations/storage.md b/source/documentation/0.18.0/operations/storage.md
new file mode 100644
index 0000000..fc3a1c3
--- /dev/null
+++ b/source/documentation/0.18.0/operations/storage.md
@@ -0,0 +1,96 @@
+# Aurora Scheduler Storage
+
+- [Overview](#overview)
+- [Storage Semantics](#storage-semantics)
+  - [Reads, writes, modifications](#reads-writes-modifications)
+    - [Read lifecycle](#read-lifecycle)
+    - [Write lifecycle](#write-lifecycle)
+  - [Atomicity, consistency and isolation](#atomicity-consistency-and-isolation)
+  - [Population on restart](#population-on-restart)
+
+
+## Overview
+
+The Aurora scheduler maintains data that needs to be persisted to survive failovers and restarts.
+For example:
+
+* Task configurations and scheduled task instances
+* Job update configurations and update progress
+* Production resource quotas
+* Mesos resource offer host attributes
+
+Aurora solves its persistence needs by leveraging the
+[Mesos implementation of a Paxos replicated log](http://mesos.apache.org/documentation/latest/replicated-log-internals/)
+[[1]](https://ramcloud.stanford.edu/~ongaro/userstudy/paxos.pdf)
+[[2]](http://en.wikipedia.org/wiki/State_machine_replication) with a key-value
+[LevelDB](https://github.com/google/leveldb) storage as persistence media.
+
+Conceptually, it can be represented by the following major components:
+
+* Volatile storage: in-memory cache of all available data. Implemented via in-memory
+[H2 Database](http://www.h2database.com/html/main.html) and accessed via
+[MyBatis](http://mybatis.github.io/mybatis-3/).
+* Log manager: interface between Aurora storage and Mesos replicated log. The default schema format
+is [thrift](https://github.com/apache/thrift). Data is stored in serialized binary form.
+* Snapshot manager: all data is periodically persisted to the Mesos replicated log in a single snapshot.
+This establishes periodic recovery checkpoints and speeds up volatile storage recovery on
+restart.
+* Backup manager: as a precaution, snapshots are periodically written out into backup files.
+This solves a [disaster recovery problem](../backup-restore/)
+in case of a complete loss or corruption of Mesos log files.
+
+![Storage hierarchy](../images/storage_hierarchy.png)
+
+
+## Storage Semantics
+
+This section covers implementation details of the Aurora storage system. Understanding them can
+be useful when investigating performance issues.
+
+### Reads, writes, modifications
+
+All services in Aurora access data via a set of predefined store interfaces (aka stores) logically
+grouped by the type of data they serve. Every interface defines a specific set of operations allowed
+on the data, thus abstracting away both the storage access and the actual persistence
+implementation. The latter is especially important given the general immutability of persisted
+data: with the Mesos replicated log as the underlying persistence solution, data can be read and
+written easily but not modified. All modifications are simulated by saving new versions of modified
+objects. This, together with general performance considerations, justifies the existence of the
+volatile in-memory store.
+
+#### Read lifecycle
+
+There are two types of reads available in Aurora: consistent and weakly-consistent. The difference
+is explained [below](#atomicity-consistency-and-isolation).
+
+All reads are served from volatile storage, which makes them generally cheap operations from a
+performance standpoint. The majority of the volatile stores are backed by the in-memory H2
+database. This allows for rich schema definitions, queries, and relationships that a key-value
+store cannot match.
+
+#### Write lifecycle
+
+Writes are more involved operations since, in addition to updating the volatile store, data has to
+be appended to the replicated log. Data is not available for reads until it has been fully
+acknowledged by both the replicated log and volatile storage.
+
+### Atomicity, consistency and isolation
+
+Aurora uses [write-ahead logging](http://en.wikipedia.org/wiki/Write-ahead_logging) to ensure
+consistency between replicated and volatile storage. In Aurora, data is first written into the
+replicated log and only then updated in the volatile store.
+
+Aurora storage uses read-write locks to serialize data mutations and provide a consistent view of
+the available data. The `Storage` interface exposes 3 major types of operations:
+* `consistentRead` - access is guarded by a reader's lock and provides a consistent view of the data
+* `weaklyConsistentRead` - access is lock-less; delivers the best contention performance but may
+result in stale reads
+* `write` - access is fully serialized by a writer's lock. Operation success requires both the
+volatile and replicated writes to succeed.
+
+The consistency of the volatile store is enforced via H2 transactional isolation.
+
+### Population on restart
+
+Any time a scheduler restarts, it restores its volatile state from the most recent position recorded
+in the replicated log by restoring the snapshot and replaying individual log entries on top to fully
+recover the state up to the last write.
diff --git a/source/documentation/0.18.0/operations/troubleshooting.md b/source/documentation/0.18.0/operations/troubleshooting.md
new file mode 100644
index 0000000..a7d4ef2
--- /dev/null
+++ b/source/documentation/0.18.0/operations/troubleshooting.md
@@ -0,0 +1,106 @@
+# Troubleshooting
+
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions here to help get you moving.
+
+## Replicated log not initialized
+
+### Symptoms
+- Scheduler RPCs and web interface claim `Storage is not READY`
+- Scheduler log repeatedly prints messages like
+
+  ```
+  I1016 16:12:27.234133 26081 replica.cpp:638] Replica in EMPTY status
+  received a broadcasted recover request
+  I1016 16:12:27.234256 26084 recover.cpp:188] Received a recover response
+  from a replica in EMPTY status
+  ```
+
+### Solution
+When you create a new cluster, you need to inform a quorum of schedulers that they are safe to
+consider their database to be empty by [initializing](../installation/#finalizing) the
+replicated log. This is done to prevent the scheduler from modifying the cluster state in the event
+of multiple simultaneous disk failures or, more likely, misconfiguration of the replicated log path.
+
+
+## No distinct leader elected
+
+### Symptoms
+Either no scheduler or multiple schedulers believe themselves to be leading.
+
+### Solution
+Verify the [network configuration](../configuration/#network-configuration) of the Aurora
+scheduler is correct:
+
+* The `LIBPROCESS_IP:LIBPROCESS_PORT` endpoints must be reachable from all coordinator nodes running
+  a scheduler or a Mesos master.
+* Hostname lookups have to resolve to public IPs rather than local ones that cannot be reached
+  from another node.
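+
+As a hedged illustration (the address and port are examples only), the libprocess endpoint can be
+pinned explicitly through environment variables before launching the scheduler:
+
+    export LIBPROCESS_IP=192.168.33.7
+    export LIBPROCESS_PORT=8083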
+
+In addition, double-check the [quorum settings](../configuration/#replicated-log-configuration) of the
+replicated log.
+
+
+## Scheduler not registered
+
+### Symptoms
+Scheduler log contains
+
+    Framework has not been registered within the tolerated delay.
+
+### Solution
+Double-check that the scheduler is configured correctly to reach the Mesos master. If you are registering
+the master in ZooKeeper, make sure the command line argument passed to the master:
+
+    --zk=zk://$ZK_HOST:2181/mesos/master
+
+is the same as the one on the scheduler:
+
+    -mesos_master_address=zk://$ZK_HOST:2181/mesos/master
+
+
+## Scheduler not running
+
+### Symptoms
+The scheduler process commits suicide regularly. This happens under error conditions, but
+also on purpose at regular intervals.
+
+### Solution
+Aurora is meant to be run under supervision. You have to configure a supervisor like
+[Monit](http://mmonit.com/monit/), [supervisord](http://supervisord.org/), or systemd to run the
+scheduler and restart it whenever it fails or exits on purpose.
+
+Aurora supports an active health checking protocol on its admin HTTP interface - if a `GET /health`
+times out or returns anything other than `200 OK` the scheduler process is unhealthy and should be
+restarted.
+
+For example, monit can be configured with
+
+    if failed port 8081 send "GET /health HTTP/1.0\r\n" expect "OK\n" with timeout 2 seconds for 10 cycles then restart
+
+assuming you set `-http_port=8081`.
+
+
+## Executor crashing or hanging
+
+### Symptoms
+Launched task instances never transition to `STARTING` or `RUNNING` but immediately transition
+to `FAILED` or `LOST`.
+
+### Solution
+The executor might be failing due to unknown internal errors such as a missing native dependency
+of the Mesos executor library. Open the Mesos UI and navigate to the failing
+task in question. Inspect the various log files in order to learn about what is going on.
+
+
+## Observer does not discover tasks
+
+### Symptoms
+The observer UI does not list any tasks. When navigating from the scheduler UI to the state of
+a particular task instance the observer returns `Error: 404 Not Found`.
+
+### Solution
+The observer refreshes its internal state every couple of seconds. If waiting a few seconds
+does not resolve the issue, check that the `--mesos-root` setting of the observer and the
+`--work_dir` option of the Mesos agent are in sync. For details, see our
+[Install instructions](../installation/#worker-configuration).
diff --git a/source/documentation/0.18.0/operations/upgrades.md b/source/documentation/0.18.0/operations/upgrades.md
new file mode 100644
index 0000000..635faf5
--- /dev/null
+++ b/source/documentation/0.18.0/operations/upgrades.md
@@ -0,0 +1,41 @@
+# Upgrading Aurora
+
+Aurora can be updated from one version to the next without any downtime or restarts of running
+jobs. The same holds true for Mesos.
+
+Generally speaking, Mesos and Aurora strive for a +1/-1 version compatibility, i.e. all components
+are meant to be forwards and backwards compatible for at least one version. This implies it
+does not really matter in which order updates are carried out.
+
+Exceptions to this rule are documented in the [Aurora release-notes](../../../RELEASE-NOTES/)
+and the [Mesos upgrade instructions](https://mesos.apache.org/documentation/latest/upgrades/).
+
+
+## Instructions
+
+To upgrade Aurora, follow these steps:
+
+1. Update the first scheduler instance by updating its software and restarting its process.
+2. Wait until the scheduler is up and its [Replicated Log](../configuration/#replicated-log-configuration)
+   has caught up with the other schedulers in the cluster. The log has caught up if `log/recovered` has
+   the value `1`. You can check the metric via `curl LIBPROCESS_IP:LIBPROCESS_PORT/metrics/snapshot`,
+   where ip and port refer to the [libmesos configuration](../configuration/#network-configuration)
+   settings of the scheduler instance.
+3. Proceed with the next scheduler until all instances are updated.
+4. Update the Aurora executor deployed to the compute nodes of your cluster. Jobs will continue
+   running with the old version of the executor, and will only be launched by the new one once
+   they are restarted eventually due to natural cluster churn.
+5. Distribute the new Aurora client to your users.
+
+
+## Best Practices
+
+Even though not absolutely mandatory, we advise adhering to the following rules:
+
+* Never skip any major or minor releases when updating. If you have to catch up on several releases,
+  you have to deploy all intermediate versions. Skipping bugfix releases is acceptable though.
+* Verify all updates on a test cluster before touching your production deployments.
+* To minimize the number of failovers during updates, update the currently leading scheduler
+  instance last.
+* Update the Aurora executor on a subset of compute nodes as a canary before deploying the change to
+  the whole fleet.
diff --git a/source/documentation/0.18.0/reference/client-cluster-configuration.md b/source/documentation/0.18.0/reference/client-cluster-configuration.md
new file mode 100644
index 0000000..ff7a3ac
--- /dev/null
+++ b/source/documentation/0.18.0/reference/client-cluster-configuration.md
@@ -0,0 +1,99 @@
+# Client Cluster Configuration
+
+A cluster configuration file is used by the Aurora client to describe the Aurora clusters with
+which it can communicate. Ultimately this allows client users to reference clusters with short names
+like us-east and eu.
+
+A cluster configuration is formatted as JSON.  The simplest cluster configuration is one that
+communicates with a single (non-leader-elected) scheduler.  For example:
+
+    [{
+      "name": "example",
+      "scheduler_uri": "http://localhost:55555",
+    }]
+
+
+A configuration for a leader-elected scheduler would contain something like:
+
+    [{
+      "name": "example",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler"
+    }]
+
+
+The following properties may be set:
+
+  **Property**             | **Type** | **Description**
+  :------------------------| :------- | :--------------
+   **name**                | String   | Cluster name (Required)
+   **slave_root**          | String   | Path to Mesos agent work dir (Required)
+   **slave_run_directory** | String   | Name of Mesos agent run dir (Required)
+   **zk**                  | String   | Hostname of ZooKeeper instance used to resolve Aurora schedulers.
+   **zk_port**             | Integer  | Port of ZooKeeper instance used to locate Aurora schedulers (Default: 2181)
+   **scheduler_zk_path**   | String   | ZooKeeper path under which scheduler instances are registered.
+   **scheduler_uri**       | String   | URI of Aurora scheduler instance.
+   **proxy_url**           | String   | Used by the client to format URLs for display.
+   **auth_mechanism**      | String   | The authentication mechanism to use when communicating with the scheduler. (Default: UNAUTHENTICATED)
+   **docker_registry**     | String   | Used by the client to resolve docker tags.
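+
+As a hedged, fuller illustration (all values below are examples rather than defaults), a single
+cluster entry combining several of these properties might look like:
+
+    [{
+      "name": "example",
+      "slave_root": "/var/lib/mesos",
+      "slave_run_directory": "latest",
+      "zk": "192.168.33.7",
+      "zk_port": 2181,
+      "scheduler_zk_path": "/aurora/scheduler",
+      "proxy_url": "http://aurora.example.com:8081",
+      "auth_mechanism": "UNAUTHENTICATED",
+      "docker_registry": "https://registry.example.com"
+    }]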
+
+
+## Details
+
+### `name`
+
+The name of the Aurora cluster represented by this entry. This name will be the `cluster` portion of
+any job keys identifying jobs running within the cluster.
+
+### `slave_root`
+
+The path on the Mesos agents where executing tasks can be found. It is used in combination with the
+`slave_run_directory` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This value should match the value passed to `mesos-slave`
+as `-work_dir`.
+
+### `slave_run_directory`
+
+The name of the directory where the task run can be found. This is used in combination with the
+`slave_root` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This should almost always be set to `latest`.
+
+### `zk`
+
+The hostname of the ZooKeeper instance used to resolve the Aurora scheduler. Aurora uses ZooKeeper
+to elect a leader. The client will connect to this ZooKeeper instance to determine the current
+leader. This host should match the host passed to the scheduler as `-zk_endpoints`.
+
+### `zk_port`
+
+The port on which the ZooKeeper instance is running. If not set this will default to the standard
+ZooKeeper port of 2181. This port should match the port in the host passed to the scheduler as
+`-zk_endpoints`.
+
+### `scheduler_zk_path`
+
+The path on the ZooKeeper instance under which the Aurora serverset is registered. This value should
+match the value passed to the scheduler as `-serverset_path`.
+
+### `scheduler_uri`
+
+The URI of the scheduler. This would be used in place of the ZooKeeper related configuration above
+in circumstances where direct communication with a single scheduler is needed (e.g. testing
+environments). It is strongly advised to **never** use this property for production deploys.
+
+### `proxy_url`
+
+Instead of using the hostname of the leading scheduler as the base URL, if `proxy_url` is set, its
+value will be used instead. In that scenario the value for `proxy_url` would be, for example, the
+URL of your VIP in a load balancer or a round-robin DNS name.
+
+### `auth_mechanism`
+
+The identifier of an authentication mechanism that the client should use when communicating with the
+scheduler. Support for values other than `UNAUTHENTICATED` requires a matching scheduler-side
+[security configuration](../../operations/security/).
+
+### `docker_registry`
+
+The URI of the Docker Registry that will be used by the Aurora client to resolve docker tags to concrete
+image IDs, when using the docker binding helper, like `{{docker.image[name][tag]}}`.
diff --git a/source/documentation/0.18.0/reference/client-commands.md b/source/documentation/0.18.0/reference/client-commands.md
new file mode 100644
index 0000000..49b45ca
--- /dev/null
+++ b/source/documentation/0.18.0/reference/client-commands.md
@@ -0,0 +1,339 @@
+Aurora Client Commands
+======================
+
+- [Introduction](#introduction)
+- [Cluster Configuration](#cluster-configuration)
+- [Job Keys](#job-keys)
+- [Modifying Aurora Client Commands](#modifying-aurora-client-commands)
+- [Regular Jobs](#regular-jobs)
+    - [Creating and Running a Job](#creating-and-running-a-job)
+    - [Running a Command On a Running Job](#running-a-command-on-a-running-job)
+    - [Killing a Job](#killing-a-job)
+    - [Adding Instances](#adding-instances)
+    - [Updating a Job](#updating-a-job)
+        - [Coordinated job updates](#coordinated-job-updates)
+    - [Renaming a Job](#renaming-a-job)
+    - [Restarting Jobs](#restarting-jobs)
+- [Cron Jobs](#cron-jobs)
+- [Comparing Jobs](#comparing-jobs)
+- [Viewing/Examining Jobs](#viewingexamining-jobs)
+    - [Listing Jobs](#listing-jobs)
+    - [Inspecting a Job](#inspecting-a-job)
+    - [Versions](#versions)
+    - [Checking Your Quota](#checking-your-quota)
+    - [Finding a Job on Web UI](#finding-a-job-on-web-ui)
+    - [Getting Job Status](#getting-job-status)
+    - [Opening the Web UI](#opening-the-web-ui)
+    - [SSHing to a Specific Task Machine](#sshing-to-a-specific-task-machine)
+    - [SCPing with Specific Task Machines](#scping-with-specific-task-machines)
+    - [Templating Command Arguments](#templating-command-arguments)
+
+Introduction
+------------
+
+Once you have written an `.aurora` configuration file that describes
+your Job and its parameters and functionality, you interact with Aurora
+using Aurora Client commands. This document describes all of these commands
+and how and when to use them. All Aurora Client commands start with
+`aurora`, followed by the name of the specific command and its
+arguments.
+
+*Job keys* are a very common argument to Aurora commands, as well as the
+gateway to useful information about a Job. Before using Aurora, you
+should read the next section which describes them in detail. The section
+after that briefly describes how you can modify the behavior of certain
+Aurora Client commands, linking to a detailed document about how to do
+that.
+
+This is followed by the Regular Jobs section, which describes the basic
+Client commands for creating, running, and manipulating Aurora Jobs.
+After that are sections on Comparing Jobs and Viewing/Examining Jobs. In
+other words, various commands for getting information and metadata about
+Aurora Jobs.
+
+Cluster Configuration
+---------------------
+
+The client must be able to find a configuration file that specifies available clusters. This file
+declares shorthand names for clusters, which are in turn referenced by job configuration files
+and client commands.
+
+The client will load at most two configuration files, making both of their defined clusters
+available. The first is intended to be a system-installed cluster, using the path specified in
+the environment variable `AURORA_CONFIG_ROOT`, defaulting to `/etc/aurora/clusters.json` if the
+environment variable is not set. The second is a user-installed file, located at
+`~/.aurora/clusters.json`.
+
+For more details on cluster configuration see the
+[Client Cluster Configuration](../client-cluster-configuration/) documentation.
+
+Job Keys
+--------
+
+A job key is a unique system-wide identifier for an Aurora-managed
+Job, for example `cluster1/web-team/test/experiment204`. It is a 4-tuple
+consisting of, in order, *cluster*, *role*, *environment*, and
+*jobname*, separated by /s. Cluster is the name of an Aurora
+cluster. Role is the Unix service account under which the Job
+runs. Environment is a namespace component like `devel`, `test`,
+`prod`, or `stagingN`. Jobname is the Job's name.
+
+The combination of all four values uniquely specifies the Job. If any
+one value is different from that of another job key, the two job keys
+refer to different Jobs. For example, job key
+`cluster1/tyg/prod/workhorse` is different from
+`cluster1/tyg/prod/workcamel` is different from
+`cluster2/tyg/prod/workhorse` is different from
+`cluster2/foo/prod/workhorse` is different from
+`cluster1/tyg/test/workhorse`.
+
+Role names are user accounts existing on the agent machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
+Modifying Aurora Client Commands
+--------------------------------
+
+For certain Aurora Client commands, you can define hook methods that run
+either before or after an action that takes place during the command's
+execution, as well as based on whether the action finished successfully or failed
+during execution. Basically, a hook is code that lets you extend the
+command's actions. The hook executes on the client side, specifically on
+the machine executing Aurora commands.
+
+Hooks can be associated with these Aurora Client commands:
+
+  - `job create`
+  - `job kill`
+  - `job restart`
+
+The process for writing and activating them is complex enough
+that we explain it in a devoted document, [Hooks for Aurora Client API](../client-hooks/).
+
+Regular Jobs
+------------
+
+This section covers Aurora commands related to running, killing,
+renaming, updating, and restarting a basic Aurora Job.
+
+### Creating and Running a Job
+
+    aurora job create <job key> <configuration file>
+
+Creates and then runs a Job with the specified job key based on a `.aurora` configuration file.
+The configuration file may also contain and activate hook definitions.
+
+### Running a Command On a Running Job
+
+    aurora task run CLUSTER/ROLE/ENV/NAME[/INSTANCES] <cmd>
+
+Runs a shell command on all machines currently hosting shards of a
+single Job.
+
+`run` supports the same command line wildcards used to populate a Job's
+commands; i.e. anything in the `{{mesos.*}}` and `{{thermos.*}}`
+namespaces.
+
+### Killing a Job
+
+    aurora job killall CLUSTER/ROLE/ENV/NAME
+
+Kills all Tasks associated with the specified Job, blocking until all
+are terminated. Defaults to killing all instances in the Job.
+
+The `<configuration file>` argument for `kill` is optional. Use it only
+if it contains hook definitions and activations that affect the
+kill command.
+
+### Adding Instances
+
+    aurora job add CLUSTER/ROLE/ENV/NAME/INSTANCE <count>
+
+Adds `<count>` instances to the existing job. The configuration of the new instances is derived from
+an active job instance pointed to by the `/INSTANCE` part of the job specification. This command is
+a simpler way to scale out an existing job when an instance with desired task configuration
+already exists. Use `aurora update start` to add instances with a new (updated) configuration.
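+
+For instance, the following command (the cluster and job names are the same illustrative ones used
+elsewhere in this document) clones the configuration of instance 0 into two additional instances:
+
+    aurora job add devcluster/www-data/prod/hello/0 2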
+
+### Updating a Job
+
+You can manage job updates using the `aurora update` command.  Please see
+[the Job Update documentation](../../features/job-updates/) for more details.
+
+
+### Renaming a Job
+
+Renaming is a tricky operation as downstream clients must be informed of
+the new name. A conservative approach
+to renaming suitable for production services is:
+
+1.  Modify the Aurora configuration file to change the role,
+    environment, and/or name as appropriate to the standardized naming
+    scheme.
+2.  Check that only these naming components have changed
+    with `aurora diff`.
+
+        aurora job diff CLUSTER/ROLE/ENV/NAME <job_configuration>
+
+3.  Create the (identical) job at the new key. You may need to request a
+    temporary quota increase.
+
+        aurora job create CLUSTER/ROLE/ENV/NEW_NAME <job_configuration>
+
+4.  Migrate all clients over to the new job key. Update all links and
+    dashboards. Ensure that both job keys run identical versions of the
+    code while in this state.
+5.  After verifying that all clients have successfully moved over, kill
+    the old job.
+
+        aurora job killall CLUSTER/ROLE/ENV/NAME
+
+6.  If you received a temporary quota increase, be sure to let the
+    powers that be know you no longer need the additional capacity.
+
+### Restarting Jobs
+
+`restart` restarts all shards of the Job identified by the job key:
+
+    aurora job restart CLUSTER/ROLE/ENV/NAME[/INSTANCES]
+
+Restarts are controlled on the client side, so aborting
+the `job restart` command halts the restart operation.
+
+**Note**: `job restart` only applies its command line arguments; it neither uses nor is
+affected by `update.config`. Restarting
+does ***not*** involve a configuration change. To update the
+configuration, use `update.config`.
+
+The `--config` argument for restart is optional. Use it only
+if it contains hook definitions and activations that affect the
+`job restart` command.
+
+Cron Jobs
+---------
+
+You can manage cron jobs using the `aurora cron` command.  Please see
+[the Cron Jobs Feature](../../features/cron-jobs/) for more details.
+
+Comparing Jobs
+--------------
+
+    aurora job diff CLUSTER/ROLE/ENV/NAME <job configuration>
+
+Compares a job configuration against a running job. By default the diff
+is determined using `diff`, though you may choose an alternate diff
+program by specifying the `DIFF_VIEWER` environment variable.
+
+Viewing/Examining Jobs
+----------------------
+
+Above we discussed creating, killing, and updating Jobs. Here we discuss
+how to view and examine Jobs.
+
+### Listing Jobs
+
+    aurora config list <job configuration>
+
+Lists all Jobs registered with the Aurora scheduler in the named cluster for the named role.
+
+### Inspecting a Job
+
+    aurora job inspect CLUSTER/ROLE/ENV/NAME <job configuration>
+
+`inspect` verifies that its specified job can be parsed from a
+configuration file, and displays the parsed configuration.
+
+### Checking Your Quota
+
+    aurora quota get CLUSTER/ROLE
+
+Prints the production quota allocated to the role at the given
+cluster. Only non-[dedicated](../../features/constraints/#dedicated-attribute)
+[production](../configuration/#job-objects) jobs consume quota.
+
+### Finding a Job on Web UI
+
+When you create a job, part of the output response contains a URL that goes
+to the job's scheduler UI page. For example:
+
+    vagrant@precise64:~$ aurora job create devcluster/www-data/prod/hello /vagrant/examples/jobs/hello_world.aurora
+    INFO] Creating job hello
+    INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+    INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+You can go to the scheduler UI page for this job via `http://precise64:8081/scheduler/www-data/prod/hello`.
+You can go to the overall scheduler UI page by going to the part of that URL that ends at `scheduler`: `http://precise64:8081/scheduler`.
+
+Once you click through to a role page, you see Jobs arranged
+separately by pending jobs, active jobs and finished jobs.
+Jobs are arranged by role, typically a service account for
+production jobs and user accounts for test or development jobs.
+
+### Getting Job Status
+
+    aurora job status <job_key>
+
+Returns the status of recent tasks associated with the Job specified by
+`job_key` in its supplied cluster. Typically this includes
+a mix of active tasks (running or assigned) and inactive tasks
+(successful, failed, and lost).
+
+### Opening the Web UI
+
+Use the Job's web UI scheduler URL or the `aurora status` command to find out on which
+machines individual tasks are scheduled. You can open the web UI via the
+`open` command line command if invoked from your machine:
+
+    aurora job open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+If only the cluster is specified, it goes directly to that cluster's
+scheduler main page. If the role is specified, it goes to the top-level
+role page. If the full job key is specified, it goes directly to the job
+page where you can inspect individual tasks.
+
+### SSHing to a Specific Task Machine
+
+    aurora task ssh <job_key> <shard number>
+
+You can have the Aurora client ssh directly to the machine that has been
+assigned a particular Job/shard number. This may be useful for quickly
+diagnosing issues such as performance issues or abnormal behavior on a
+particular machine.
+
+### SCPing with Specific Task Machines
+
+    aurora task scp [<cluster>/<role>/<env>/<job_name>/<instance_id>]:source [<cluster>/<role>/<env>/<job_name>/<instance_id>]:dest
+
+You can have the Aurora client copy file(s)/folder(s) to, from, and between
+individual tasks. The sandbox folder serves as the relative root and is the
+same folder you see when you browse `chroot` from the Scheduler task UI. You
+can also use absolute paths (like for `/tmp`), but tilde expansion is not
+supported. Currently, this command is only fully supported for Mesos
+containers. Users may use this to copy files from Docker containers but they
+cannot copy files to them.
+
+### Templating Command Arguments
+
+    aurora task run [-e] [-t THREADS] <job_key> -- <<command-line>>
+
+Given a job specification, run the supplied command on all hosts and
+return the output. You may use the standard Mustache templating rules:
+
+- `{{thermos.ports[name]}}` substitutes the specific named port of the
+  task assigned to this machine
+- `{{mesos.instance}}` substitutes the shard id of the job's task
+  assigned to this machine
+- `{{thermos.task_id}}` substitutes the task id of the job's task
+  assigned to this machine
+
+For example, the following type of pattern can be a powerful diagnostic
+tool:
+
+    aurora task run -t5 cluster1/tyg/devel/seizure -- \
+      'curl -s -m1 localhost:{{thermos.ports[http]}}/vars | grep uptime'
+
+By default, the command runs in the Task's sandbox. The `-e` option can
+run the command in the executor's sandbox. This is mostly useful for
+Aurora administrators.
+
+You can parallelize the runs by using the `-t` option.
diff --git a/source/documentation/0.18.0/reference/client-hooks.md b/source/documentation/0.18.0/reference/client-hooks.md
new file mode 100644
index 0000000..ee64193
--- /dev/null
+++ b/source/documentation/0.18.0/reference/client-hooks.md
@@ -0,0 +1,228 @@
+# Hooks for Aurora Client API
+
+You can execute hook methods around Aurora API Client methods when they are called by the Aurora Command Line commands.
+
+Explaining how hooks work is a bit tricky because of some indirection about what they apply to. Basically, a hook is code that executes when a particular Aurora Client API method runs, letting you extend the method's actions. The hook executes on the client side, specifically on the machine executing Aurora commands.
+
+The catch is that hooks are associated with Aurora Client API methods, which users don't directly call. Instead, users call Aurora Command Line commands, which call Client API methods during their execution. Since which hooks run depends on which Client API methods get called, you will need to know which Command Line commands call which API methods. Later on, there is a table showing the various associations.
+
+**Terminology Note**: From now on, "method(s)" refer to Client API methods, and "command(s)" refer to Command Line commands.
+
+- [Hook Types](#hook-types)
+- [Execution Order](#execution-order)
+- [Hookable Methods](#hookable-methods)
+- [Activating and Using Hooks](#activating-and-using-hooks)
+- [.aurora Config File Settings](#aurora-config-file-settings)
+- [Command Line](#command-line)
+- [Hooks Protocol](#hooks-protocol)
+  - [pre_ Methods](#pre_-methods)
+  - [err_ Methods](#err_-methods)
+  - [post_ Methods](#post_-methods)
+- [Generic Hooks](#generic-hooks)
+- [Hooks Process Checklist](#hooks-process-checklist)
+
+
+## Hook Types
+
+Hooks have three basic types, differing by when they run with respect to their associated method.
+
+`pre_<method_name>`: When its associated method is called, the `pre_` hook executes first, then the called method. If the `pre_` hook fails, the method never runs. Later code that expected the method to succeed may be affected by this, which can result in the Aurora client terminating.
+
+Note that a `pre_` hook can error-trap internally so it does not
+return `False`. Designers/contributors of new `pre_` hooks should
+consider whether or not to error-trap them. You can error trap at the
+highest level very generally and always pass the `pre_` hook by
+returning `True`. For example:
+
+    def pre_create(...):
+      do_something()  # if do_something fails with an exception, the create_job is not attempted!
+      return True
+
+    # However...
+    def pre_create(...):
+      try:
+        do_something()  # may cause exception
+      except Exception:  # generic error trap will catch it
+        pass  # and ignore the exception
+      return True  # create_job will run in any case!
+
+`post_<method_name>`: A `post_` hook executes after its associated method successfully finishes running. If it fails, the already executed method is unaffected. A `post_` hook's error is trapped, and any later operations are unaffected.
+
+`err_<method_name>`: Executes only when its associated method returns a status other than OK or throws an exception. If an `err_` hook fails, the already executed method is unaffected. An `err_` hook's error is trapped, and any later operations are unaffected.
+
+## Execution Order
+
+A command with `pre_`, `post_`, and `err_` hooks defined and activated for its called method executes in the following order when the method successfully executes:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and successfully finishes
+6. `post_` method hook runs
+7. Command code executes
+8. Command execution ends
+
+The following is what happens when, for the same command and hooks, the method associated with the command suffers an error and does not successfully finish executing:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and fails
+6. `err_` method hook runs
+7. Command Code executes (if `err_` method does not end the command execution)
+8. Command execution ends
+
+Note that the `post_` and `err_` hooks for the same method can never both run for a single execution of that method.
+
+## Hookable Methods
+
+You can associate `pre_`, `post_`, and `err_` hooks with the following methods. Since you do not directly interact with the methods, but rather the Aurora Command Line commands that call them, for each method we also list the command(s) that can call the method. Note that a different method or methods may be called by a command depending on how the command's other code executes. Similarly, multiple commands can call the same method. We also list the methods' argument signatures, which are used by their associated hooks. <a name="Chart"></a>
+
+  Aurora Client API Method | Client API Method Argument Signature | Aurora Command Line Command
+  -------------------------| ------------------------------------- | ---------------------------
+  ```create_job``` | ```self```, ```config``` | ```job create```, ```runtask```
+  ```restart``` | ```self```, ```job_key```, ```shards```, ```update_config```, ```health_check_interval_seconds``` | ```job restart```
+  ```kill_job``` | ```self```, ```job_key```, ```shards=None``` |  ```job kill```
+  ```start_cronjob``` | ```self```, ```job_key``` | ```cron start```
+  ```start_job_update``` | ```self```, ```config```, ```instances=None``` | ```update start```
+
+Some specific examples:
+
+* `pre_create_job` executes when a `create_job` method is called, and before the `create_job` method itself executes.
+
+* `post_cancel_update` executes after a `cancel_update` method has successfully finished running.
+
+* `err_kill_job` executes when the `kill_job` method is called, but doesn't successfully finish running.
+
+## Activating and Using Hooks
+
+By default, hooks are inactive. If you do not want to use hooks, you do not need to make any changes to your code. If you do want to use hooks, you will need to alter your `.aurora` config file to activate them both for the configuration as a whole as well as for individual `Job`s. And, of course, you will need to define in your config file what happens when a particular hook executes.
+
+## .aurora Config File Settings
+
+You can define a top-level `hooks` variable in any `.aurora` config file. `hooks` is a list of all objects that define hooks used by `Job`s defined in that config file. If you do not want to define any hooks for a configuration, `hooks` is optional.
+
+    hooks = [Object_with_defined_hooks1, Object_with_defined_hooks2]
+
+Be careful when assembling a config file using `include` on multiple smaller config files. If there are multiple files that assign a value to `hooks`, only the last assignment made will stick. For example, if `x.aurora` has `hooks = [a, b, c]` and `y.aurora` has `hooks = [d, e, f]` and `z.aurora` has, in this order, `include x.aurora` and `include y.aurora`, the `hooks` value will be `[d, e, f]`.
+
+Also, for any `Job` that you want to use hooks with, its `Job` definition in the `.aurora` config file must set an `enable_hooks` flag to `True` (it defaults to `False`). By default, hooks are disabled and you must enable them for `Job`s of your choice.
+
+To summarize, to use hooks for a particular job, you must both activate hooks for your config file as a whole, and for that job. Activating hooks only for individual jobs won't work, nor will only activating hooks for your config file as a whole. You must also specify the hooks' defining object in the `hooks` variable.
+
+Recall that `.aurora` config files are written in Pystachio. So the following turns on hooks for production jobs at cluster1 and cluster2, but leaves them off for similar jobs with a defined user role. Of course, you also need to list the objects that define the hooks in your config file's `hooks` variable.
+
+    jobs = [
+            Job(enable_hooks = True, cluster = c, env = 'prod') for c in ('cluster1', 'cluster2')
+           ]
+    jobs.extend(
+       Job(cluster = c, env = 'prod', role = getpass.getuser()) for c in ('cluster1', 'cluster2'))
+       # Hooks disabled for these jobs
+
+## Command Line
+
+All Aurora Command Line commands now accept an `.aurora` config file as an optional parameter (some, of course, accept it as a required parameter). Whenever a command has a `.aurora` file parameter, any hooks specified and activated in the `.aurora` file can be used. For example:
+
+    aurora job restart cluster1/role/env/app myapp.aurora
+
+The command activates any hooks specified and activated in `myapp.aurora`. For the `restart` command, that is the only thing the `myapp.aurora` parameter does. So, if the command was the following, since there is no `.aurora` config file to specify any hooks, no hooks on the `restart` command can run.
+
+    aurora job restart cluster1/role/env/app
+
+## Hooks Protocol
+
+Any object defined in the `.aurora` config file can define hook methods. You should define your hook methods within a class, and then use the class name as a value in the `hooks` list in your config file.
+
+Note that you can define other methods in the class that its hook methods can call; all the logic of a hook does not have to be in its definition.
+
+The following example defines a class containing a `pre_kill_job` hook definition that calls another method defined in the class.
+
+    # Defines a method pre_kill_job
+    class KillConfirmer(object):
+      def confirm(self, msg):
+        return raw_input(msg).lower() == 'yes'
+
+      def pre_kill_job(self, job_key, shards=None):
+        shards = ('shards %s' % shards) if shards is not None else 'all shards'
+        return self.confirm('Are you sure you want to kill %s (%s)? (yes/no): '
+                            % (job_key, shards))
+
+### pre_ Methods
+
+`pre_` methods have the signature:
+
+    pre_<API method name>(self, <associated method's signature>)
+
+`pre_` methods have the same signature as their associated method, with the addition of `self` as the first parameter. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `pre_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+If this method returns False, the API command call aborts.
+
+### err_ Methods
+
+`err_` methods have the signature:
+
+    err_<API method name>(self, exc, <associated method's signature>)
+
+`err_` methods have the same signature as their associated method, with the addition of a first parameter `self` and a second parameter `exc`. `exc` is either a result with responseCode other than `ResponseCode.OK` or an `Exception`. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `err_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+`err_` method return codes are ignored.
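+
+As a small, hedged sketch (the class name and message are illustrative), an `err_` hook for
+`kill_job` following the signature above might look like:
+
+    class KillAuditor(object):
+      def err_kill_job(self, exc, job_key, shards=None):
+        # exc is either a non-OK response or an Exception; the return value is ignored.
+        print('kill_job failed for %s (shards: %s): %s' % (job_key, shards, exc))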
+
+### post_ Methods
+
+`post_` methods have the signature:
+
+    post_<API method name>(self, result, <associated method signature>)
+
+`post_` method parameters are `self`, then `result`, followed by the same parameter signature as their associated method. `result` is the result of the associated method call. See the [chart](#chart) above for the mapping of parameters to methods. When writing `post_` methods, you can use the `*` and `**` syntax to designate that all unspecified arguments are passed in a list to the `*`ed parameter and all unspecified named arguments with values are passed as name/value pairs to the `**`ed parameter.
+
+`post_` method return codes are ignored.
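+
+A similarly hedged sketch of a `post_` hook for `create_job` (again purely illustrative):
+
+    class CreateAnnouncer(object):
+      def post_create_job(self, result, config):
+        # result is whatever create_job returned; the hook's own return value is ignored.
+        print('create_job finished with result: %r' % (result,))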
+
+## Generic Hooks
+
+There are seven Aurora API Methods which any of the three hook types can attach to. Thus, there are 21 possible hook/method combinations for a single `.aurora` config file. Say that you define `pre_` and `post_` hooks for the `restart` method. That leaves 19 undefined hook/method combinations; `err_restart` and the 3 `pre_`, `post_`, and `err_` hooks for each of the other 6 hookable methods. You can define what happens when any of these otherwise undefined 19 hooks execute via a generic hook, whose signature is:
+
+    generic_hook(self, hook_config, event, method_name, result_or_err, args*, kw**)
+
+where:
+
+* `hook_config` is a named tuple of `config` (the Pystachio `config` object) and `job_key`.
+
+* `event` is one of `pre`, `err`, or `post`, indicating which type of hook the generic hook is standing in for. For example, assume no specific hooks were defined for the `restart` API command. If `generic_hook` is defined and activated, and `restart` is called, `generic_hook` will effectively run as `pre_restart`, `post_restart`, and `err_restart`. You can use a selection statement on this value so that `generic_hook` will act differently based on whether it is standing in for a `pre_`, `post_`, or `err_` hook.
+
+* `method_name` is the Client API method name whose execution is causing this execution of the `generic_hook`.
+
+* `args*`, `kw**` are the API method arguments and keyword arguments respectively.
+* `result_or_err` is a tri-state parameter taking one of these three values:
+  1. `None` for `pre_` hooks
+  2. `result` for `post_` hooks
+  3. `exc` for `err_` hooks
+
+Example:
+
+    # Overrides the standard do-nothing generic_hook by adding a log writing operation.
+    from twitter.common import log
+
+    class Logger(object):
+      '''Adds to the log every time a hookable API method is called.'''
+      def generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw):
+        log.info('%s: %s_%s of %s'
+                 % (self.__class__.__name__, event, method_name, hook_config.job_key))
+
+## Hooks Process Checklist
+
+1. In your `.aurora` config file, add a `hooks` variable. Note that you may want to define a `.aurora` file only for hook definitions and then include this file in multiple other config files that you want to use the same hooks.
+
+        hooks = []
+
+2. In the `hooks` variable, list all objects that define hooks used by `Job`s defined in this config:
+
+        hooks = [Object_hook_definer1, Object_hook_definer2]
+
+3. For each job that uses hooks in this config file, add `enable_hooks = True` to the `Job` definition. Note that this is necessary even if you only want to use the generic hook.
+
+4. Write your `pre_`, `post_`, and `err_` hook definitions as part of an object definition in your `.aurora` config file.
+
+5. If desired, write your `generic_hook` definition as part of an object definition in your `.aurora` config file. Remember, the object must be listed as a member of `hooks`.
+
+6. If your Aurora command line command does not otherwise take an `.aurora` config file argument, add the appropriate `.aurora` file as an argument in order to define and activate the configuration's hooks.
diff --git a/source/documentation/0.18.0/reference/configuration-best-practices.md b/source/documentation/0.18.0/reference/configuration-best-practices.md
new file mode 100644
index 0000000..9ce7fcb
--- /dev/null
+++ b/source/documentation/0.18.0/reference/configuration-best-practices.md
@@ -0,0 +1,187 @@
+Aurora Configuration Best Practices
+===================================
+
+Use As Few .aurora Files As Possible
+------------------------------------
+
+When creating your `.aurora` configuration, try to keep all versions of
+a particular job within the same `.aurora` file. For example, if you
+have separate jobs for `cluster1`, `cluster1` staging, `cluster1`
+testing, and `cluster2`, keep them as close together as possible.
+
+Constructs shared across multiple jobs owned by your team (e.g.
+team-level defaults or structural templates) can be split into separate
+`.aurora` files and included via the `include` directive.
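+
+As a minimal sketch (the file name is illustrative, and this assumes the config loader's `include`
+function), such shared definitions can then be pulled into a job's `.aurora` file via:
+
+    include('team_defaults.aurora')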
+
+
+Avoid Boilerplate
+------------------
+
+If you see repetition or find yourself copying and pasting any parts of
+your configuration, it's likely an opportunity for templating. Take the
+example below:
+
+`redundant.aurora` contains:
+
+    download = Process(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'rm -rf Python-2.7.3 && tar xjf Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    build = Process(
+      name = 'build',
+      cmdline = 'pushd Python-2.7.3 && ./configure && make && popd',
+      max_failures = 1)
+
+    email = Process(
+      name = 'email',
+      cmdline = 'echo Success | mail feynman@tmc.com',
+      max_failures = 5,
+      min_duration = 1)
+
+    build_python = Task(
+      name = 'build_python',
+      processes = [download, unpack, build, email],
+      constraints = [Constraint(order = ['download', 'unpack', 'build', 'email'])])
+
+As you'll notice, there's a lot of repetition in the `Process`
+definitions. For example, almost every process sets a `max_failures`
+limit to 5 and a `min_duration` to 1. This is an opportunity for factoring
+into a common process template.
+
+Furthermore, the Python version is repeated everywhere. This can be
+bound via structural templating as described in the [Advanced Binding](../configuration-templating/#AdvancedBinding)
+section.
+
+`less_redundant.aurora` contains:
+
+    class Python(Struct):
+      version = Required(String)
+      base = Default(String, 'Python-{{version}}')
+      package = Default(String, '{{base}}.tar.bz2')
+
+    ReliableProcess = Process(
+      max_failures = 5,
+      min_duration = 1)
+
+    download = ReliableProcess(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/{{python.version}}/{{python.package}}')
+
+    unpack = ReliableProcess(
+      name = 'unpack',
+      cmdline = 'rm -rf {{python.base}} && tar xjf {{python.package}}')
+
+    build = ReliableProcess(
+      name = 'build',
+      cmdline = 'pushd {{python.base}} && ./configure && make && popd',
+      max_failures = 1)
+
+    email = ReliableProcess(
+      name = 'email',
+      cmdline = 'echo Success | mail {{role}}@foocorp.com')
+
+    build_python = SequentialTask(
+      name = 'build_python',
+      processes = [download, unpack, build, email]).bind(python = Python(version = "2.7.3"))
+
+
+Thermos Uses bash, But Thermos Is Not bash
+-------------------------------------------
+
+#### Bad
+
+Many tiny Processes make for harder-to-manage configurations.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'rcp user@my_machine:my_application .'
+    )
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+#### Good
+
+Each `cmdline` runs in a bash subshell, so you have the full power of
+bash. Chaining commands with `&&` or `||` is almost always the right
+thing to do.
+
+Also for Tasks that are simply a list of processes that run one after
+another, consider using the `SequentialTask` helper which applies a
+linear ordering constraint for you.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'rcp user@my_machine:my_application . && unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+
+Rarely Use Functions In Your Configurations
+-------------------------------------------
+
+90% of the time you define a function in a `.aurora` file, you're
+probably Doing It Wrong(TM).
+
+#### Bad
+
+    def get_my_task(name, user, cpu, ram, disk):
+      return Task(
+        name = name,
+        user = user,
+        processes = [STAGE_PROCESS, RUN_PROCESS],
+        constraints = order(STAGE_PROCESS, RUN_PROCESS),
+        resources = Resources(cpu = cpu, ram = ram, disk = disk)
+      )
+
+    task_one = get_my_task('task_one', 'feynman', 1.0, 32*MB, 1*GB)
+    task_two = get_my_task('task_two', 'feynman', 2.0, 64*MB, 1*GB)
+
+#### Good
+
+This one is more idiomatic. Forced keyword arguments prevent accidents,
+e.g. passing `32*MB` as disk when you mean 32 MB of RAM. Less
+proliferation of task-construction techniques makes for an
+easier-to-read, quicker-to-understand, and more composable
+configuration.
+
+    TASK_TEMPLATE = SequentialTask(
+      user = 'wickman',
+      processes = [STAGE_PROCESS, RUN_PROCESS],
+    )
+
+    task_one = TASK_TEMPLATE(
+      name = 'task_one',
+      resources = Resources(cpu = 1.0, ram = 32*MB, disk = 1*GB)
+    )
+
+    task_two = TASK_TEMPLATE(
+      name = 'task_two',
+      resources = Resources(cpu = 2.0, ram = 64*MB, disk = 1*GB)
+    )
diff --git a/source/documentation/0.18.0/reference/configuration-templating.md b/source/documentation/0.18.0/reference/configuration-templating.md
new file mode 100644
index 0000000..16d5c9d
--- /dev/null
+++ b/source/documentation/0.18.0/reference/configuration-templating.md
@@ -0,0 +1,306 @@
+Aurora Configuration Templating
+===============================
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a templating library called
+*Pystachio*, a powerful tool for configuration specification and reuse.
+
+[Aurora Configuration Reference](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the *Aurora+Thermos Configuration
+Reference* without `import` statements - the Aurora config loader
+injects them automatically. Other than that the `.aurora` format
+works like any other Python script.
+
+
+Templating 1: Binding in Pystachio
+----------------------------------
+
+Pystachio uses the visually distinctive {{}} to indicate template
+variables. These are often called "mustache variables" after the
+similarly appearing variables in the Mustache templating system and
+because the curly braces resemble mustaches.
+
+If you are familiar with the Mustache system, templates in Pystachio
+have significant differences. They have no nesting, joining, or
+inheritance semantics. On the other hand, templates are evaluated
+iteratively, so this affords some level of indirection.
+
+Let's start with the simplest template; text with one
+variable, in this case `name`:
+
+    Hello {{name}}
+
+If we evaluate this as is, we'd get back:
+
+    Hello
+
+If a template variable doesn't have a value, when evaluated it's
+replaced with nothing. If we add a binding to give it a value:
+
+    { "name" : "Tom" }
+
+We'd get back:
+
+    Hello Tom
+
+Every Pystachio object has an associated `.bind` method that can bind
+values to {{}} variables. Bindings are not immediately evaluated.
+Instead, they are evaluated only when the interpolated value of the
+object is necessary, e.g. for performing equality or serializing a
+message over the wire.
+
+Objects with and without mustache templated variables behave
+differently:
+
+    >>> Float(1.5)
+    Float(1.5)
+
+    >>> Float('{{x}}.5')
+    Float({{x}}.5)
+
+    >>> Float('{{x}}.5').bind(x = 1)
+    Float(1.5)
+
+    >>> Float('{{x}}.5').bind(x = 1) == Float(1.5)
+    True
+
+    >>> contextual_object = String('{{metavar{{number}}}}').bind(
+    ... metavar1 = "first", metavar2 = "second")
+
+    >>> contextual_object
+    String({{metavar{{number}}}})
+
+    >>> contextual_object.bind(number = 1)
+    String(first)
+
+    >>> contextual_object.bind(number = 2)
+    String(second)
+
+You usually bind simple key to value pairs, but you can also bind three
+other objects: lists, dictionaries, and structurals. These will be
+described in detail later.
+
+
+### Structurals in Pystachio / Aurora
+
+Most Aurora/Thermos users don't ever (knowingly) interact with `String`,
+`Float`, or `Integer` Pystachio objects directly. Instead they interact
+with derived structural (`Struct`) objects that are collections of
+fundamental and structural objects. The structural object components are
+called *attributes*. Aurora's most used structural objects are `Job`,
+`Task`, and `Process`:
+
+    class Process(Struct):
+      cmdline = Required(String)
+      name = Required(String)
+      max_failures = Default(Integer, 1)
+      daemon = Default(Boolean, False)
+      ephemeral = Default(Boolean, False)
+      min_duration = Default(Integer, 5)
+      final = Default(Boolean, False)
+
+Construct default objects by following the object's type with (). If you
+want an attribute to have a value different from its default, include
+the attribute name and value inside the parentheses.
+
+    >>> Process()
+    Process(daemon=False, max_failures=1, ephemeral=False,
+      min_duration=5, final=False)
+
+Attribute values can be template variables, which then receive specific
+values when creating the object.
+
+    >>> Process(cmdline = 'echo {{message}}')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo {{message}}, final=False)
+
+    >>> Process(cmdline = 'echo {{message}}').bind(message = 'hello world')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+A powerful binding property is that all of an object's children inherit its
+bindings:
+
+    >>> List(Process)([
+    ... Process(name = '{{prefix}}_one'),
+    ... Process(name = '{{prefix}}_two')
+    ... ]).bind(prefix = 'hello')
+    ProcessList(
+      Process(daemon=False, name=hello_one, max_failures=1, ephemeral=False, min_duration=5, final=False),
+      Process(daemon=False, name=hello_two, max_failures=1, ephemeral=False, min_duration=5, final=False)
+      )
+
+Remember that an Aurora Job contains Tasks which contain Processes. A
+Job level binding is inherited by its Tasks and all their Processes.
+Similarly a Task level binding is available to that Task and its
+Processes but is *not* visible at the Job level (inheritance is a
+one-way street.)
+
+#### Mustaches Within Structurals
+
+When you define a `Struct` schema, one powerful, but confusing, feature
+is that all of that structure's attributes are Mustache variables within
+the enclosing scope *once they have been populated*.
+
+For example, when `Process` is defined above, all its attributes such as
+{{`name`}}, {{`cmdline`}}, {{`max_failures`}} etc., are immediately
+defined as Mustache variables, implicitly bound into the `Process`, and
+are inherited by all child objects once they are defined.
+
+Thus, you can do the following:
+
+    >>> Process(name = "installer", cmdline = "echo {{name}} is running")
+    Process(daemon=False, name=installer, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo installer is running, final=False)
+
+WARNING: This binding only takes place in one direction. For example,
+the following does NOT work and does not set the `Process` `name`
+attribute's value.
+
+    >>> Process().bind(name = "installer")
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5, final=False)
+
+The following is also not possible and results in an infinite loop that
+attempts to resolve `Process.name`.
+
+    >>> Process(name = '{{name}}').bind(name = 'installer')
+
+Do not confuse Structural attributes with bound Mustache variables.
+Attributes are implicitly converted to Mustache variables but not vice
+versa.
+
+### Templating 2: Structurals Are Factories
+
+#### A Second Way of Templating
+
+A second templating method is as powerful as the first and is often
+confused with it. This method relies on the automatic conversion of
+Struct attributes to Mustache variables described above.
+
+Suppose you create a Process object:
+
+    >>> p = Process(name = "process_one", cmdline = "echo hello world")
+
+    >>> p
+    Process(daemon=False, name=process_one, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+This `Process` object, "`p`", can be used wherever a `Process` object is
+needed. It can also be reused by changing the value(s) of its
+attribute(s). Here we change its `name` attribute from `process_one` to
+`process_two`.
+
+    >>> p(name = "process_two")
+    Process(daemon=False, name=process_two, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+Template creation is a common use for this technique:
+
+    >>> Daemon = Process(daemon = True)
+    >>> logrotate = Daemon(name = 'logrotate', cmdline = './logrotate conf/logrotate.conf')
+    >>> mysql = Daemon(name = 'mysql', cmdline = 'bin/mysqld --safe-mode')
+
+### Advanced Binding
+
+As described above, `.bind()` binds simple strings or numbers to
+Mustache variables. In addition to Structural types formed by combining
+atomic types, Pystachio has two container types: `List` and `Map`, which
+can also be bound via `.bind()`.
+
+#### Bind Syntax
+
+The `bind()` function can take Python dictionaries or keyword arguments
+interchangeably (when `**kwargs` appears in a function definition, `kwargs`
+receives a Python dictionary containing all keyword arguments after the
+formal parameter list).
+
+    >>> String('{{foo}}').bind(foo = 'bar') == String('{{foo}}').bind({'foo': 'bar'})
+    True
+
+Bindings done "closer" to the object in question take precedence:
+
+    >>> p = Process(name = '{{context}}_process')
+    >>> t = Task().bind(context = 'global')
+    >>> t(processes = [p, p.bind(context = 'local')])
+    Task(processes=ProcessList(
+      Process(daemon=False, name=global_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5),
+      Process(daemon=False, name=local_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5)
+    ))
+
+#### Binding Complex Objects
+
+##### Lists
+
+    >>> fibonacci = List(Integer)([1, 1, 2, 3, 5, 8, 13])
+    >>> String('{{fib[4]}}').bind(fib = fibonacci)
+    String(5)
+
+##### Maps
+
+    >>> first_names = Map(String, String)({'Kent': 'Clark', 'Wayne': 'Bruce', 'Prince': 'Diana'})
+    >>> String('{{first[Kent]}}').bind(first = first_names)
+    String(Clark)
+
+##### Structurals
+
+    >>> String('{{p.cmdline}}').bind(p = Process(cmdline = "echo hello world"))
+    String(echo hello world)
+
+### Structural Binding
+
+Use structural templates when binding more than two or three individual
+values at the Job or Task level. For two or three values or fewer, standard
+key-to-string binding is sufficient.
+
+Structural binding is a very powerful pattern and is most useful in
+Aurora/Thermos for doing Structural configuration. For example, you can
+define a job profile. The following profile uses `HDFS`, the Hadoop
+Distributed File System, to designate a file's location. `HDFS` does
+not come with Aurora, so you'll need to either install it separately
+or change the way the dataset is designated.
+
+    class Profile(Struct):
+      version = Required(String)
+      environment = Required(String)
+      dataset = Default(String, 'hdfs://home/aurora/data/{{environment}}')
+
+    PRODUCTION = Profile(version = 'live', environment = 'prod')
+    DEVEL = Profile(version = 'latest',
+                    environment = 'devel',
+                    dataset = 'hdfs://home/aurora/data/test')
+    TEST = Profile(version = 'latest', environment = 'test')
+
+    JOB_TEMPLATE = Job(
+      name = 'application',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = '{{profile.environment}}',
+      task = SequentialTask(
+        name = 'task',
+        resources = Resources(cpu = 2, ram = 4*GB, disk = 8*GB),
+        processes = [
+          Process(name = 'main',
+                  cmdline = 'java -jar application.jar -hdfsPath {{profile.dataset}}')
+        ]
+       )
+     )
+
+    jobs = [
+      JOB_TEMPLATE(instances = 100).bind(profile = PRODUCTION),
+      JOB_TEMPLATE.bind(profile = DEVEL),
+      JOB_TEMPLATE.bind(profile = TEST),
+     ]
+
+In this case, a custom structural "Profile" is created to self-document
+the configuration to some degree. This also allows some schema
+"type-checking", and for default self-substitution, e.g. in
+`Profile.dataset` above.
+
+So rather than a `.bind()` with a half-dozen substituted variables, you
+can bind a single object that has sensible defaults stored in a single
+place.
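+
+A hedged sketch of how this composes with the factory behavior described
+earlier: a one-off variant only needs to override the attribute that
+differs (the canary names and dataset path here are made up):
+
+    CANARY = PRODUCTION(dataset = 'hdfs://home/aurora/data/canary')
+
+    jobs.append(
+      JOB_TEMPLATE(name = 'application-canary', instances = 1)
+        .bind(profile = CANARY))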
diff --git a/source/documentation/0.18.0/reference/configuration-tutorial.md b/source/documentation/0.18.0/reference/configuration-tutorial.md
new file mode 100644
index 0000000..c170d47
--- /dev/null
+++ b/source/documentation/0.18.0/reference/configuration-tutorial.md
@@ -0,0 +1,531 @@
+Aurora Configuration Tutorial
+=============================
+
+How to write Aurora configuration files, including feature descriptions
+and best practices. When writing a configuration file, make use of
+`aurora job inspect`. It takes the same job key and configuration file
+arguments as `aurora job create` or `aurora update start`. It first ensures the
+configuration parses, then outputs it in human-readable form.
+
+You should read this after going through the general [Aurora Tutorial](../../getting-started/tutorial/).
+
+- [The Basics](#the-basics)
+	- [Use Bottom-To-Top Object Ordering](#use-bottom-to-top-object-ordering)
+- [An Example Configuration File](#an-example-configuration-file)
+- [Defining Process Objects](#defining-process-objects)
+- [Getting Your Code Into The Sandbox](#getting-your-code-into-the-sandbox)
+- [Defining Task Objects](#defining-task-objects)
+	- [SequentialTask: Running Processes in Parallel or Sequentially](#sequentialtask-running-processes-in-parallel-or-sequentially)
+	- [SimpleTask](#simpletask)
+	- [Combining tasks](#combining-tasks)
+- [Defining Job Objects](#defining-job-objects)
+- [The jobs List](#the-jobs-list)
+- [Basic Examples](#basic-examples)
+
+
+The Basics
+----------
+
+To run a job on Aurora, you must specify a configuration file that tells
+Aurora what it needs to know to schedule the job, what Mesos needs to
+run the tasks the job is made up of, and what Thermos needs to run the
+processes that make up the tasks. This file must have
+a `.aurora` suffix.
+
+A configuration file defines a collection of objects, along with parameter
+values for their attributes. An Aurora configuration file contains the
+following three types of objects:
+
+- Job
+- Task
+- Process
+
+A configuration also specifies a list of `Job` objects assigned
+to the variable `jobs`.
+
+- jobs (list of defined Jobs to run)
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a type-checked dictionary
+templating library called *Pystachio*, a powerful tool for
+configuration specification and reuse. Pystachio objects are tailored
+via `{{}}`-surrounded templates.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the [*Aurora Configuration
+Reference*](../configuration/), without `import` statements - the
+Aurora config loader injects them automatically. Other than that, an `.aurora`
+file works like any other Python script.
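+
+Because the loader treats the file as ordinary Python, standard Python
+constructs can be mixed with the injected Pystachio types. A minimal sketch
+(the shard names and URL are made up):
+
+    # Build several similar processes with a plain list comprehension.
+    fetch_shards = [
+      Process(name = 'fetch_shard_%d' % i,
+              cmdline = 'curl -O https://example.com/data/shard_%d.dat' % i)
+      for i in range(4)
+    ]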
+
+The [*Aurora Configuration Reference*](../configuration/)
+contains a full listing of all Aurora/Thermos-defined Pystachio objects.
+
+### Use Bottom-To-Top Object Ordering
+
+A well-structured configuration starts with structural templates (if
+any). Structural templates encapsulate in their attributes all the
+differences between Jobs in the configuration that are not directly
+manipulated at the `Job` level, but typically at the `Process` or `Task`
+level, for example when certain processes are invoked with slightly
+different settings or input.
+
+After structural templates, define, in order, `Process`es, `Task`s, and
+`Job`s.
+
+Structural template names should be *UpperCamelCased* and their
+instantiations are typically *UPPER\_SNAKE\_CASED*. `Process`, `Task`,
+and `Job` names are typically *lower\_snake\_cased*. Indentation is typically 2
+spaces.
+
+An Example Configuration File
+-----------------------------
+
+The following is a typical configuration file. Don't worry if there are
+parts you don't understand yet, but you may want to refer back to this
+as you read about its individual parts. Note that names surrounded by
+curly braces {{}} are template variables, which the system replaces with
+bound values for the variables.
+
+    # --- templates here ---
+	class Profile(Struct):
+	  package_version = Default(String, 'live')
+	  java_binary = Default(String, '/usr/lib/jvm/java-1.7.0-openjdk/bin/java')
+	  extra_jvm_options = Default(String, '')
+	  parent_environment = Default(String, 'prod')
+	  parent_serverset = Default(String,
+                                 '/foocorp/service/bird/{{parent_environment}}/bird')
+
+	# --- processes here ---
+	main = Process(
+	  name = 'application',
+	  cmdline = '{{profile.java_binary}} -server -Xmx1792m '
+	            '{{profile.extra_jvm_options}} '
+	            '-jar application.jar '
+	            '-upstreamService {{profile.parent_serverset}}'
+	)
+
+	# --- tasks ---
+	base_task = SequentialTask(
+	  name = 'application',
+	  processes = [
+	    Process(
+	      name = 'fetch',
+	      cmdline = 'curl -O https://packages.foocorp.com/'
+	                '{{profile.package_version}}/application.jar'),
+	  ]
+	)
+
+	# not always necessary but often useful to have separate task
+	# resource classes
+	staging_task = base_task(resources =
+	                  Resources(cpu = 1.0,
+	                            ram = 2048*MB,
+	                            disk = 1*GB))
+	production_task = base_task(resources =
+	                     Resources(cpu = 4.0,
+	                               ram = 2560*MB,
+	                               disk = 10*GB))
+
+	# --- job template ---
+	job_template = Job(
+	  name = 'application',
+	  role = 'myteam',
+	  contact = 'myteam-team@foocorp.com',
+	  instances = 20,
+	  service = True,
+	  task = production_task
+	)
+
+	# -- profile instantiations (if any) ---
+	PRODUCTION = Profile()
+	STAGING = Profile(
+	  extra_jvm_options = '-Xloggc:gc.log',
+	  parent_environment = 'staging'
+	)
+
+	# -- job instantiations --
+	jobs = [
+	  job_template(cluster = 'cluster1', environment = 'prod')
+	    .bind(profile = PRODUCTION),
+
+	  job_template(cluster = 'cluster2', environment = 'prod')
+	    .bind(profile = PRODUCTION),
+
+	  job_template(cluster = 'cluster1',
+	               environment = 'staging',
+	               service = False,
+	               task = staging_task,
+	               instances = 2)
+	    .bind(profile = STAGING),
+	]
+
+## Defining Process Objects
+
+Processes are handled by the Thermos system. A process is a single
+executable step run as a part of an Aurora task; it consists of a
+bash-executable statement.
+
+The key (and required) `Process` attributes are:
+
+-   `name`: Any string which is a valid Unix filename (no slashes,
+    NULLs, or leading periods). The `name` value must be unique relative
+    to other Processes in a `Task`.
+-   `cmdline`: A command line run in a bash subshell, so you can use
+    bash scripts. Nothing is supplied for command-line arguments,
+    so `$*` is unspecified.
+
+Many tiny processes make managing configurations more difficult. For
+example, the following is a bad way to define processes.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip'
+    )
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+Since `cmdline` runs in a bash subshell, you can chain commands
+with `&&` or `||`.
+
+When defining a `Task` that is just a list of Processes run in a
+particular order, use `SequentialTask`, as described in the [*Defining*
+`Task` *Objects*](#defining-task-objects) section. The following simplifies and combines the
+above multiple `Process` definitions into just two.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip && '
+                'unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+`Process` also has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#process-objects).
+
+
+## Getting Your Code Into The Sandbox
+
+When using Aurora, you need to get your executable code into its "sandbox", specifically
+the Task sandbox where the code executes for the Processes that make up that Task.
+
+Each Task has a sandbox created when the Task starts and garbage
+collected when it finishes. All of a Task's processes run in its
+sandbox, so processes can share state by using a shared current
+working directory.
+
+Typically, you save this code somewhere. You then need to define a Process
+in your `.aurora` configuration file that fetches the code from that somewhere
+to where the agent can see it. For a public cloud, that can be anywhere public on
+the Internet, such as S3. For a private cloud, you need to put it on
+an accessible HDFS cluster or similar internal storage.
+
+The template for this Process is:
+
+    <name> = Process(
+      name = '<name>',
+      cmdline = '<command to copy and extract code archive into current working directory>'
+    )
+
+Note: Be sure the extracted code archive has an executable.
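+
+For example, a minimal fetch Process might look like the following sketch
+(the package URL is a placeholder for your own storage location):
+
+    fetch = Process(
+      name = 'fetch',
+      cmdline = 'curl -O https://packages.example.com/myapp.zip && unzip myapp.zip'
+    )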
+
+## Getting Environment Variables Into The Sandbox
+
+Every time a process is forked, the Thermos executor checks for the existence of
+a `.thermos_profile` file; if it exists, the file is sourced before the process runs.
+You can use this mechanism to pass environment variables to the sandbox.
+
+An example for this Process is:
+
+    setup_env = Process(
+      name = 'setup',
+      cmdline = '''cat <<EOF > .thermos_profile
+    export RESULT=hello
+    EOF'''
+    )
+
+    read_env = Process(
+      name = 'read',
+      cmdline = 'echo $RESULT'
+    )
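+
+Because `.thermos_profile` must exist before a dependent process is forked,
+order the processes so that the writer runs first. A sketch using the two
+processes above:
+
+    env_task = SequentialTask(
+      processes = [setup_env, read_env],
+      resources = Resources(cpu = 0.1, ram = 16*MB, disk = 16*MB))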
+
+## Defining Task Objects
+
+Tasks are handled by Mesos. A task is a collection of processes that
+runs in a shared sandbox. It's the fundamental unit Aurora uses to
+schedule the datacenter; essentially what Aurora does is find places
+in the cluster to run tasks.
+
+The key (and required) parts of a Task are:
+
+-   `name`: A string giving the Task's name. By default, if a Task is
+    not given a name, it inherits the first name in its Process list.
+
+-   `processes`: An unordered list of Process objects bound to the Task.
+    The optional `constraints` attribute governs how the listed processes
+    run as a group. Currently, the only constraint, `order`, determines if
+    the processes run in parallel or sequentially.
+
+-   `resources`: A `Resource` object defining the Task's resource
+    footprint. A `Resource` object has three attributes:
+    -   `cpu`: A Float, the fractional number of cores the Task requires.
+    -   `ram`: An Integer, RAM bytes the Task requires.
+    -   `disk`: An Integer, disk bytes the Task requires.
+
+A basic Task definition looks like:
+
+    Task(
+        name="hello_world",
+        processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+        resources=Resources(cpu = 1.0,
+                            ram = 1*GB,
+                            disk = 1*GB))
+
+A Task has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#task-object)
+
+
+### SequentialTask: Running Processes in Parallel or Sequentially
+
+By default, a Task with several Processes runs them in parallel. There
+are two ways to run Processes sequentially:
+
+-   Include an `order` constraint in the Task definition's `constraints`
+    attribute whose arguments specify the processes' run order:
+
+        Task( ... processes=[process1, process2, process3],
+	          constraints = order(process1, process2, process3), ...)
+
+-   Use `SequentialTask` instead of `Task`; it automatically runs
+    processes in the order specified in the `processes` attribute. No
+    `constraint` parameter is needed:
+
+        SequentialTask( ... processes=[process1, process2, process3] ...)
+
+### SimpleTask
+
+For quickly creating simple tasks, use the `SimpleTask` helper. It
+creates a basic task from a provided name and command line using a
+default set of resources. For example, in a `.aurora` configuration
+file:
+
+    SimpleTask(name="hello_world", command="echo hello world")
+
+is equivalent to
+
+    Task(name="hello_world",
+         processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+         resources=Resources(cpu = 1.0,
+                             ram = 1*GB,
+                             disk = 1*GB))
+
+The simplest idiomatic Job configuration thus becomes:
+
+    import os
+    hello_world_job = Job(
+      task=SimpleTask(name="hello_world", command="echo hello world"),
+      role=os.getenv('USER'),
+      cluster="cluster1")
+
+When written to `hello_world.aurora`, you invoke it with a simple
+`aurora job create cluster1/$USER/test/hello_world hello_world.aurora`.
+
+### Combining tasks
+
+`Tasks.concat` (synonym: `concat_tasks`) and
+`Tasks.combine` (synonym: `combine_tasks`) merge multiple Task definitions
+into a single Task. It may be easier to define complex Jobs
+as smaller constituent Tasks. But since a Job only includes a single
+Task, the subtasks must be combined before using them in a Job.
+Smaller Tasks can also be reused between Jobs, instead of having to
+repeat their definition for multiple Jobs.
+
+With both methods, the merged Task takes the first Task's name. The
+difference between the two is the resulting Task's process ordering.
+
+-   `Tasks.combine` runs its subtasks' processes in no particular order.
+    The new Task's resource consumption is the sum of all its subtasks'
+    consumption.
+
+-   `Tasks.concat` runs its subtasks in the order supplied, with each
+    subtask's processes run serially between tasks. It is analogous to
+    the `order` constraint helper, except at the Task level instead of
+    the Process level. The new Task's resource consumption is the
+    maximum value specified by any subtask for each Resource attribute
+    (cpu, ram and disk).
+
+For example, given the following:
+
+    setup_task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper],
+      # It is important to note that Tasks.concat has
+      # no effect on the ordering of the processes within a task;
+      # hence the necessity of the order statement below
+      # (otherwise, the order in which download_interpreter
+      # and update_zookeeper run will be non-deterministic)
+      constraints=order(download_interpreter, update_zookeeper),
+      ...
+    )
+
+    run_task = SequentialTask(
+      ...
+      processes=[download_application, start_application],
+      ...
+    )
+
+    combined_task = Tasks.concat(setup_task, run_task)
+
+The `Tasks.concat` command merges the two Tasks into a single Task and
+ensures all processes in `setup_task` run before the processes
+in `run_task`. Conceptually, the task is reduced to:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper,
+                        download_application, start_application),
+      ...
+    )
+
+In the case of `Tasks.combine`, the two schedules run in parallel:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper) +
+                        order(download_application, start_application),
+      ...
+    )
+
+In the latter case, each of the two sequences may operate in parallel.
+Of course, this may not be the intended behavior (for example, if
+the `start_application` Process implicitly relies
+upon `download_interpreter`). Make sure you understand the difference
+between using one or the other.
+
+## Defining Job Objects
+
+A job is a group of identical tasks that Aurora can run in a Mesos cluster.
+
+A `Job` object is defined by the values of several attributes, some
+required and some optional. The required attributes are:
+
+-   `task`: Task object to bind to this job. Note that a Job can
+    only take a single Task.
+
+-   `role`: Job's role account; in other words, the user account to run
+    the job as on a Mesos cluster machine. A common value is
+    `os.getenv('USER')`; using a Python command to get the user who
+    submits the job request. The other common value is the service
+    account that runs the job, e.g. `www-data`.
+
+-   `environment`: Job's environment, typical values
+    are `devel`, `test`, or `prod`.
+
+-   `cluster`: Aurora cluster to schedule the job in, defined in
+    `/etc/aurora/clusters.json` or `~/.clusters.json`. You can specify
+    jobs where the only difference is the `cluster`, then at run time
+    only run the Job whose job key includes your desired cluster's name.
+
+You usually see a `name` parameter. By default, `name` inherits its
+value from the Job's associated Task object, but you can override this
+default. Combining these attributes, a Job definition might look like:
+
+    foo_job = Job( name = 'foo', cluster = 'cluster1',
+              role = os.getenv('USER'), environment = 'prod',
+              task = foo_task)
+
+In addition to the required attributes, there are several optional
+attributes. Details can be found in the [Aurora Configuration Reference](../configuration/#job-objects).
+
+
+## The jobs List
+
+At the end of your `.aurora` file, you need to specify a list of the
+file's defined Jobs. For example, the following exports the jobs `job1`,
+`job2`, and `job3`.
+
+    jobs = [job1, job2, job3]
+
+This allows the aurora client to invoke commands on those jobs, such as
+starting, updating, or killing them.
+
+
+
+Basic Examples
+==============
+
+These are provided to give a basic understanding of simple Aurora jobs.
+
+### hello_world.aurora
+
+Put the following in a file named `hello_world.aurora`, substituting your own
+values for attributes such as `cluster`.
+
+    import os
+    hello_world_process = Process(name = 'hello_world', cmdline = 'echo hello world')
+
+    hello_world_task = Task(
+      resources = Resources(cpu = 0.1, ram = 16 * MB, disk = 16 * MB),
+      processes = [hello_world_process])
+
+    hello_world_job = Job(
+      cluster = 'cluster1',
+      role = os.getenv('USER'),
+      task = hello_world_task)
+
+    jobs = [hello_world_job]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world hello_world.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world
+
+### Environment Tailoring
+
+Put the following in a file named `hello_world_productionized.aurora`, substituting your own
+values for attributes such as `cluster`.
+
+    include('hello_world.aurora')
+
+    production_resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 2 * GB)
+    staging_resources = Resources(cpu = 0.1, ram = 32 * MB, disk = 512 * MB)
+    hello_world_template = hello_world_job(
+        name = "hello_world-{{cluster}}",
+        task = hello_world_task(resources = production_resources))
+
+    jobs = [
+      # production jobs
+      hello_world_template(cluster = 'cluster1', instances = 25),
+      hello_world_template(cluster = 'cluster2', instances = 15),
+
+      # staging jobs
+      hello_world_template(
+        cluster = 'local',
+        instances = 1,
+        task = hello_world_task(resources = staging_resources)),
+    ]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world-cluster1 hello_world_productionized.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world-cluster1
\ No newline at end of file
diff --git a/source/documentation/0.18.0/reference/configuration.md b/source/documentation/0.18.0/reference/configuration.md
new file mode 100644
index 0000000..a6bf4b3
--- /dev/null
+++ b/source/documentation/0.18.0/reference/configuration.md
@@ -0,0 +1,614 @@
+Aurora Configuration Reference
+==============================
+
+Don't know where to start? The Aurora configuration schema is very
+powerful, and configurations can become quite complex for advanced use
+cases.
+
+For examples of simple configurations to get something up and running
+quickly, check out the [Tutorial](../../getting-started/tutorial/). When you feel comfortable with the basics, move
+on to the [Configuration Tutorial](../configuration-tutorial/) for more in-depth coverage of
+configuration design.
+
+- [Process Schema](#process-schema)
+    - [Process Objects](#process-objects)
+- [Task Schema](#task-schema)
+    - [Task Object](#task-object)
+    - [Constraint Object](#constraint-object)
+    - [Resource Object](#resource-object)
+- [Job Schema](#job-schema)
+    - [Job Objects](#job-objects)
+    - [UpdateConfig Objects](#updateconfig-objects)
+    - [HealthCheckConfig Objects](#healthcheckconfig-objects)
+    - [Announcer Objects](#announcer-objects)
+    - [Container Objects](#container)
+    - [LifecycleConfig Objects](#lifecycleconfig-objects)
+- [Specifying Scheduling Constraints](#specifying-scheduling-constraints)
+- [Template Namespaces](#template-namespaces)
+    - [mesos Namespace](#mesos-namespace)
+    - [thermos Namespace](#thermos-namespace)
+
+
+Process Schema
+==============
+
+Process objects consist of required `name` and `cmdline` attributes. You can customize Process
+behavior with its optional attributes. Remember, Processes are handled by Thermos.
+
+### Process Objects
+
+  **Attribute Name**  | **Type**    | **Description**
+  ------------------- | :---------: | ---------------------------------
+   **name**           | String      | Process name (Required)
+   **cmdline**        | String      | Command line (Required)
+   **max_failures**   | Integer     | Maximum process failures (Default: 1)
+   **daemon**         | Boolean     | When True, this is a daemon process. (Default: False)
+   **ephemeral**      | Boolean     | When True, this is an ephemeral process. (Default: False)
+   **min_duration**   | Integer     | Minimum duration between process restarts in seconds. (Default: 15)
+   **final**          | Boolean     | When True, this process is a finalizing one that should run last. (Default: False)
+   **logger**         | Logger      | Struct defining the log behavior for the process. (Default: Empty)
+
+#### name
+
+The name is any valid UNIX filename string (specifically no
+slashes, NULLs or leading periods). Within a Task object, each Process name
+must be unique.
+
+#### cmdline
+
+The command line run by the process. The command line is invoked in a bash
+subshell, so it can involve full-blown bash scripts. However, nothing is
+supplied for command-line arguments so `$*` is unspecified.
+
+#### max_failures
+
+The maximum number of failures (non-zero exit statuses) this process can
+have before being marked permanently failed and not retried. If a
+process permanently fails, Thermos looks at the failure limit of the task
+containing the process (usually 1) to determine if the task has
+failed as well.
+
+Setting `max_failures` to 0 makes the process retry
+indefinitely until it achieves a successful (zero) exit status.
+It retries at most once every `min_duration` seconds to prevent
+an effective denial of service attack on the coordinating Thermos scheduler.
+
+#### daemon
+
+By default, Thermos processes are non-daemon. If `daemon` is set to True, a
+successful (zero) exit status does not prevent future process runs.
+Instead, the process reinvokes after `min_duration` seconds.
+However, the maximum failure limit still applies. A combination of
+`daemon=True` and `max_failures=0` causes a process to retry
+indefinitely regardless of exit status. This should be avoided
+for very short-lived processes because of the accumulation of
+checkpointed state for each process run. When running in Mesos
+specifically, `max_failures` is capped at 100.
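+
+As an illustrative sketch of the combination described above (the script
+name is a placeholder):
+
+    heartbeat = Process(
+      name = 'heartbeat',
+      cmdline = './send_heartbeat.sh',
+      daemon = True,       # re-run even after successful (zero) exits
+      max_failures = 0,    # never mark the process permanently failed
+      min_duration = 30)   # wait at least 30 seconds between runs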
+
+#### ephemeral
+
+By default, Thermos processes are non-ephemeral. If `ephemeral` is set to
+True, the process' status is not used to determine if its containing task
+has completed. For example, consider a task with a non-ephemeral
+webserver process and an ephemeral logsaver process
+that periodically checkpoints its log files to a centralized data store.
+The task is considered finished once the webserver process has
+completed, regardless of the logsaver's current status.
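+
+A sketch of that scenario (the script names are placeholders):
+
+    webserver = Process(name = 'webserver', cmdline = './run_server.sh')
+    logsaver = Process(
+      name = 'logsaver',
+      cmdline = './checkpoint_logs.sh',
+      daemon = True,      # keep re-running the checkpointer periodically
+      ephemeral = True)   # its status never gates task completion
+
+    task = Task(name = 'webserver',
+                processes = [webserver, logsaver],
+                resources = Resources(cpu = 1.0, ram = 1*GB, disk = 1*GB))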
+
+#### min_duration
+
+Processes may succeed or fail multiple times during a single task's
+duration. Each of these is called a *process run*. `min_duration` is
+the minimum number of seconds the scheduler waits before running the
+same process.
+
+#### final
+
+Processes can be grouped into two classes: ordinary processes and
+finalizing processes. By default, Thermos processes are ordinary. They
+run as long as the task is considered healthy (i.e., no failure
+limits have been reached.) But once all regular Thermos processes
+finish or the task reaches a certain failure threshold, it
+moves into a "finalization" stage and runs all finalizing
+processes. These are typically processes necessary for cleaning up the
+task, such as log checkpointers, or perhaps e-mail notifications that
+the task completed.
+
+Finalizing processes may not depend upon ordinary processes or
+vice-versa, however finalizing processes may depend upon other
+finalizing processes and otherwise run as a typical process
+schedule.
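+
+For example, a log-uploading finalizer could be declared like this sketch
+(the commands are placeholders):
+
+    upload_logs = Process(
+      name = 'upload_logs',
+      cmdline = 'tar czf logs.tgz .logs && ./upload_logs.sh logs.tgz',
+      final = True)   # only runs once the task enters its finalization stage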
+
+#### logger
+
+The default behavior of Thermos is to store stderr/stdout logs in files which grow unbounded.
+In the event that you have large log volume, you may want to configure Thermos to automatically
+rotate logs after they grow to a certain size, which can prevent your job from using more than its
+allocated disk space.
+
+Logger objects specify a `destination` for Process logs which is, by default, `file` - a pair of
+`stdout` and `stderr` files. It's also possible to specify `console` to send logs to
+the Process stdout and stderr streams, `none` to suppress any log output, or `both` to send logs to
+files and console streams.
+
+The default Logger `mode` is `standard`, which lets the stdout and stderr streams grow without bound.
+
+  **Attribute Name**  | **Type**          | **Description**
+  ------------------- | :---------------: | ---------------------------------
+   **destination**    | LoggerDestination | Destination of logs. (Default: `file`)
+   **mode**           | LoggerMode        | Mode of the logger. (Default: `standard`)
+   **rotate**         | RotatePolicy      | An optional rotation policy. (Default: `Empty`)
+
+A RotatePolicy describes log rotation behavior when `mode` is set to `rotate`; it is ignored
+otherwise. If `rotate` is `Empty` or `RotatePolicy()` while `mode` is set to `rotate`, the
+defaults below are used.
+
+  **Attribute Name**  | **Type**     | **Description**
+  ------------------- | :----------: | ---------------------------------
+   **log_size**       | Integer      | Maximum size (in bytes) of an individual log file. (Default: 100 MiB)
+   **backups**        | Integer      | The maximum number of backups to retain. (Default: 5)
+
+An example process configuration is as follows:
+
+        process = Process(
+          name='process',
+          logger=Logger(
+            destination=LoggerDestination('both'),
+            mode=LoggerMode('rotate'),
+            rotate=RotatePolicy(log_size=5*MB, backups=5)
+          )
+        )
+
+Task Schema
+===========
+
+Tasks fundamentally consist of a `name` and a list of Process objects stored as the
+value of the `processes` attribute. Processes can be further constrained with
+`constraints`. By default, `name`'s value inherits from the first Process in the
+`processes` list, so for simple `Task` objects with one Process, `name`
+can be omitted. In Mesos, `resources` is also required.
+
+### Task Object
+
+   **param**               | **type**                         | **description**
+   ---------               | :---------:                      | ---------------
+   ```name```              | String                           | Task name (Required) (Default: ```processes[0].name```)
+   ```processes```         | List of ```Process``` objects    | List of ```Process``` objects bound to this task. (Required)
+   ```constraints```       | List of ```Constraint``` objects | List of ```Constraint``` objects constraining processes.
+   ```resources```         | ```Resource``` object            | Resource footprint. (Required)
+   ```max_failures```      | Integer                          | Maximum process failures before being considered failed (Default: 1)
+   ```max_concurrency```   | Integer                          | Maximum number of concurrent processes (Default: 0, unlimited concurrency.)
+   ```finalization_wait``` | Integer                          | Amount of time allocated for finalizing processes, in seconds. (Default: 30)
+
+#### name
+`name` is a string denoting the name of this task. It defaults to the name of the first Process in
+the list of Processes associated with the `processes` attribute.
+
+#### processes
+
+`processes` is an unordered list of `Process` objects. To constrain the order
+in which they run, use `constraints`.
+
+##### constraints
+
+A list of `Constraint` objects. Currently it supports only one type,
+the `order` constraint. `order` is a list of process names
+that should run in the order given. For example,
+
+        process = Process(cmdline = "echo hello {{name}}")
+        task = Task(name = "echoes",
+                    processes = [process(name = "jim"), process(name = "bob")],
+                    constraints = [Constraint(order = ["jim", "bob"])])
+
+Constraints can be supplied ad-hoc and in duplicate. Not all
+Processes need to be constrained; however, Tasks with cycles are
+rejected by the Thermos scheduler.
+
+Use the `order` function as shorthand to generate `Constraint` lists.
+The following:
+
+        order(process1, process2)
+
+is shorthand for
+
+        [Constraint(order = [process1.name(), process2.name()])]
+
+The `order` function accepts Process name strings `('foo', 'bar')` or the processes
+themselves, e.g. `foo=Process(name='foo', ...)`, `bar=Process(name='bar', ...)`,
+`constraints=order(foo, bar)`.
+
+#### resources
+
+Takes a `Resource` object, which specifies the amounts of CPU, memory, and disk space resources
+to allocate to the Task.
+
+#### max_failures
+
+`max_failures` is the number of failed processes needed for the `Task` to be
+marked as failed.
+
+For example, assume a Task has two Processes and a `max_failures` value of `2`:
+
+        template = Process(max_failures=10)
+        task = Task(
+          name = "fail",
+          processes = [
+             template(name = "failing", cmdline = "exit 1"),
+             template(name = "succeeding", cmdline = "exit 0")
+          ],
+          max_failures=2)
+
+The `failing` Process could fail 10 times before being marked as permanently
+failed, and the `succeeding` Process could succeed on the first run. However,
+the task would succeed despite only allowing for two failed processes. To be more
+specific, there would be 10 failed process runs yet 1 failed process. Both processes
+would have to fail for the Task to fail.
+
+#### max_concurrency
+
+For Tasks with a number of expensive but otherwise independent
+processes, you may want to limit the amount of concurrency
+the Thermos scheduler provides rather than artificially constraining
+it via `order` constraints. For example, a test framework may
+generate a task with 100 test run processes, but wants to run it on
+a machine with only 4 cores. You can limit the amount of parallelism to
+4 by setting `max_concurrency=4` in your task configuration.
+
+For example, the following task spawns 180 Processes ("mappers")
+to compute individual elements of a 180 degree sine table, all dependent
+upon one final Process ("reducer") to tabulate the results:
+
+    def make_mapper(id):
+      return Process(
+        name = "mapper%03d" % id,
+        cmdline = "echo 'scale=50;s(%d*4*a(1)/180)' | bc -l "
+                  "> temp.sine_table.%03d" % (id, id))
+
+    def make_reducer():
+      return Process(name = "reducer", cmdline = "cat temp.* | nl > sine_table.txt"
+                     " && rm -f temp.*")
+
+    processes = map(make_mapper, range(180))
+
+    task = Task(
+      name = "mapreduce",
+      processes = processes + [make_reducer()],
+      constraints = [Constraint(order = [mapper.name(), 'reducer']) for mapper
+                     in processes],
+      max_concurrency = 8)
+
+#### finalization_wait
+
+Process execution is organized into three active stages: `ACTIVE`,
+`CLEANING`, and `FINALIZING`. The `ACTIVE` stage is when ordinary processes run.
+This stage lasts as long as Processes are running and the Task is healthy.
+The moment either all Processes have finished successfully or the Task has reached a
+maximum Process failure limit, it goes into the `CLEANING` stage and sends
+SIGTERMs to all currently running Processes and their process trees.
+Once all Processes have terminated, the Task goes into `FINALIZING` stage
+and invokes the schedule of all Processes with the "final" attribute set to True.
+
+This whole process from the end of `ACTIVE` stage to the end of `FINALIZING`
+must happen within `finalization_wait` seconds. If it does not
+finish during that time, all remaining Processes are sent SIGKILLs
+(or if they depend upon uncompleted Processes, are
+never invoked.)
+
+When running on Aurora, the `finalization_wait` is capped at 60 seconds.
+
+### Constraint Object
+
+Current constraint objects only support a single ordering constraint, `order`,
+which specifies its processes run sequentially in the order given. By
+default, all processes run in parallel when bound to a `Task` without
+ordering constraints.
+
+   param | type           | description
+   ----- | :----:         | -----------
+   order | List of String | List of processes by name (String) that should be run serially.
+
+### Resource Object
+
+Specifies the amount of CPU, RAM, and disk resources the task needs. See the
+[Resource Isolation document](../../features/resource-isolation/) for suggested values and to understand how
+resources are allocated.
+
+  param      | type    | description
+  -----      | :----:  | -----------
+  ```cpu```  | Float   | Fractional number of cores required by the task.
+  ```ram```  | Integer | Bytes of RAM required by the task.
+  ```disk``` | Integer | Bytes of disk required by the task.
+  ```gpu```  | Integer | Number of GPU cores required by the task.
+
+
+Job Schema
+==========
+
+### Job Objects
+
+*Note: Specifying a ```Container``` object as the value of the ```container``` property is
+  deprecated in favor of setting its value directly to the appropriate ```Docker``` or ```Mesos```
+  container type*
+
+*Note: Specifying preemption behavior of tasks through `production` flag is deprecated in favor of
+  electing appropriate task tier via `tier` attribute.*
+
+   name | type | description
+   ------ | :-------: | -------
+  ```task``` | Task | The Task object to bind to this job. Required.
+  ```name``` | String | Job name. (Default: inherited from the task attribute's name)
+  ```role``` | String | Job role account. Required.
+  ```cluster``` | String | Cluster in which this job is scheduled. Required.
+  ```environment``` | String | Job environment, default ```devel```. Must be one of ```prod```, ```devel```, ```test``` or ```staging<number>```.
+  ```contact``` | String | Best email address to reach the owner of the job. For production jobs, this is usually a team mailing list.
+  ```instances```| Integer | Number of instances (sometimes referred to as replicas or shards) of the task to create. (Default: 1)
+  ```cron_schedule``` | String | Cron schedule in cron format. May only be used with non-service jobs. See [Cron Jobs](../../features/cron-jobs/) for more information. Default: None (not a cron job.)
+  ```cron_collision_policy``` | String | Policy to use when a cron job is triggered while a previous run is still active. KILL_EXISTING Kill the previous run, and schedule the new run CANCEL_NEW Let the previous run continue, and cancel the new run. (Default: KILL_EXISTING)
+  ```update_config``` | ```UpdateConfig``` object | Parameters for controlling the rate and policy of rolling updates.
+  ```constraints``` | dict | Scheduling constraints for the tasks. See the section on the [constraint specification language](#specifying-scheduling-constraints)
+  ```service``` | Boolean | If True, restart tasks regardless of success or failure. (Default: False)
+  ```max_task_failures``` | Integer | Maximum number of failures after which the task is considered to have failed (Default: 1) Set to -1 to allow for infinite failures
+  ```priority``` | Integer | Preemption priority to give the task (Default 0). Tasks with higher priorities may preempt tasks at lower priorities.
+  ```production``` | Boolean |  (Deprecated) Whether or not this is a production task that may [preempt](../../features/multitenancy/#preemption) other tasks (Default: False). Production job role must have the appropriate [quota](../../features/multitenancy/#preemption).
+  ```health_check_config``` | ```HealthCheckConfig``` object | Parameters for controlling a task's health checks. HTTP health check is only used if a  health port was assigned with a command line wildcard.
+  ```container``` | Choice of ```Container```, ```Docker``` or ```Mesos``` object | An optional container to run all processes inside of.
+  ```lifecycle``` | ```LifecycleConfig``` object | An optional task lifecycle configuration that dictates commands to be executed on startup/teardown.  HTTP lifecycle is enabled by default if the "health" port is requested.  See [LifecycleConfig Objects](#lifecycleconfig-objects) for more information.
+  ```tier``` | String | Task tier type. The default scheduler tier configuration allows for 3 tiers: `revocable`, `preemptible`, and `preferred`. If a tier is not elected, Aurora assigns the task to a tier based on its choice of `production` (that is `preferred` for production and `preemptible` for non-production jobs). See the section on [Configuration Tiers](../../features/multitenancy/#configuration-tiers) for more information.
+  ```announce``` | ```Announcer``` object | Optionally enable Zookeeper ServerSet announcements. See [Announcer Objects](#announcer-objects) for more information.
+  ```enable_hooks``` | Boolean | Whether to enable [Client Hooks](../client-hooks/) for this job. (Default: False)
+
+
+### UpdateConfig Objects
+
+Parameters for controlling the rate and policy of rolling updates.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+| ```watch_secs```             | Integer  | Minimum number of seconds a shard must remain in ```RUNNING``` state before considered a success (Default: 45)
+| ```max_per_shard_failures``` | Integer  | Maximum number of restarts per shard during update. Increments total failure count when this limit is exceeded. (Default: 0)
+| ```max_total_failures```     | Integer  | Maximum number of shard failures to be tolerated in total during an update. Cannot be greater than or equal to the total number of tasks in a job. (Default: 0)
+| ```rollback_on_failure```    | boolean  | When False, prevents auto rollback of a failed update (Default: True)
+| ```wait_for_batch_completion```| boolean | When True, all threads from a given batch will be blocked from picking up new instances until the entire batch is updated. This essentially simulates the legacy sequential updater algorithm. (Default: False)
+| ```pulse_interval_secs```    | Integer  |  Indicates a [coordinated update](../../features/job-updates/#coordinated-job-updates). If no pulses are received within the provided interval the update will be blocked. Beta-updater only. Will fail on submission when used with client updater. (Default: None)
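+
+As an illustrative sketch (the numbers are examples, not recommendations),
+an update policy that rolls out five instances at a time and tolerates a
+couple of restarts per instance could be expressed as follows and passed as
+the Job's `update_config` attribute:
+
+    update_config = UpdateConfig(
+      batch_size = 5,               # update five instances per iteration
+      watch_secs = 60,              # an instance must stay RUNNING for 60s to count
+      max_per_shard_failures = 2,   # allow two restarts of any one instance
+      max_total_failures = 5)       # abort after five failed instances overall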
+
+### HealthCheckConfig Objects
+
+Parameters for controlling a task's health checks via HTTP or a shell command.
+
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```health_checker```           | HealthCheckerConfig | Configure what kind of health check to use.
+| ```initial_interval_secs```    | Integer   | Initial grace period (during which health-check failures are ignored) while performing health checks. (Default: 15)
+| ```interval_secs```            | Integer   | Interval on which to check the task's health. (Default: 10)
+| ```max_consecutive_failures``` | Integer   | Maximum number of consecutive failures that will be tolerated before considering a task unhealthy (Default: 0)
+| ```min_consecutive_successes``` | Integer   | Minimum number of consecutive successful health checks required before considering a task healthy (Default: 1)
+| ```timeout_secs```             | Integer   | Health check timeout. (Default: 1)
+
+### HealthCheckerConfig Objects
+| param                          | type                | description
+| -------                        | :-------:           | --------
+| ```http```                     | HttpHealthChecker  | Configure health check to use HTTP. (Default)
+| ```shell```                    | ShellHealthChecker | Configure health check via a shell command.
+
+### HttpHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```endpoint```                 | String    | HTTP endpoint to check (Default: /health)
+| ```expected_response```        | String    | If not empty, fail the HTTP health check if the response differs. Case insensitive. (Default: ok)
+| ```expected_response_code```   | Integer   | If not zero, fail the HTTP health check if the response code differs. (Default: 0)
+
+### ShellHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```shell_command```            | String    | An alternative to HTTP health checking. Specifies a shell command that will be executed. Any non-zero exit status will be interpreted as a health check failure.
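+
+For example, a hedged sketch of switching from the default HTTP check to a
+shell-based check (the command is a placeholder), passed as the Job's
+`health_check_config` attribute:
+
+    health_check_config = HealthCheckConfig(
+      health_checker = HealthCheckerConfig(
+        shell = ShellHealthChecker(shell_command = './is_healthy.sh')),
+      initial_interval_secs = 30,
+      interval_secs = 15,
+      max_consecutive_failures = 3)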
+
+
+### Announcer Objects
+
+If the `announce` field in the Job configuration is set, each task will be
+registered in the ServerSet `/aurora/role/environment/jobname` in the
+zookeeper ensemble configured by the executor (which can optionally be overridden by specifying the
+`zk_path` parameter).  If no Announcer object is specified,
+no announcement will take place.  For more information about ServerSets, see the [Service Discovery](../../features/service-discovery/)
+documentation.
+
+By default, the hostname in the registered endpoints will be the `--hostname` parameter
+that is passed to the mesos agent. To override the hostname value, the executor can be started
+with `--announcer-hostname=<overridden_value>`. If you decide to use `--announcer-hostname` and if
+the overridden value needs to change for every executor, then the executor has to be started inside a wrapper; see [Executor Wrapper](../../operations/configuration/#thermos-executor-wrapper).
+
+For example, if you want the hostname in the endpoint to be an IP address instead of the hostname,
+the `--hostname` parameter to the mesos agent can be set to the machine IP or the executor can
+be started with `--announcer-hostname=<host_ip>` while wrapping the executor inside a script.
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```primary_port```             | String    | Which named port to register as the primary endpoint in the ServerSet (Default: `http`)
+| ```portmap```                  | dict      | A mapping of additional endpoints to be announced in the ServerSet (Default: `{ 'aurora': '{{primary_port}}' }`)
+| ```zk_path```                  | String    | Zookeeper serverset path override (executor must be started with the `--announcer-allow-custom-serverset-path` parameter)
+
+#### Port aliasing with the Announcer `portmap`
+
+The primary endpoint registered in the ServerSet is the one allocated to the port
+specified by the `primary_port` in the `Announcer` object, by default
+the `http` port.  This port can be referenced from anywhere within a configuration
+as `{{thermos.ports[http]}}`.
+
+Without the port map, each named port would be allocated a unique port number.
+The `portmap` allows two different named ports to be aliased together.  The default
+`portmap` aliases the `aurora` port (i.e. `{{thermos.ports[aurora]}}`) to
+the `http` port.  Even though the two ports can be referenced independently,
+only one port is allocated by Mesos.  Any port referenced in a `Process` object
+but which is not in the portmap will be allocated dynamically by Mesos and announced as well.
+
+It is possible to use the portmap to alias names to static port numbers, e.g.
+`{'http': 80, 'https': 443, 'aurora': 'http'}`.  In this case, referencing
+`{{thermos.ports[aurora]}}` would look up `{{thermos.ports[http]}}` then
+find a static port 80.  No port would be requested of or allocated by Mesos.
+
+Static ports should be used cautiously as Aurora does nothing to prevent two
+tasks with the same static port allocations from being co-scheduled.
+External constraints such as agent attributes should be used to enforce such
+guarantees should they be needed.
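+
+Tying these attributes together, a sketch of an `Announcer` that keeps the
+default primary port and aliases a static admin port (the port values are
+examples), set as the Job's `announce` attribute:
+
+    announce = Announcer(
+      primary_port = 'http',
+      portmap = {'aurora': 'http',   # the default alias, kept for illustration
+                 'admin': 8080})     # static port; use with caution (see above)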
+
+
+### Container Objects
+
+Describes the container the job's processes will run inside. If not using Docker or the Mesos
+unified-container, the container can be omitted from your job config.
+
+  param          | type           | description
+  -----          | :----:         | -----------
+  ```mesos```    | Mesos          | A native Mesos container to use.
+  ```docker```   | Docker         | A Docker container to use (via Docker engine)
+
+### Mesos Object
+
+  param            | type                           | description
+  -----            | :----:                         | -----------
+  ```image```      | Choice(AppcImage, DockerImage) | An optional filesystem image to use within this container.
+  ```volumes```    | List(Volume)                   | An optional list of volume mounts for this container.
+
+### Volume Object
+
+  param                  | type     | description
+  -----                  | :----:   | -----------
+  ```container_path```   | String   | Path on the host to mount.
+  ```volume_path```      | String   | Mount point in the container.
+  ```mode```             | Enum     | Mode of the mount, can be 'RW' or 'RO'.
+
+### AppcImage
+
+Describes an AppC filesystem image.
+
+  param          | type   | description
+  -----          | :----: | -----------
+  ```name```     | String | The name of the appc image.
+  ```image_id``` | String | The [image id](https://github.com/appc/spec/blob/master/spec/aci.md#image-id) of the appc image.
+
+### DockerImage
+
+Describes a Docker filesystem image.
+
+  param      | type   | description
+  -----      | :----: | -----------
+  ```name``` | String | The name of the docker image.
+  ```tag```  | String | The tag that identifies the docker image.
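+
+A sketch that ties the Mesos, Volume, and DockerImage objects together
+(image name, tag, and paths are placeholders), to be set as the Job's
+`container` attribute:
+
+    container = Mesos(
+      image = DockerImage(name = 'myteam/application', tag = '1.2.3'),
+      volumes = [Volume(container_path = '/var/shared',  # path on the host (per the table above)
+                        volume_path = 'shared',          # mount point in the container
+                        mode = 'RO')])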
+
+
+### Docker Object
+
+*Note: In order to correctly execute processes inside a job, the Docker container must have Python 2.7 installed.*
+*Note: For private docker registry, mesos mandates the docker credential file to be named as `.dockercfg`, even though docker may create a credential file with a different name on various platforms. Also, the `.dockercfg` file needs to be copied into the sandbox using the `-thermos_executor_resources` flag, specified while starting Aurora.*
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```image```      | String          | The name of the docker image to execute.  If the image does not exist locally it will be pulled with ```docker pull```.
+  ```parameters``` | List(Parameter) | Additional parameters to pass to the Docker engine.
+
+### Docker Parameter Object
+
+Docker CLI parameters. This needs to be enabled by the scheduler `-allow_docker_parameters` option.
+See [Docker Command Line Reference](https://docs.docker.com/reference/commandline/run/) for valid parameters.
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```name```       | String          | The name of the docker parameter. E.g. volume
+  ```value```      | String          | The value of the parameter. E.g. /usr/local/bin:/usr/bin:rw
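+
+For example, a hedged sketch of a Docker-engine container that passes one
+extra engine parameter (the image and value are placeholders; the scheduler
+must allow Docker parameters as noted above):
+
+    container = Docker(
+      image = 'myteam/application:1.2.3',
+      parameters = [Parameter(name = 'memory-swap', value = '-1')])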
+
+
+### LifecycleConfig Objects
+
+*Note: The only lifecycle configuration supported is the HTTP lifecycle via the HttpLifecycleConfig.*
+
+  param          | type                | description
+  -----          | :----:              | -----------
+  ```http```     | HttpLifecycleConfig | Configure the lifecycle manager to send lifecycle commands to the task via HTTP.
+
+### HttpLifecycleConfig Objects
+
+  param          | type            | description
+  -----          | :----:          | -----------
+  ```port```     | String          | The named port to send POST commands (Default: health)
+  ```graceful_shutdown_endpoint``` | String | Endpoint to hit to indicate that a task should gracefully shutdown. (Default: /quitquitquit)
+  ```shutdown_endpoint``` | String | Endpoint to hit to give a task its final warning before being killed. (Default: /abortabortabort)
+
+#### graceful_shutdown_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint to request that the task gracefully shut itself down.  This is a
+courtesy call before the `shutdown_endpoint` is invoked a fixed amount of
+time later.
+
+#### shutdown_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint as a final warning before the task is shut down.  If the task
+does not shut down on its own after this, it will be forcefully killed.
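+
+A sketch of configuring the lifecycle explicitly (the values shown are just
+the documented defaults made explicit), set as the Job's `lifecycle`
+attribute:
+
+    lifecycle = LifecycleConfig(
+      http = HttpLifecycleConfig(
+        port = 'health',
+        graceful_shutdown_endpoint = '/quitquitquit',
+        shutdown_endpoint = '/abortabortabort'))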
+
+
+Specifying Scheduling Constraints
+=================================
+
+In the `Job` object there is a map `constraints` from String to String
+allowing the user to tailor the schedulability of tasks within the job.
+
+The constraint map's key value is the attribute name in which we
+constrain Tasks within our Job. The value is how we constrain them.
+There are two types of constraints: *limit constraints* and *value
+constraints*.
+
+| constraint    | description
+| ------------- | --------------
+| Limit         | A string that specifies a limit for a constraint. Starts with <code>'limit:</code> followed by an Integer and closing single quote, such as ```'limit:1'```.
+| Value         | A string that specifies a value for a constraint. To include a list of values, separate the values using commas. To negate the values of a constraint, start with a ```!```.
+
+Further details can be found in the [Scheduling Constraints](../../features/constraints/) feature
+description.
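+
+For example, a hedged sketch of a constraints map combining both kinds (the
+`host` and `rack` attribute names depend on how your cluster's agents are
+configured):
+
+    constraints = {
+      'host': 'limit:1',   # limit constraint: at most one instance per host
+      'rack': '!rack-1a',  # value constraint: never schedule on the rack-1a value
+    }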
+
+
+Template Namespaces
+===================
+
+Currently, a few Pystachio namespaces have special semantics. Using them
+in your configuration allows you to tailor application behavior
+through environment introspection or to interact in special ways with the
+Aurora client or Aurora-provided services.
+
+### mesos Namespace
+
+The `mesos` namespace contains variables which relate to the `mesos` agent
+which launched the task. The `instance` variable can be used
+to distinguish between Task replicas.
+
+| variable name     | type       | description
+| --------------- | :--------: | -------------
+| ```instance```    | Integer    | The instance number of the created task. A job with 5 replicas has instance numbers 0, 1, 2, 3, and 4.
+| ```hostname``` | String | The instance hostname that the task was launched on.
+
+Please note, there is no uniqueness guarantee for `instance` in the presence of
+network partitions. If that is required, it should be baked in at the application
+level using a distributed coordination service such as Zookeeper.
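+
+For example, a sketch of a `Process` that surfaces these variables (the process itself is
+purely illustrative):
+
+```
+announce = Process(
+  name = 'announce',
+  cmdline = 'echo "instance {{mesos.instance}} running on {{mesos.hostname}}"'
+)
+```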
+
+### thermos Namespace
+
+The `thermos` namespace contains variables that work directly on the
+Thermos platform in addition to Aurora. This namespace is fully
+compatible with Tasks invoked via the `thermos` CLI.
+
+| variable      | type                     | description                        |
+| :----------:  | ---------                | ------------                       |
+| ```ports```   | map of string to Integer | A map of names to port numbers     |
+| ```task_id``` | string                   | The task ID assigned to this task. |
+
+The `thermos.ports` namespace is automatically populated by Aurora when
+invoking tasks on Mesos. When running the `thermos` command directly,
+these ports must be explicitly mapped with the `-P` option.
+
+For example, if '{{`thermos.ports[http]`}}' is specified in a `Process`
+configuration, it is automatically extracted and auto-populated by
+Aurora, but must be specified with, for example, `thermos -P http:12345`
+to map `http` to port 12345 when running via the CLI.
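+
+As a sketch, a `Process` that binds to a port allocated by Aurora via this namespace (the
+server command is a placeholder):
+
+```
+server = Process(
+  name = 'server',
+  cmdline = 'python -m SimpleHTTPServer {{thermos.ports[http]}}'
+)
+```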
diff --git a/source/documentation/0.18.0/reference/observer-configuration.md b/source/documentation/0.18.0/reference/observer-configuration.md
new file mode 100644
index 0000000..0126295
--- /dev/null
+++ b/source/documentation/0.18.0/reference/observer-configuration.md
@@ -0,0 +1,89 @@
+# Observer Configuration Reference
+
+The Aurora/Thermos observer can take a variety of configuration options through command-line arguments.
+A list of the available options can be seen by running `thermos_observer --long-help`.
+
+Please refer to the [Operator Configuration Guide](../../operations/configuration/) for details on how
+to properly set the most important options.
+
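+For illustration, a typical invocation might override only a few of these options (the values
+below are placeholders for your environment):
+
+```
+$ thermos_observer.pex --mesos-root=/var/lib/mesos --port=1338 --polling_interval_secs=5
+```
+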
+```
+$ thermos_observer.pex --long-help
+Options:
+  -h, --help, --short-help
+                        show this help message and exit.
+  --long-help           show options from all registered modules, not just the
+                        __main__ module.
+  --mesos-root=MESOS_ROOT
+                        The mesos root directory to search for Thermos
+                        executor sandboxes [default: /var/lib/mesos]
+  --ip=IP               The IP address the observer will bind to. [default:
+                        0.0.0.0]
+  --port=PORT           The port on which the observer should listen.
+                        [default: 1338]
+  --polling_interval_secs=POLLING_INTERVAL_SECS
+                        The number of seconds between observer refresh
+                        attempts. [default: 5]
+  --task_process_collection_interval_secs=TASK_PROCESS_COLLECTION_INTERVAL_SECS
+                        The number of seconds between per task process
+                        resource collections. [default: 20]
+  --task_disk_collection_interval_secs=TASK_DISK_COLLECTION_INTERVAL_SECS
+                        The number of seconds between per task disk resource
+                        collections. [default: 60]
+
+  From module twitter.common.app:
+    --app_daemonize     Daemonize this application. [default: False]
+    --app_profile_output=FILENAME
+                        Dump the profiling output to a binary profiling
+                        format. [default: None]
+    --app_daemon_stderr=TWITTER_COMMON_APP_DAEMON_STDERR
+                        Direct this app's stderr to this file if daemonized.
+                        [default: /dev/null]
+    --app_debug         Print extra debugging information during application
+                        initialization. [default: False]
+    --app_rc_filename   Print the filename for the rc file and quit. [default:
+                        False]
+    --app_daemon_stdout=TWITTER_COMMON_APP_DAEMON_STDOUT
+                        Direct this app's stdout to this file if daemonized.
+                        [default: /dev/null]
+    --app_profiling     Run profiler on the code while it runs.  Note this can
+                        cause slowdowns. [default: False]
+    --app_ignore_rc_file
+                        Ignore default arguments from the rc file. [default:
+                        False]
+    --app_pidfile=TWITTER_COMMON_APP_PIDFILE
+                        The pidfile to use if --app_daemonize is specified.
+                        [default: None]
+
+  From module twitter.common.log.options:
+    --log_to_stdout=[scheme:]LEVEL
+                        OBSOLETE - legacy flag, use --log_to_stderr instead.
+                        [default: ERROR]
+    --log_to_stderr=[scheme:]LEVEL
+                        The level at which logging to stderr [default: ERROR].
+                        Takes either LEVEL or scheme:LEVEL, where LEVEL is one
+                        of ['INFO', 'NONE', 'WARN', 'ERROR', 'DEBUG', 'FATAL']
+                        and scheme is one of ['google', 'plain'].
+    --log_to_disk=[scheme:]LEVEL
+                        The level at which logging to disk [default: INFO].
+                        Takes either LEVEL or scheme:LEVEL, where LEVEL is one
+                        of ['INFO', 'NONE', 'WARN', 'ERROR', 'DEBUG', 'FATAL']
+                        and scheme is one of ['google', 'plain'].
+    --log_dir=DIR       The directory into which log files will be generated
+                        [default: /var/tmp].
+    --log_simple        Write a single log file rather than one log file per
+                        log level [default: False].
+    --log_to_scribe=[scheme:]LEVEL
+                        The level at which logging to scribe [default: NONE].
+                        Takes either LEVEL or scheme:LEVEL, where LEVEL is one
+                        of ['INFO', 'NONE', 'WARN', 'ERROR', 'DEBUG', 'FATAL']
+                        and scheme is one of ['google', 'plain'].
+    --scribe_category=CATEGORY
+                        The category used when logging to the scribe daemon.
+                        [default: python_default].
+    --scribe_buffer     Buffer messages when scribe is unavailable rather than
+                        dropping them. [default: False].
+    --scribe_host=HOST  The host running the scribe daemon. [default:
+                        localhost].
+    --scribe_port=PORT  The port used to connect to the scribe daemon.
+                        [default: 1463].
+```
diff --git a/source/documentation/0.18.0/reference/scheduler-configuration.md b/source/documentation/0.18.0/reference/scheduler-configuration.md
new file mode 100644
index 0000000..42033d6
--- /dev/null
+++ b/source/documentation/0.18.0/reference/scheduler-configuration.md
@@ -0,0 +1,268 @@
+# Scheduler Configuration Reference
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+A list of the available options can be seen by running `aurora-scheduler -help`.
+
+Please refer to the [Operator Configuration Guide](../../operations/configuration/) for details on how
+to properly set the most important options.
+
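+As an illustrative sketch, an invocation setting a handful of the flags listed below might look
+like this (all values are placeholders; which flags you actually need depends on your storage and
+authentication choices, see the Operator Configuration Guide):
+
+```
+$ aurora-scheduler \
+    -cluster_name=example \
+    -backup_dir=/var/lib/aurora/backups \
+    -mesos_master_address=zk://zk1.example.com:2181/mesos \
+    -zk_endpoints=zk1.example.com:2181 \
+    -serverset_path=/aurora/scheduler \
+    -thermos_executor_path=/usr/share/aurora/bin/thermos_executor.pex
+```
+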
+```
+$ aurora-scheduler -help
+-------------------------------------------------------------------------
+-h or -help to print this help message
+
+Required flags:
+-backup_dir [not null]
+	Directory to store backups under. Will be created if it does not exist.
+-cluster_name [not null]
+	Name to identify the cluster being served.
+-db_max_active_connection_count [must be > 0]
+	Max number of connections to use with database via MyBatis
+-db_max_idle_connection_count [must be > 0]
+	Max number of idle connections to the database via MyBatis
+-framework_authentication_file
+	Properties file which contains framework credentials to authenticate with Mesosmaster. Must contain the properties 'aurora_authentication_principal' and 'aurora_authentication_secret'.
+-ip
+	The ip address to listen. If not set, the scheduler will listen on all interfaces.
+-mesos_master_address [not null]
+	Address for the mesos master, can be a socket address or zookeeper path.
+-mesos_role
+	The Mesos role this framework will register as. The default is to left this empty, and the framework will register without any role and only receive unreserved resources in offer.
+-serverset_path [not null, must be non-empty]
+	ZooKeeper ServerSet path to register at.
+-shiro_after_auth_filter
+	Fully qualified class name of the servlet filter to be applied after the shiro auth filters are applied.
+-thermos_executor_path
+	Path to the thermos executor entry point.
+-tier_config [file must be readable]
+	Configuration file defining supported task tiers, task traits and behaviors.
+-webhook_config [file must exist, file must be readable]
+	Path to webhook configuration file.
+-zk_endpoints [must have at least 1 item]
+	Endpoint specification for the ZooKeeper servers.
+
+Optional flags:
+-allow_container_volumes (default false)
+	Allow passing in volumes in the job. Enabling this could pose a privilege escalation threat.
+-allow_docker_parameters (default false)
+	Allow to pass docker container parameters in the job.
+-allow_gpu_resource (default false)
+	Allow jobs to request Mesos GPU resource.
+-allowed_container_types (default [MESOS])
+	Container types that are allowed to be used by jobs.
+-async_slot_stat_update_interval (default (1, mins))
+	Interval on which to try to update open slot stats.
+-async_task_stat_update_interval (default (1, hrs))
+	Interval on which to try to update resource consumption stats.
+-async_worker_threads (default 8)
+	The number of worker threads to process async task operations with.
+-backup_interval (default (1, hrs))
+	Minimum interval on which to write a storage backup.
+-cron_scheduler_num_threads (default 10)
+	Number of threads to use for the cron scheduler thread pool.
+-cron_scheduling_max_batch_size (default 10) [must be > 0]
+	The maximum number of triggered cron jobs that can be processed in a batch.
+-cron_start_initial_backoff (default (5, secs))
+	Initial backoff delay while waiting for a previous cron run to be killed.
+-cron_start_max_backoff (default (1, mins))
+	Max backoff delay while waiting for a previous cron run to be killed.
+-cron_timezone (default GMT)
+	TimeZone to use for cron predictions.
+-custom_executor_config [file must exist, file must be readable]
+	Path to custom executor settings configuration file.
+-db_lock_timeout (default (1, mins))
+	H2 table lock timeout
+-db_row_gc_interval (default (2, hrs))
+	Interval on which to scan the database for unused row references.
+-default_docker_parameters (default {})
+	Default docker parameters for any job that does not explicitly declare parameters.
+-dlog_max_entry_size (default (512, KB))
+	Specifies the maximum entry size to append to the log. Larger entries will be split across entry Frames.
+-dlog_shutdown_grace_period (default (2, secs))
+	Specifies the maximum time to wait for scheduled checkpoint and snapshot actions to complete before forcibly shutting down.
+-dlog_snapshot_interval (default (1, hrs))
+	Specifies the frequency at which snapshots of local storage are taken and written to the log.
+-enable_cors_for
+	List of domains for which CORS support should be enabled.
+-enable_db_metrics (default true)
+	Whether to use MyBatis interceptor to measure the timing of intercepted Statements.
+-enable_h2_console (default false)
+	Enable H2 DB management console.
+-enable_mesos_fetcher (default false)
+	Allow jobs to pass URIs to the Mesos Fetcher. Note that enabling this feature could pose a privilege escalation threat.
+-enable_preemptor (default true)
+	Enable the preemptor and preemption
+-enable_revocable_cpus (default true)
+	Treat CPUs as a revocable resource.
+-enable_revocable_ram (default false)
+	Treat RAM as a revocable resource.
+-executor_user (default root)
+	User to start the executor. Defaults to "root". Set this to an unprivileged user if the mesos master was started with "--no-root_submissions". If set to anything other than "root", the executor will ignore the "role" setting for jobs since it can't use setuid() anymore. This means that all your jobs will run under the specified user and the user has to exist on the Mesos agents.
+-first_schedule_delay (default (1, ms))
+	Initial amount of time to wait before first attempting to schedule a PENDING task.
+-flapping_task_threshold (default (5, mins))
+	A task that repeatedly runs for less than this time is considered to be flapping.
+-framework_announce_principal (default false)
+	When 'framework_authentication_file' flag is set, the FrameworkInfo registered with the mesos master will also contain the principal. This is necessary if you intend to use mesos authorization via mesos ACLs. The default will change in a future release. Changing this value is backwards incompatible. For details, see MESOS-703.
+-framework_failover_timeout (default (21, days))
+	Time after which a framework is considered deleted.  SHOULD BE VERY HIGH.
+-framework_name (default Aurora)
+	Name used to register the Aurora framework with Mesos.
+-global_container_mounts (default [])
+	A comma separated list of mount points (in host:container form) to mount into all (non-mesos) containers.
+-history_max_per_job_threshold (default 100)
+	Maximum number of terminated tasks to retain in a job history.
+-history_min_retention_threshold (default (1, hrs))
+	Minimum guaranteed time for task history retention before any pruning is attempted.
+-history_prune_threshold (default (2, days))
+	Time after which the scheduler will prune terminated task history.
+-hostname
+	The hostname to advertise in ZooKeeper instead of the locally-resolved hostname.
+-http_authentication_mechanism (default NONE)
+	HTTP Authentication mechanism to use.
+-http_port (default 0)
+	The port to start an HTTP server on.  Default value will choose a random port.
+-initial_flapping_task_delay (default (30, secs))
+	Initial amount of time to wait before attempting to schedule a flapping task.
+-initial_schedule_penalty (default (1, secs))
+	Initial amount of time to wait before attempting to schedule a task that has failed to schedule.
+-initial_task_kill_retry_interval (default (5, secs))
+	When killing a task, retry after this delay if mesos has not responded, backing off up to transient_task_state_timeout
+-job_update_history_per_job_threshold (default 10)
+	Maximum number of completed job updates to retain in a job update history.
+-job_update_history_pruning_interval (default (15, mins))
+	Job update history pruning interval.
+-job_update_history_pruning_threshold (default (30, days))
+	Time after which the scheduler will prune completed job update history.
+-kerberos_debug (default false)
+	Produce additional Kerberos debugging output.
+-kerberos_server_keytab
+	Path to the server keytab.
+-kerberos_server_principal
+	Kerberos server principal to use, usually of the form HTTP/aurora.example.com@EXAMPLE.COM
+-max_flapping_task_delay (default (5, mins))
+	Maximum delay between attempts to schedule a flapping task.
+-max_leading_duration (default (1, days))
+	After leading for this duration, the scheduler should commit suicide.
+-max_registration_delay (default (1, mins))
+	Max allowable delay to allow the driver to register before aborting
+-max_reschedule_task_delay_on_startup (default (30, secs))
+	Upper bound of random delay for pending task rescheduling on scheduler startup.
+-max_saved_backups (default 48)
+	Maximum number of backups to retain before deleting the oldest backups.
+-max_schedule_attempts_per_sec (default 40.0)
+	Maximum number of scheduling attempts to make per second.
+-max_schedule_penalty (default (1, mins))
+	Maximum delay between attempts to schedule a PENDING tasks.
+-max_status_update_batch_size (default 1000) [must be > 0]
+	The maximum number of status updates that can be processed in a batch.
+-max_task_event_batch_size (default 300) [must be > 0]
+	The maximum number of task state change events that can be processed in a batch.
+-max_tasks_per_job (default 4000) [must be > 0]
+	Maximum number of allowed tasks in a single job.
+-max_tasks_per_schedule_attempt (default 5) [must be > 0]
+	The maximum number of tasks to pick in a single scheduling attempt.
+-max_update_instance_failures (default 20000) [must be > 0]
+	Upper limit on the number of failures allowed during a job update. This helps cap potentially unbounded entries into storage.
+-min_offer_hold_time (default (5, mins))
+	Minimum amount of time to hold a resource offer before declining.
+-native_log_election_retries (default 20)
+	The maximum number of attempts to obtain a new log writer.
+-native_log_election_timeout (default (15, secs))
+	The timeout for a single attempt to obtain a new log writer.
+-native_log_file_path
+	Path to a file to store the native log data in.  If the parent directory doesnot exist it will be created.
+-native_log_quorum_size (default 1)
+	The size of the quorum required for all log mutations.
+-native_log_read_timeout (default (5, secs))
+	The timeout for doing log reads.
+-native_log_write_timeout (default (3, secs))
+	The timeout for doing log appends and truncations.
+-native_log_zk_group_path
+	A zookeeper node for use by the native log to track the master coordinator.
+-offer_filter_duration (default (5, secs))
+	Duration after which we expect Mesos to re-offer unused resources. A short duration improves scheduling performance in smaller clusters, but might lead to resource starvation for other frameworks if you run many frameworks in your cluster.
+-offer_hold_jitter_window (default (1, mins))
+	Maximum amount of random jitter to add to the offer hold time window.
+-offer_reservation_duration (default (3, mins))
+	Time to reserve a agent's offers while trying to satisfy a task preempting another.
+-populate_discovery_info (default false)
+	If true, Aurora populates DiscoveryInfo field of Mesos TaskInfo.
+-preemption_delay (default (3, mins))
+	Time interval after which a pending task becomes eligible to preempt other tasks
+-preemption_slot_finder_modules (default [class org.apache.aurora.scheduler.preemptor.PendingTaskProcessorModule, class org.apache.aurora.scheduler.preemptor.PreemptionVictimFilterModule])
+  Guice modules for replacing preemption logic.
+-preemption_slot_hold_time (default (5, mins))
+	Time to hold a preemption slot found before it is discarded.
+-preemption_slot_search_interval (default (1, mins))
+	Time interval between pending task preemption slot searches.
+-receive_revocable_resources (default false)
+	Allows receiving revocable resource offers from Mesos.
+-reconciliation_explicit_batch_interval (default (5, secs))
+	Interval between explicit batch reconciliation requests.
+-reconciliation_explicit_batch_size (default 1000) [must be > 0]
+	Number of tasks in a single batch request sent to Mesos for explicit reconciliation.
+-reconciliation_explicit_interval (default (60, mins))
+	Interval on which scheduler will ask Mesos for status updates of all non-terminal tasks known to scheduler.
+-reconciliation_implicit_interval (default (60, mins))
+	Interval on which scheduler will ask Mesos for status updates of all non-terminal tasks known to Mesos.
+-reconciliation_initial_delay (default (1, mins))
+	Initial amount of time to delay task reconciliation after scheduler start up.
+-reconciliation_schedule_spread (default (30, mins))
+	Difference between explicit and implicit reconciliation intervals intended to create a non-overlapping task reconciliation schedule.
+-require_docker_use_executor (default true)
+	If false, Docker tasks may run without an executor (EXPERIMENTAL)
+-scheduling_max_batch_size (default 3) [must be > 0]
+	The maximum number of scheduling attempts that can be processed in a batch.
+-serverset_endpoint_name (default http)
+	Name of the scheduler endpoint published in ZooKeeper.
+-shiro_ini_path
+	Path to shiro.ini for authentication and authorization configuration.
+-shiro_realm_modules (default [class org.apache.aurora.scheduler.http.api.security.IniShiroRealmModule])
+	Guice modules for configuring Shiro Realms.
+-sla_non_prod_metrics (default [])
+	Metric categories collected for non production tasks.
+-sla_prod_metrics (default [JOB_UPTIMES, PLATFORM_UPTIME, MEDIANS])
+	Metric categories collected for production tasks.
+-sla_stat_refresh_interval (default (1, mins))
+	The SLA stat refresh interval.
+-slow_query_log_threshold (default (25, ms))
+	Log all queries that take at least this long to execute.
+-slow_query_log_threshold (default (25, ms))
+	Log all queries that take at least this long to execute.
+-snapshot_hydrate_stores (default [locks, hosts, quota, job_updates])
+	Which H2-backed stores to fully hydrate on the Snapshot.
+-stat_retention_period (default (1, hrs))
+	Time for a stat to be retained in memory before expiring.
+-stat_sampling_interval (default (1, secs))
+	Statistic value sampling interval.
+-task_assigner_modules (default [class org.apache.aurora.scheduler.state.FirstFitTaskAssignerModule])
+  Guice modules for replacing task assignment logic.
+-thermos_executor_cpu (default 0.25)
+	The number of CPU cores to allocate for each instance of the executor.
+-thermos_executor_flags
+	Extra arguments to be passed to the thermos executor
+-thermos_executor_ram (default (128, MB))
+	The amount of RAM to allocate for each instance of the executor.
+-thermos_executor_resources (default [])
+	A comma separated list of additional resources to copy into the sandbox.Note: if thermos_executor_path is not the thermos_executor.pex file itself, this must include it.
+-thermos_home_in_sandbox (default false)
+	If true, changes HOME to the sandbox before running the executor. This primarily has the effect of causing the executor and runner to extract themselves into the sandbox.
+-transient_task_state_timeout (default (5, mins))
+	The amount of time after which to treat a task stuck in a transient state as LOST.
+-use_beta_db_task_store (default false)
+	Whether to use the experimental database-backed task store.
+-viz_job_url_prefix (default )
+	URL prefix for job container stats.
+-zk_chroot_path
+	chroot path to use for the ZooKeeper connections
+-zk_digest_credentials
+	user:password to use when authenticating with ZooKeeper.
+-zk_in_proc (default false)
+	Launches an embedded zookeeper server for local testing causing -zk_endpoints to be ignored if specified.
+-zk_session_timeout (default (4, secs))
+	The ZooKeeper session timeout.
+-zk_use_curator (default true)
+	DEPRECATED: Uses Apache Curator as the zookeeper client; otherwise a copy of Twitter commons/zookeeper (the legacy library) is used.
+-------------------------------------------------------------------------
+```
diff --git a/source/documentation/0.18.0/reference/scheduler-endpoints.md b/source/documentation/0.18.0/reference/scheduler-endpoints.md
new file mode 100644
index 0000000..ddae76b
--- /dev/null
+++ b/source/documentation/0.18.0/reference/scheduler-endpoints.md
@@ -0,0 +1,19 @@
+# HTTP endpoints
+
+There are a number of HTTP endpoints that the Aurora scheduler exposes. These allow various
+operational tasks to be performed on the scheduler. Below is an (incomplete) list of such endpoints
+and a brief explanation of what they do.
+
+## Leader health
+The /leaderhealth endpoint enables performing health checks on the scheduler instances in order
+to forward requests to the leading scheduler. This is typically used by a load balancer such as
+HAProxy or AWS ELB.
+
+When an HTTP GET request is issued on this endpoint, it responds as follows:
+
+- If the instance that received the GET request is the leading scheduler, an HTTP status code of
+  `200 OK` is returned.
+- If the instance that received the GET request is not the leading scheduler but a leader does
+  exist, an HTTP status code of `503 SERVICE_UNAVAILABLE` is returned.
+- If no leader currently exists or the leader is unknown, an HTTP status code of `502 BAD_GATEWAY`
+  is returned.
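+
+For example, a health check can be approximated with curl; assuming the scheduler's HTTP server
+listens on port 8081 (a placeholder, the port is controlled by `-http_port`), the leading
+instance responds with 200:
+
+```
+$ curl -s -o /dev/null -w '%{http_code}\n' http://scheduler1.example.com:8081/leaderhealth
+200
+```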
diff --git a/source/documentation/0.18.0/reference/task-lifecycle.md b/source/documentation/0.18.0/reference/task-lifecycle.md
new file mode 100644
index 0000000..cf1b679
--- /dev/null
+++ b/source/documentation/0.18.0/reference/task-lifecycle.md
@@ -0,0 +1,148 @@
+# Task Lifecycle
+
+When Aurora reads a configuration file and finds a `Job` definition, it:
+
+1.  Evaluates the `Job` definition.
+2.  Splits the `Job` into its constituent `Task`s.
+3.  Sends those `Task`s to the scheduler.
+4.  The scheduler puts the `Task`s into `PENDING` state, starting each
+    `Task`'s life cycle.
+
+
+![Life of a task](../images/lifeofatask.png)
+
+Please note, a couple of task states described below are missing from
+this state diagram.
+
+
+## PENDING to RUNNING states
+
+When a `Task` is in the `PENDING` state, the scheduler constantly
+searches for machines satisfying that `Task`'s resource request
+requirements (RAM, disk space, CPU time) while maintaining configuration
+constraints such as "a `Task` must run on machines dedicated to a
+particular role" or attribute limit constraints such as "at most 2
+`Task`s from the same `Job` may run on each rack". When the scheduler
+finds a suitable match, it assigns the `Task` to a machine and puts the
+`Task` into the `ASSIGNED` state.
+
+From the `ASSIGNED` state, the scheduler sends an RPC to the agent
+machine containing `Task` configuration, which the agent uses to spawn
+an executor responsible for the `Task`'s lifecycle. When the scheduler
+receives an acknowledgment that the machine has accepted the `Task`,
+the `Task` goes into `STARTING` state.
+
+`STARTING` state initializes a `Task` sandbox. When the sandbox is fully
+initialized, Thermos begins to invoke `Process`es. The agent
+machine sends an update to the scheduler that the `Task` is
+in `RUNNING` state only after the task satisfies its liveness requirements.
+See [Health Checking](../features/services#health-checking) for details
+on how to configure health checks.
+
+
+
+## RUNNING to terminal states
+
+There are various ways that an active `Task` can transition into a terminal
+state. By definition, it can never leave this state. However, depending on
+the nature of the termination and the originating `Job` definition
+(e.g. `service`, `max_task_failures`), a replacement `Task` might be
+scheduled.
+
+### Natural Termination: FINISHED, FAILED
+
+A `RUNNING` `Task` can terminate without direct user interaction. For
+example, it may be a finite computation that finishes, even something as
+simple as `echo hello world.`, or it could be an exceptional condition in
+a long-lived service. If the `Task` is successful (its underlying
+processes have succeeded with exit status `0` or finished without
+reaching failure limits) it moves into `FINISHED` state. If it finished
+after reaching a set of failure limits, it goes into `FAILED` state.
+
+A terminated `Task` which is subject to rescheduling will be temporarily
+`THROTTLED` if it is considered to be flapping. A task is flapping if its
+previous invocation was terminated after less than 5 minutes (scheduler
+default). The time penalty a task has to remain in the `THROTTLED` state,
+before it is eligible for rescheduling, increases with each consecutive
+failure.
+
+### Forceful Termination: KILLING, RESTARTING
+
+You can terminate a `Task` by issuing an `aurora job kill` command, which
+moves it into `KILLING` state. The scheduler then sends the agent a
+request to terminate the `Task`. If the scheduler receives a successful
+response, it moves the Task into `KILLED` state and never restarts it.
+
+If a `Task` is forced into the `RESTARTING` state via the `aurora job restart`
+command, the scheduler kills the underlying task but in parallel schedules
+an identical replacement for it.
+
+In any case, the responsible executor on the agent follows an escalation
+sequence when killing a running task:
+
+  1. If a `HttpLifecycleConfig` is not present, skip to (4).
+  2. Send a POST to the `graceful_shutdown_endpoint` and wait 5 seconds.
+  3. Send a POST to the `shutdown_endpoint` and wait 5 seconds.
+  4. Send SIGTERM (`kill`) and wait at most `finalization_wait` seconds.
+  5. Send SIGKILL (`kill -9`).
+
+If the executor notices that all `Process`es in a `Task` have aborted
+during this sequence, it will not proceed with subsequent steps.
+Note that graceful shutdown is best-effort, and due to the many
+inevitable realities of distributed systems, it may not be performed.
+
+### Unexpected Termination: LOST
+
+If a `Task` stays in a transient task state for too long (such as `ASSIGNED`
+or `STARTING`), the scheduler forces it into `LOST` state, creating a new
+`Task` in its place that's sent into `PENDING` state.
+
+In addition, if the Mesos core tells the scheduler that an agent has
+become unhealthy (or outright disappeared), the `Task`s assigned to that
+agent go into `LOST` state and new `Task`s are created in their place.
+From `PENDING` state, there is no guarantee a `Task` will be reassigned
+to the same machine unless job constraints explicitly force it there.
+
+### Giving Priority to Production Tasks: PREEMPTING
+
+Sometimes a Task needs to be interrupted, such as when a non-production
+Task's resources are needed by a higher priority production Task. This
+type of interruption is called a *pre-emption*. When this happens in
+Aurora, the non-production Task is killed and moved into
+the `PREEMPTING` state when both of the following are true:
+
+- The task being killed is a non-production task.
+- The other task is a `PENDING` production task that hasn't been
+  scheduled due to a lack of resources.
+
+The scheduler UI shows the non-production task was preempted in favor of
+the production task. At some point, tasks in `PREEMPTING` move to `KILLED`.
+
+Note that non-production tasks consuming many resources are likely to be
+preempted in favor of production tasks.
+
+### Making Room for Maintenance: DRAINING
+
+Cluster operators can put an agent into maintenance mode. This will transition
+all `Task`s running on this agent into `DRAINING` and eventually to `KILLED`.
+Drained `Task`s will be restarted on other agents for which no maintenance
+has been announced yet.
+
+
+
+## State Reconciliation
+
+Due to the many inevitable realities of distributed systems, there might
+be a mismatch of perceived and actual cluster state (e.g. a machine returns
+from a `netsplit` but the scheduler has already marked all its `Task`s as
+`LOST` and rescheduled them).
+
+Aurora regularly runs a state reconciliation process in order to detect
+and correct such issues (e.g. by killing the errant `RUNNING` tasks).
+By default, the proper detection of all failure scenarios and inconsistencies
+may take up to an hour.
+
+To emphasize this point: there is no uniqueness guarantee for a single
+instance of a job in the presence of network partitions. If the `Task`
+requires that, it should be baked in at the application level using a
+distributed coordination service such as Zookeeper.
diff --git a/source/documentation/0.18.1/additional-resources/presentations.md b/source/documentation/0.18.1/additional-resources/presentations.md
new file mode 100644
index 0000000..95d01bd
--- /dev/null
+++ b/source/documentation/0.18.1/additional-resources/presentations.md
@@ -0,0 +1,80 @@
+# Apache Aurora Presentations
+Video and slides from presentations and panel discussions about Apache Aurora.
+
+_(Listed in date descending order)_
+
+<table>
+
+	<tr>
+		<td><img src="/documentation/0.18.1/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png" alt="Mesos and Aurora on a Small Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=q5iIqhaCJ_o">Mesos &amp; Aurora on a Small Scale (Video)</a></strong>
+		<p>Presented by Florian Pfeiffer</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.18.1/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png" alt="SLA Aware Maintenance for Operators Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=tZ0-SISvCis">SLA Aware Maintenance for Operators (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.18.1/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png" alt="Shipping Code with Aurora Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=y1hi7K1lPkk">Shipping Code with Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.18.1/images/presentations/09_20_2015_twitter_production_scale_thumb.png" alt="Twitter Production Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=nNrh-gdu9m4">Twitter’s Production Scale: Mesos and Aurora Operations (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.18.1/images/presentations/04_30_2015_monolith_to_microservices_thumb.png" alt="From Monolith to Microservices with Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=yXkOgnyK4Hw">From Monolith to Microservices w/ Aurora (Video)</a></strong>
+		<p>Presented by Thanos Baskous, Tony Dong, Dobromir Montauk</p>
+		<p>April 30, 2015 at <a href="http://www.meetup.com/Bay-Area-Apache-Aurora-Users-Group/events/221219480/">Bay Area Apache Aurora Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.18.1/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png" alt="Aurora + Mesos in Practice at Twitter Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=1XYJGX_qZVU">Aurora + Mesos in Practice at Twitter (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 07, 2015 at <a href="http://www.bigeng.io/aurora-mesos-in-practice-at-twitter">Bigcommerce TechTalk</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.18.1/images/presentations/02_28_2015_apache_aurora_thumb.png" alt="Apache Auroraの始めかた Slideshow Thumbnail" /></td>
+		<td><strong><a href="http://www.slideshare.net/zembutsu/apache-aurora-introduction-and-tutorial-osc15tk">Apache Auroraの始めかた (Slides)</a></strong>
+		<p>Presented by Masahito Zembutsu</p>
+		<p>February 28, 2015 at <a href="http://www.ospn.jp/osc2015-spring/">Open Source Conference 2015 Tokyo Spring</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.18.1/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png" alt="Apache Aurora Adopters Panel Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=2Jsj0zFdRlg">Apache Aurora Adopters Panel (Video)</a></strong>
+		<p>Panelists Ben Staffin, Josh Adams, Bill Farner, Berk Demir</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.18.1/images/presentations/02_19_2015_aurora_at_twitter_thumb.png" alt="Operating Apache Aurora and Mesos at Twitter Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=E4lxX6epM_U">Operating Apache Aurora and Mesos at Twitter (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.18.1/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png" alt="Apache Aurora and Mesos at TellApart" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=ZZXtXLvTXAE">Apache Aurora and Mesos at TellApart (Video)</a></strong>
+		<p>Presented by Steve Niemitz</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.18.1/images/presentations/08_21_2014_past_present_future_thumb.png" alt="Past, Present, and Future of the Aurora Scheduler Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=Dsc5CPhKs4o">Past, Present, and Future of the Aurora Scheduler (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 21, 2014 at <a href="http://events.linuxfoundation.org/events/archive/2014/mesoscon">#MesosCon 2014</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.18.1/images/presentations/03_25_2014_introduction_to_aurora_thumb.png" alt="Introduction to Apache Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=asd_h6VzaJc">Introduction to Apache Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 25, 2014 at <a href="https://www.eventbrite.com/e/aurora-and-mesosframeworksmeetup-tickets-10850994617">Aurora and Mesos Frameworks Meetup</a></p></td>
+	</tr>
+</table>
diff --git a/source/documentation/0.18.1/additional-resources/tools.md b/source/documentation/0.18.1/additional-resources/tools.md
new file mode 100644
index 0000000..7f3613d
--- /dev/null
+++ b/source/documentation/0.18.1/additional-resources/tools.md
@@ -0,0 +1,24 @@
+# Tools
+
+Various tools integrate with Aurora. Is there a tool missing? Let us know, or submit a patch to add it!
+
+* Load-balancing technology used to direct traffic to services running on Aurora:
+  - [synapse](https://github.com/airbnb/synapse) based on HAProxy
+  - [aurproxy](https://github.com/tellapart/aurproxy) based on nginx
+  - [jobhopper](https://github.com/benley/aurora-jobhopper) performs HTTP redirects for easy developer and administrator access
+
+* RPC libraries that integrate with Aurora's [service discovery mechanism](../../features/service-discovery/):
+  - [linkerd](https://linkerd.io/) RPC proxy
+  - [finagle](https://twitter.github.io/finagle) (Scala)
+  - [scales](https://github.com/steveniemitz/scales) (Python)
+
+* Monitoring:
+  - [collectd-aurora](https://github.com/zircote/collectd-aurora) for cluster monitoring using collectd
+  - [Prometheus Aurora exporter](https://github.com/tommyulfsparre/aurora_exporter) for cluster monitoring using Prometheus
+  - [Prometheus service discovery integration](http://prometheus.io/docs/operating/configuration/#zookeeper-serverset-sd-configurations-serverset_sd_config) for discovering and monitoring services running on Aurora
+
+* Packaging and deployment:
+  - [aurora-packaging](https://github.com/apache/aurora-packaging), the source of the official Aurora packages
+
+* Thrift Clients:
+  - [gorealis](https://github.com/rdelval/gorealis) for communicating with the scheduler using Go
diff --git a/source/documentation/0.18.1/contributing.md b/source/documentation/0.18.1/contributing.md
new file mode 100644
index 0000000..d3d1a1f
--- /dev/null
+++ b/source/documentation/0.18.1/contributing.md
@@ -0,0 +1,93 @@
+## Get the Source Code
+
+First things first, you'll need the source! The Aurora source is available from Apache git:
+
+    git clone https://gitbox.apache.org/repos/asf/aurora
+
+Read the Style Guides
+---------------------
+Aurora's codebase is primarily Java and Python and conforms to the Twitter Commons styleguides for
+both languages.
+
+- [Java Style Guide](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/styleguide.md)
+- [Python Style Guide](https://github.com/twitter/commons/blob/master/src/python/twitter/common/styleguide.md)
+
+## Find Something to Do
+
+There are issues in [Jira](https://issues.apache.org/jira/browse/AURORA) with the
+["newbie" label](https://issues.apache.org/jira/issues/?jql=project%20%3D%20AURORA%20AND%20labels%20%3D%20newbie%20and%20resolution%3Dunresolved)
+that are good starting places for new Aurora contributors; pick one of these and dive in! To assign
+a task to yourself, first ask for your JIRA id to be whitelisted by either asking in IRC/Slack or by
+emailing dev@aurora.apache.org. Once your JIRA account has been whitelisted you can assign tickets
+to yourself. The next step is to prepare your patch and finally post it for review.
+
+## Getting your ReviewBoard Account
+
+Go to https://reviews.apache.org and create an account.
+
+## Setting up your ReviewBoard Environment
+
+Run `./rbt status`. The first time this runs it will bootstrap and you will be asked to login.
+Subsequent runs will cache your login credentials.
+
+## Submitting a Patch for Review
+
+Post a review with `rbt`, fill out the fields in your browser and hit Publish.
+
+    ./rbt post -o
+
+If you're unsure about who to add as a reviewer, you can default to adding Zameer Manji (zmanji) and
+Joshua Cohen (jcohen). They will take care of finding an appropriate reviewer for the patch.
+
+Once you've done this, you probably want to mark the associated Jira issue as Reviewable.
+
+## Updating an Existing Review
+
+Incorporate review feedback, make some more commits, update your existing review, fill out the
+fields in your browser and hit Publish.
+
+    ./rbt post -o -r <RB_ID>
+
+## Getting Your Review Merged
+
+If you're not an Aurora committer, one of the committers will merge your change in as described
+below. Generally, the last reviewer to give the review a 'Ship It!' will be responsible.
+
+### Merging Your Own Review (Committers)
+
+Once you have shipits from the right committers, merge your changes in a single commit and mark
+the review as submitted. The typical workflow is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>  # Verify the automatically-generated commit message looks sane,
+                            # editing if necessary.
+    git show master         # Verify everything looks sane
+    git push origin master
+    ./rbt close <RB_ID>
+
+Note that even if you're developing using feature branches you will not use `git merge` - each
+commit will be an atomic change accompanied by a ReviewBoard entry.
+
+### Merging Someone Else's Review
+
+Sometimes you'll need to merge someone else's RB. The typical workflow for this is
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>
+    git show master  # Verify everything looks sane, author is correct
+    git push origin master
+
+Note for committers: while we generally use the commit message generated by `./rbt patch` some
+changes are often required:
+
+1. Ensure that the commit message does not exceed 100 characters per line.
+2. Remove the "Testing Done" section. It's generally redundant (can be seen by checking the linked
+  review) or entirely irrelevant to the commit itself.
+
+## Cleaning Up
+
+Your patch has landed, congratulations! The last thing you'll want to do before moving on to your
+next fix is to clean up your Jira and ReviewBoard entries: the former should be marked as
+"Resolved" and the latter as "Submitted".
diff --git a/source/documentation/0.18.1/development/client.md b/source/documentation/0.18.1/development/client.md
new file mode 100644
index 0000000..c6bec5b
--- /dev/null
+++ b/source/documentation/0.18.1/development/client.md
@@ -0,0 +1,148 @@
+Developing the Aurora Client
+============================
+
+The client is written in Python, and uses the
+[Pants](http://pantsbuild.github.io/python-readme.html) build tool.
+
+
+Building and Testing
+--------------------
+
+Building and testing the client code are both done using Pants. The relevant targets to know about
+are:
+
+   * Build a client executable: `./pants binary src/main/python/apache/aurora/client:aurora`
+   * Test client code: `./pants test src/test/python/apache/aurora/client/cli:cli`
+
+If you want to build a source distribution of the client, you need to run `./build-support/release/make-python-sdists`.
+
+
+Creating Custom Builds
+----------------------
+
+There are situations where you may want to plug in custom logic to the Client that may not be
+applicable to the open source codebase. Rather than create a whole CLI from scratch, you can
+easily create your own custom, drop-in replacement aurora.pex using the pants build tool.
+
+First, create an AuroraCommandLine implementation as an entry-point for registering customizations:
+
+    from apache.aurora.client.cli.client import AuroraCommandLine
+
+    class CustomAuroraCommandLine(AuroraCommandLine):
+      """Custom AuroraCommandLine for your needs"""
+
+      @property
+      def name(self):
+        return "your-company-aurora"
+
+      @classmethod
+      def get_description(cls):
+        return 'Your Company internal Aurora client command line'
+
+      def __init__(self):
+        super(CustomAuroraCommandLine, self).__init__()
+        # Add custom plugins.
+        self.register_plugin(YourCustomPlugin())
+
+      def register_nouns(self):
+        super(CustomAuroraCommandLine, self).register_nouns()
+        # You can even add new commands / sub-commands!
+        self.register_noun(YourStartUpdateProxy())
+        self.register_noun(YourDeployWorkflowCommand())
+
+Secondly, create a main entry point:
+
+    import sys
+
+    def proxy_main():
+      client = CustomAuroraCommandLine()
+      if len(sys.argv) == 1:
+        sys.argv.append("-h")
+      sys.exit(client.execute(sys.argv[1:]))
+
+Finally, you can wire everything up with a pants BUILD file in your project directory:
+
+    python_binary(
+      name='aurora',
+      entry_point='your_company.aurora.client:proxy_main',
+      dependencies=[
+        ':client_lib'
+      ]
+    )
+
+    python_library(
+      name='client_lib',
+      sources = [
+        'client.py',
+        'custom_plugin.py',
+        'custom_command.py',
+      ],
+      dependencies = [
+        # The Apache Aurora client
+        # Any other dependencies for your custom code
+      ],
+    )
+
+Using the same commands to build the client as above (but obviously pointing to this BUILD file
+instead), you will have a drop-in replacement aurora.pex file with your customizations.
+
+Running/Debugging
+------------------
+
+For manually testing client changes against a cluster, we use [Vagrant](https://www.vagrantup.com/).
+To start a virtual cluster, you need to install Vagrant, and then run `vagrant up` from the root of
+the aurora workspace. This will create a vagrant host named "devcluster", with a Mesos master, a set
+of Mesos agents, and an Aurora scheduler.
+
+If you have a change you would like to test in your local cluster, you'll rebuild the client:
+
+    vagrant ssh -c 'aurorabuild client'
+
+Once this completes, the `aurora` command will reflect your changes.
+
+
+Running/Debugging in PyCharm
+-----------------------------
+
+It's possible to use PyCharm to run and debug both the client and client tests in an IDE. In order
+to do this, first run:
+
+    build-support/python/make-pycharm-virtualenv
+
+This script will configure a virtualenv with all of our Python requirements. Once the script
+completes it will emit instructions for configuring PyCharm:
+
+    Your PyCharm environment is now set up.  You can open the project root
+    directory with PyCharm.
+
+    Once the project is loaded:
+      - open project settings
+      - click 'Project Interpreter'
+      - click the cog in the upper-right corner
+      - click 'Add Local'
+      - select 'build-support/python/pycharm.venv/bin/python'
+      - click 'OK'
+
+### Running/Debugging Tests
+After following these instructions, you should now be able to run/debug tests directly from the IDE
+by right-clicking on a test (or test class) and choosing to run or debug:
+
+[![Debug Client Test](../images/debug-client-test.png)](../images/debug-client-test.png)
+
+If you've set a breakpoint, you can see the run will now stop and let you debug:
+
+[![Debugging Client Test](../images/debugging-client-test.png)](../images/debugging-client-test.png)
+
+### Running/Debugging the Client
+Actually running and debugging the client is unfortunately a bit more complex. You'll need to create
+a Run configuration:
+
+* Go to Run → Edit Configurations
+* Click the + icon to add a new configuration.
+* Choose python and name the configuration 'client'.
+* Set the script path to `/your/path/to/aurora/src/main/python/apache/aurora/client/cli/client.py`
+* Set the script parameters to the command you want to run (e.g. `job status <job key>`)
+* Expand the Environment section and click the ellipsis to add a new environment variable
+* Click the + at the bottom to add a new variable named AURORA_CONFIG_ROOT whose value is the
+  path where your cluster configuration can be found. For example, to talk to the scheduler
+  running in the vagrant image, it would be set to `/your/path/to/aurora/examples/vagrant` (this
+  is the directory where our example clusters.json is found).
+* You should now be able to run and debug this configuration!
diff --git a/source/documentation/0.18.1/development/committers-guide.md b/source/documentation/0.18.1/development/committers-guide.md
new file mode 100644
index 0000000..5c377bc
--- /dev/null
+++ b/source/documentation/0.18.1/development/committers-guide.md
@@ -0,0 +1,102 @@
+Committer's Guide
+=================
+
+Information for official Apache Aurora committers.
+
+Setting up your email account
+-----------------------------
+Once your Apache ID has been set up you can configure your account, add ssh keys, and set up an
+email forwarding address at
+
+    http://id.apache.org
+
+Additional instructions for setting up your new committer email can be found at
+
+    http://www.apache.org/dev/user-email.html
+
+The recommended setup is to configure all services (mailing lists, JIRA, ReviewBoard) to send
+emails to your @apache.org email address.
+
+
+Creating a gpg key for releases
+-------------------------------
+In order to create a release candidate you will need a gpg key published to an external key server
+and that key will need to be added to our KEYS file as well.
+
+1. Create a key:
+
+               gpg --gen-key
+
+2. Add your gpg key to the Apache Aurora KEYS file:
+
+               git clone https://gitbox.apache.org/repos/asf/aurora.git
+               (gpg --list-sigs <KEY ID> && gpg --armor --export <KEY ID>) >> KEYS
+               git add KEYS && git commit -m "Adding gpg key for <APACHE ID>"
+               ./rbt post -o -g
+
+3. Publish the key to an external key server:
+
+               gpg --keyserver pgp.mit.edu --send-keys <KEY ID>
+
+4. Commit the updated KEYS file to the Apache Aurora svn dist locations listed below:
+
+               https://dist.apache.org/repos/dist/dev/aurora/KEYS
+               https://dist.apache.org/repos/dist/release/aurora/KEYS
+
+5. Add your key to git config for use with the release scripts:
+
+               git config --global user.signingkey <KEY ID>
+
+
+Creating a release
+------------------
+The following will guide you through the steps to create a release candidate, vote, and finally an
+official Apache Aurora release. Before starting, your gpg key should be in the KEYS file and you
+must have access to commit to the dist.a.o repositories.
+
+1. Ensure that all issues resolved for this release candidate are tagged with the correct Fix
+Version in JIRA; the changelog script will use this to generate the CHANGELOG in step #3.
+To assign the fix version:
+
+    * Look up the [previous release date](https://issues.apache.org/jira/browse/aurora/?selectedTab=com.atlassian.jira.jira-projects-plugin:versions-panel).
+    * Query all issues resolved after that release date: `project = AURORA AND status in (resolved, Closed) and fixVersion is empty and resolutiondate >= "YYYY/MM/DD"`
+    * In the upper right corner of the query result, select Tools > Bulk Edit.
+    * Select all issues > edit issue > set 'Change Fix Version/s' to the release version.
+    * Make sure to uncheck 'Send mail for this update' at the bottom.
+
+2. Prepare RELEASE-NOTES.md for the release. This just boils down to removing the "(Not yet
+released)" suffix from the impending release.
+
+3. Create a release candidate. This will automatically update the CHANGELOG and commit it, create a
+branch and update the current version within the trunk. To create a minor version update and publish
+it run
+
+               ./build-support/release/release-candidate -l m -p
+
+4. Update, if necessary, the draft email created from the `release-candidate` script in step #3 and
+send the [VOTE] email to the dev@ mailing list. You can verify the release signature and checksums
+by running
+
+               ./build-support/release/verify-release-candidate
+
+5. Wait for the vote to complete. If the vote fails, close the vote by replying to the initial [VOTE]
+email sent in step #4, editing the subject to [RESULT][VOTE] ... and noting the failure reason
+(example [here](http://markmail.org/message/d4d6xtvj7vgwi76f)). You'll also need to manually revert
+the commits generated by the release candidate script that incremented the snapshot version and
+updated the changelog. Once that is done, address any issues and go back to step #1 and run
+again, this time using the -r flag to increment the release candidate version. This will
+automatically clean up the release candidate rc0 branch and source distribution.
+
+               ./build-support/release/release-candidate -l m -r 1 -p
+
+6. Once the vote has successfully passed, create the release.
+
+**IMPORTANT: make sure to use the correct release at this final step (e.g.: `-r 1` if rc1 candidate
+has been voted for). Once the release tag is pushed it will be very hard to undo due to remote
+git pre-receive hook explicitly forbidding release tag manipulations.**
+
+               ./build-support/release/release
+
+7. Update the draft email created from the `release` script in step #6 to include the Apache IDs for
+all binding votes and send the [RESULT][VOTE] email to the dev@ mailing list.
+
diff --git a/source/documentation/0.18.1/development/db-migration.md b/source/documentation/0.18.1/development/db-migration.md
new file mode 100644
index 0000000..419cb40
--- /dev/null
+++ b/source/documentation/0.18.1/development/db-migration.md
@@ -0,0 +1,34 @@
+DB Migrations
+=============
+
+Changes to the DB schema should be made in the form of migrations. This ensures that all changes
+are applied correctly after a DB dump from a previous version is restored.
+
+DB migrations are managed through a system built on top of
+[MyBatis Migrations](http://www.mybatis.org/migrations/). The migrations are run automatically when
+a snapshot is restored, no manual interaction is required by cluster operators.
+
+Upgrades
+--------
+When adding or altering tables or changing data, in addition to making the change in
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql), a new
+migration class should be created under the org.apache.aurora.scheduler.storage.db.migration
+package. The class should implement the [MigrationScript](https://github.com/mybatis/migrations/blob/master/src/main/java/org/apache/ibatis/migration/MigrationScript.java)
+interface (see [V001_TestMigration](https://github.com/apache/aurora/blob/rel/0.18.1/src/test/java/org/apache/aurora/scheduler/storage/db/testmigration/V001_TestMigration.java)
+as an example). The upgrade and downgrade scripts are defined in this class. When restoring a
+snapshot the list of migrations on the classpath is compared to the list of applied changes in the
+DB. Any changes that have not yet been applied are executed and their downgrade script is stored
+alongside the changelog entry in the database to facilitate downgrades in the event of a rollback.
+
+Downgrades
+----------
+If, while running migrations, a rollback is detected, i.e. a change exists in the DB changelog that
+does not exist on the classpath, the downgrade script associated with each affected change is
+applied.
+
+Baselines
+---------
+After enough time has passed (at least 1 official release), it should be safe to baseline migrations
+if desired. This can be accomplished by ensuring the changes from migrations have been applied to
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql) and then
+removing the corresponding migration classes and adding a migration to remove the changelog entries.
\ No newline at end of file
diff --git a/source/documentation/0.18.1/development/design-documents.md b/source/documentation/0.18.1/development/design-documents.md
new file mode 100644
index 0000000..ee92a60
--- /dev/null
+++ b/source/documentation/0.18.1/development/design-documents.md
@@ -0,0 +1,23 @@
+Design Documents
+================
+
+Since its inception as an Apache project, larger feature additions to the
+Aurora code base are discussed in form of design documents. Design documents
+are living documents until a consensus has been reached to implement a feature
+in the proposed form.
+
+Current and past documents:
+
+* [Command Hooks for the Aurora Client](../design/command-hooks/)
+* [Dynamic Reservations](https://docs.google.com/document/d/19gV8Po6DIHO14tOC7Qouk8RnboY8UCfRTninwn_5-7c/edit)
+* [GPU Resources in Aurora](https://docs.google.com/document/d/1J9SIswRMpVKQpnlvJAMAJtKfPP7ZARFknuyXl-2aZ-M/edit)
+* [Health Checks for Updates](https://docs.google.com/document/d/1KOO0LC046k75TqQqJ4c0FQcVGbxvrn71E10wAjMorVY/edit)
+* [JobUpdateDiff thrift API](https://docs.google.com/document/d/1Fc_YhhV7fc4D9Xv6gJzpfooxbK4YWZcvzw6Bd3qVTL8/edit)
+* [REST API RFC](https://docs.google.com/document/d/11_lAsYIRlD5ETRzF2eSd3oa8LXAHYFD8rSetspYXaf4/edit)
+* [Revocable Mesos offers in Aurora](https://docs.google.com/document/d/1r1WCHgmPJp5wbrqSZLsgtxPNj3sULfHrSFmxp2GyPTo/edit)
+* [Supporting the Mesos Universal Containerizer](https://docs.google.com/document/d/111T09NBF2zjjl7HE95xglsDpRdKoZqhCRM5hHmOfTLA/edit?usp=sharing)
+* [Tier Management In Apache Aurora](https://docs.google.com/document/d/1erszT-HsWf1zCIfhbqHlsotHxWUvDyI2xUwNQQQxLgs/edit?usp=sharing)
+* [Ubiquitous Jobs](https://docs.google.com/document/d/12hr6GnUZU3mc7xsWRzMi3nQILGB-3vyUxvbG-6YmvdE/edit)
+* [Pluggable Scheduling](https://docs.google.com/document/d/1fVHLt9AF-YbOCVCDMQmi5DATVusn-tqY8DldKbjVEm0/edit)
+
+Design documents can be found in the Aurora issue tracker via the query [`project = AURORA AND text ~ "docs.google.com" ORDER BY created`](https://issues.apache.org/jira/browse/AURORA-1528?jql=project%20%3D%20AURORA%20AND%20text%20~%20%22docs.google.com%22%20ORDER%20BY%20created).
diff --git a/source/documentation/0.18.1/development/design/command-hooks.md b/source/documentation/0.18.1/development/design/command-hooks.md
new file mode 100644
index 0000000..3f3f70f
--- /dev/null
+++ b/source/documentation/0.18.1/development/design/command-hooks.md
@@ -0,0 +1,102 @@
+# Command Hooks for the Aurora Client
+
+## Introduction/Motivation
+
+We've got hooks in the client that surround API calls. These are
+pretty awkward, because they don't correlate with user actions. For
+example, suppose we wanted a policy that said users weren't allowed to
+kill all instances of a production job at once.
+
+Right now, all that we could hook would be the "killJob" api call. But
+kill (at least in newer versions of the client) normally runs in
+batches. If a user called killall, what we would see on the API level
+is a series of "killJob" calls, each of which specified a batch of
+instances. We wouldn't be able to distinguish between really killing
+all instances of a job (which is forbidden under this policy), and
+carefully killing in batches (which is permitted). In each case, the
+hook would just see a series of API calls, and couldn't find out what
+the actual command being executed was!
+
+For most policy enforcement, what we really want to be able to do is
+look at and vet the commands that a user is performing, not the API
+calls that the client uses to implement those commands.
+
+So I propose that we add a new kind of hooks, which surround noun/verb
+commands. A hook will register itself to handle a collection of (noun,
+verb) pairs. Whenever any of those noun/verb commands are invoked, the
+hook's methods will be called around the execution of the verb. A
+pre-hook will have the ability to reject a command, preventing the
+verb from being executed.
+
+## Registering Hooks
+
+These hooks will be registered via configuration plugins. A configuration plugin
+can register hooks using an API. Hooks registered this way are, effectively,
+hardwired into the client executable.
+
+The order of execution of hooks is unspecified: they may be called in
+any order. There is no way to guarantee that one hook will execute
+before some other hook.
+
+
+### Global Hooks
+
+Commands registered by the python call are called _global_ hooks,
+because they will run for all configurations, whether or not they
+specify any hooks in the configuration file.
+
+In the implementation, hooks are registered in the module
+`apache.aurora.client.cli.command_hooks`, using the class
+`GlobalCommandHookRegistry`. A global hook can be registered by calling
+`GlobalCommandHookRegistry.register_command_hook` in a configuration plugin.
+
+### The API
+
+    class CommandHook(object):
+      @property
+      def name(self):
+        """Returns a name for the hook."
+
+      def get_nouns(self):
+        """Return the nouns that have verbs that should invoke this hook."""
+
+      def get_verbs(self, noun):
+        """Return the verbs for a particular noun that should invoke his hook."""
+
+      @abstractmethod
+      def pre_command(self, noun, verb, context, commandline):
+        """Execute a hook before invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        Returns: True if the command should be allowed to proceed; False if the command
+        should be rejected.
+        """
+
+      def post_command(self, noun, verb, context, commandline, result):
+        """Execute a hook after invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        * result: the result code returned by the verb.
+        Returns: nothing
+        """
+
+    class GlobalCommandHookRegistry(object):
+      @classmethod
+      def register_command_hook(cls, hook):
+        pass
+
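+As a rough illustration of the intended usage (the hook class, its policy check, and the import
+below are assumptions made for the sake of the example, not part of the proposed API), a
+configuration plugin might implement and register a global hook like this:
+
+    from apache.aurora.client.cli.command_hooks import CommandHook, GlobalCommandHookRegistry
+
+    class ForbidProdKillall(CommandHook):
+      """Illustrative hook that rejects `job killall` against production job keys."""
+
+      @property
+      def name(self):
+        return 'forbid_prod_killall'
+
+      def get_nouns(self):
+        # Only the "job" noun is of interest to this hook.
+        return ['job']
+
+      def get_verbs(self, noun):
+        # ...and within it, only the "killall" verb.
+        return ['killall'] if noun == 'job' else []
+
+      def pre_command(self, noun, verb, context, commandline):
+        # Allow the command only if no argument looks like a production job key.
+        return not any('/production/' in arg for arg in commandline)
+
+      def post_command(self, noun, verb, context, commandline, result):
+        pass
+
+    # Called from a configuration plugin; this makes the hook a global hook.
+    GlobalCommandHookRegistry.register_command_hook(ForbidProdKillall())
+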
+### Skipping Hooks
+
+To skip a hook, a user uses a command-line option, `--skip-hooks`. The option can either
+specify specific hooks to skip, or "all":
+
+* `aurora --skip-hooks=all job create east/bozo/devel/myjob` will create a job
+  without running any hooks.
+* `aurora --skip-hooks=test,iq create east/bozo/devel/myjob` will create a job,
+  and will skip only the hooks named "test" and "iq".
diff --git a/source/documentation/0.18.1/development/scheduler.md b/source/documentation/0.18.1/development/scheduler.md
new file mode 100644
index 0000000..fc0836a
--- /dev/null
+++ b/source/documentation/0.18.1/development/scheduler.md
@@ -0,0 +1,118 @@
+Developing the Aurora Scheduler
+===============================
+
+The Aurora scheduler is written in Java and built with [Gradle](http://gradle.org).
+
+
+Prerequisite
+============
+
+When using Apache Aurora checked out from the source repository or the binary
+distribution, the Gradle wrapper and JavaScript dependencies are provided.
+However, you need to manually install them when using the source release
+downloads:
+
+1. Install Gradle following the instructions on the [Gradle web site](http://gradle.org)
+2. From the root directory of the Apache Aurora project generate the Gradle
+wrapper by running:
+
+    gradle wrapper
+
+
+Getting Started
+===============
+
+You will need Java 8 installed and on your `PATH` or unzipped somewhere with `JAVA_HOME` set. Then
+
+    ./gradlew tasks
+
+will bootstrap the build system and show available tasks. This can take a while the first time you
+run it but subsequent runs will be much faster due to cached artifacts.
+
+Running the Tests
+-----------------
+Aurora has a comprehensive unit test suite. To run the tests use
+
+    ./gradlew build
+
+Gradle will only re-run tests when their dependencies have changed. To force a re-run of all
+tests use
+
+    ./gradlew clean build
+
+Running the build with code quality checks
+------------------------------------------
+To speed up development iteration, the plain gradle commands will not run static analysis tools.
+However, you should run them before posting a review diff, and **always** run them before pushing a
+commit to origin/master.
+
+    ./gradlew build -Pq
+
+Running integration tests
+-------------------------
+To run the same tests that are run in the Apache Aurora continuous integration
+environment:
+
+    ./build-support/jenkins/build.sh
+
+In addition, there is an end-to-end test that runs a suite of aurora commands
+using a virtual cluster:
+
+    ./src/test/sh/org/apache/aurora/e2e/test_end_to_end.sh
+
+Creating a bundle for deployment
+--------------------------------
+Gradle can create a zip file containing Aurora, all of its dependencies, and a launch script with
+
+    ./gradlew distZip
+
+or a tar file containing the same files with
+
+    ./gradlew distTar
+
+The output file will be written to `dist/distributions/aurora-scheduler.zip` or
+`dist/distributions/aurora-scheduler.tar`.
+
+
+
+Developing Aurora Java code
+===========================
+
+Setting up an IDE
+-----------------
+Gradle can generate project files for your IDE. To generate an IntelliJ IDEA project run
+
+    ./gradlew idea
+
+and import the generated `aurora.ipr` file.
+
+Adding or Upgrading a Dependency
+--------------------------------
+New dependencies can be added from Maven central by adding a `compile` dependency to `build.gradle`.
+For example, to add a dependency on `com.example`'s `example-lib` 1.0 add this block:
+
+    compile 'com.example:example-lib:1.0'
+
+NOTE: Anyone thinking about adding a new dependency should first familiarize themselves with the
+Apache Foundation's third-party licensing
+[policy](http://www.apache.org/legal/resolved.html#category-x).
+
+
+
+Developing the Aurora Build System
+==================================
+
+Bootstrapping Gradle
+--------------------
+The following files were autogenerated by `gradle wrapper` using gradle's
+[Wrapper](http://www.gradle.org/docs/current/dsl/org.gradle.api.tasks.wrapper.Wrapper.html) plugin and
+should not be modified directly:
+
+    ./gradlew
+    ./gradlew.bat
+    ./gradle/wrapper/gradle-wrapper.jar
+    ./gradle/wrapper/gradle-wrapper.properties
+
+To upgrade Gradle, unpack the new version somewhere, run `/path/to/new/gradle wrapper` in the
+repository root and commit the changed files.
+
diff --git a/source/documentation/0.18.1/development/thermos.md b/source/documentation/0.18.1/development/thermos.md
new file mode 100644
index 0000000..0efe130
--- /dev/null
+++ b/source/documentation/0.18.1/development/thermos.md
@@ -0,0 +1,126 @@
+The Python components of Aurora are built using [Pants](https://pantsbuild.github.io).
+
+
+Python Build Conventions
+========================
+The Python code is laid out according to the following conventions:
+
+1. 1 `BUILD` per 3rd level directory. For a list of current top-level packages run:
+
+        % find src/main/python -maxdepth 3 -mindepth 3 -type d |\
+        while read dname; do echo $dname |\
+            sed 's@src/main/python/\(.*\)/\(.*\)/\(.*\).*@\1.\2.\3@'; done
+
+2.  Each `BUILD` file exports 1
+    [`python_library`](https://pantsbuild.github.io/build_dictionary.html#bdict_python_library)
+    that provides a
+    [`setup_py`](https://pantsbuild.github.io/build_dictionary.html#setup_py)
+    containing each
+    [`python_binary`](https://pantsbuild.github.io/build_dictionary.html#python_binary)
+    in the `BUILD` file, named the same as the directory it's in so that it can be referenced
+    without a ':' character. The `sources` field in the `python_library` will almost always be
+    `rglobs('*.py')`.
+
+3.  Other BUILD files may only depend on this single public `python_library`
+    target. Any other target is considered a private implementation detail and
+    should be prefixed with an `_`.
+
+4.  `python_binary` targets are always named the same as the exported console script.
+
+5.  `python_binary` targets must have identical `dependencies` to the `python_library` exported
+    by the package and must use `entry_point`.
+
+    This means a PEX file generated by pants will contain exactly the same files that will be
+    available on the `PYTHONPATH` in the case of `pip install` of the corresponding library
+    target. This will help our migration off of Pants in the future.
+
+Annotated example - apache.thermos.runner
+-----------------------------------------
+
+    % find src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner/__init__.py
+    src/main/python/apache/thermos/runner/thermos_runner.py
+    src/main/python/apache/thermos/runner/BUILD
+    % cat src/main/python/apache/thermos/runner/BUILD
+    # License boilerplate omitted
+    import os
+
+
+    # Private target so that a setup_py can exist without a circular dependency. Only targets within
+    # this file should depend on this.
+    python_library(
+      name = '_runner',
+      # The target covers every python file under this directory and subdirectories.
+      sources = rglobs('*.py'),
+      dependencies = [
+        '3rdparty/python:twitter.common.app',
+        '3rdparty/python:twitter.common.log',
+        # Source dependencies are always referenced without a ':'.
+        'src/main/python/apache/thermos/common',
+        'src/main/python/apache/thermos/config',
+        'src/main/python/apache/thermos/core',
+      ],
+    )
+
+    # Binary target for thermos_runner.pex. Nothing should depend on this - it's only used as an
+    # argument to ./pants binary.
+    python_binary(
+      name = 'thermos_runner',
+      # Use entry_point, not source so the files used here are the same ones tests see.
+      entry_point = 'apache.thermos.bin.thermos_runner',
+      dependencies = [
+        # Notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+    )
+
+    # The public library that everyone importing the runner symbols uses.
+    # The test targets and any other dependent source code should depend on this.
+    python_library(
+      name = 'runner',
+      dependencies = [
+        # Again, notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+      # We always provide a setup_py. This will cause any dependee libraries to automatically
+      # reference this library in their requirements.txt rather than copy the source files into their
+      # sdist.
+      provides = setup_py(
+        # Conventionally named and versioned.
+        name = 'apache.thermos.runner',
+        version = open(os.path.join(get_buildroot(), '.auroraversion')).read().strip().upper(),
+      ).with_binaries({
+        # Every binary in this file should also be repeated here.
+        # Always use the dict-form of .with_binaries so that commands with dashes in their names are
+        # supported.
+        # The console script name is always the same as the PEX with .pex stripped.
+        'thermos_runner': ':thermos_runner',
+      }),
+    )
+
+
+
+Thermos Test resources
+======================
+
+The Aurora source repository and distributions contain several
+[binary files](../../src/test/resources/org/apache/thermos/root/checkpoints) to
+qualify the backwards-compatibility of thermos with checkpoint data. Since
+thermos persists state to disk (to be read by the thermos observer), it is important that we have
+tests that prevent regressions affecting the ability to parse previously-written data.
+
+The files included represent persisted checkpoints that exercise different
+features of thermos. The existing files should not be modified unless
+we are accepting backwards incompatibility, such as with a major release.
+
+It is not practical to write source code to generate these files on the fly,
+as source would be vulnerable to drift (e.g. due to refactoring) in ways
+that would undermine the goal of ensuring backwards compatibility.
+
+The most common reason to add a new checkpoint file would be to provide
+coverage for new thermos features that alter the data format. This is
+accomplished by writing and running a
+[job configuration](../../reference/configuration/) that exercises the feature, and
+copying the checkpoint file from the sandbox directory; by default this is
+`/var/run/thermos/checkpoints/<aurora task id>`.
diff --git a/source/documentation/0.18.1/development/thrift.md b/source/documentation/0.18.1/development/thrift.md
new file mode 100644
index 0000000..0247f55
--- /dev/null
+++ b/source/documentation/0.18.1/development/thrift.md
@@ -0,0 +1,54 @@
+Thrift
+======
+
+Aurora uses [Apache Thrift](https://thrift.apache.org/) for representing structured data in
+the client/server RPC protocol as well as for internal data storage. While Thrift is capable of
+correctly handling additions and renames of existing members, field removals must be done
+carefully to ensure backwards compatibility and to provide a predictable deprecation cycle. This
+document describes general guidelines for making Thrift schema changes to the existing fields in
+[api.thrift](https://github.com/apache/aurora/blob/rel/0.18.1/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+It is highly recommended to go through the
+[Thrift: The Missing Guide](http://diwakergupta.github.io/thrift-missing-guide/) first to refresh on
+basic Thrift schema concepts.
+
+Checklist
+---------
+Every existing Thrift schema modification is unique in its requirements and must be analyzed
+carefully to identify its scope and expected consequences. The following checklist may help in that
+analysis:
+* Is this a new field/struct? If yes, go ahead
+* Is this a pure field/struct rename without any type/structure change? If yes, go ahead and rename
+* Anything else, read further to make sure your change is properly planned
+
+Deprecation cycle
+-----------------
+Any time a breaking change (e.g.: field replacement or removal) is required, the following cycle
+must be followed:
+
+### vCurrent
+The change is applied in a way that does not prevent a scheduler/client running this version
+from communicating with a scheduler/client running vCurrent-1.
+* Do not remove or rename the old field
+* Add a new field as an eventual replacement of the old one and implement a dual read/write
+anywhere the old field is used. If a thrift struct is mapped in the DB store make sure both columns
+are marked as `NOT NULL`
+* Check [storage.thrift](https://github.com/apache/aurora/blob/rel/0.18.1/api/src/main/thrift/org/apache/aurora/gen/storage.thrift) to see if
+the affected struct is stored in Aurora scheduler storage. If so, it's almost certainly also
+necessary to perform a [DB migration](../db-migration/).
+* Add a deprecation jira ticket into the vCurrent+1 release candidate
+* Add a TODO for the deprecated field mentioning the jira ticket
+
+### vCurrent+1
+Finalize the change by removing the deprecated fields from the Thrift schema.
+* Drop any dual read/write routines added in the previous version
+* Remove thrift backfilling in scheduler
+* Remove the deprecated Thrift field
+
+Testing
+-------
+It's always advisable to test your changes in the local vagrant environment to build more
+confidence that your change is backwards compatible. It's easy to simulate different
+client/scheduler versions by playing with `aurorabuild` command. See [this document](../../getting-started/vagrant/)
+for more.
+
diff --git a/source/documentation/0.18.1/development/ui.md b/source/documentation/0.18.1/development/ui.md
new file mode 100644
index 0000000..f41614e
--- /dev/null
+++ b/source/documentation/0.18.1/development/ui.md
@@ -0,0 +1,46 @@
+Developing the Aurora Scheduler UI
+==================================
+
+Installing bower (optional)
+----------------------------
+Third party JS libraries used in Aurora (located at 3rdparty/javascript/bower_components) are
+managed by bower, a JS dependency manager. Bower is only required if you plan to add, remove or
+update JS libraries. Bower can be installed using the following command:
+
+    npm install -g bower
+
+Bower depends on node.js and npm. The easiest way to install node on a mac is via brew:
+
+    brew install node
+
+For more node.js installation options refer to https://github.com/joyent/node/wiki/Installation.
+
+More info on installing and using bower can be found at: http://bower.io/. Once installed, you can
+use the following commands to view and modify the bower repo at
+3rdparty/javascript/bower_components
+
+    bower list
+    bower install <library name>
+    bower remove <library name>
+    bower update <library name>
+    bower help
+
+
+Faster Iteration in Vagrant
+---------------------------
+The scheduler serves UI assets from the classpath. For production deployments this means the assets
+are served from within a jar. However, for faster development iteration, the vagrant image is
+configured to add the `scheduler` subtree of `/vagrant/dist/resources/main` to the head of
+`CLASSPATH`. This path is configured as a shared filesystem to the path on the host system where
+your Aurora repository lives. This means that any updates under `dist/resources/main/scheduler` in
+your checkout will be reflected immediately in the UI served from within the vagrant image.
+
+The one caveat to this is that this path is under `dist` not `src`. This is because the assets must
+be processed by gradle before they can be served. So, unfortunately, you cannot just save your local
+changes and see them reflected in the UI; you must first run `./gradlew processResources`. This is
+less than ideal, but better than having to restart the scheduler after every change. Additionally,
+gradle makes this process somewhat easier with the use of the `--continuous` flag. If you run
+`./gradlew processResources --continuous`, gradle will monitor the filesystem for changes and run the
+task automatically as necessary. This doesn't quite provide hot-reload capabilities, but it does
+allow for <5s from save to changes being visible in the UI with no further action required on the
+part of the developer.
diff --git a/source/documentation/0.18.1/features/constraints.md b/source/documentation/0.18.1/features/constraints.md
new file mode 100644
index 0000000..4ade848
--- /dev/null
+++ b/source/documentation/0.18.1/features/constraints.md
@@ -0,0 +1,126 @@
+Scheduling Constraints
+======================
+
+By default, Aurora will pick any random agent with sufficient resources
+in order to schedule a task. This scheduling choice can be further
+restricted with the help of constraints.
+
+
+Mesos Attributes
+----------------
+
+Data centers are often organized with hierarchical failure domains.  Common failure domains
+include hosts, racks, rows, and PDUs.  If you have this information available, it is wise to tag
+the Mesos agent with them as
+[attributes](https://mesos.apache.org/documentation/attributes-resources/).
+
+The Mesos agent `--attributes` command line argument can be used to mark agents with
+static key/value pairs, so called attributes (not to be confused with `--resources`, which are
+dynamic and accounted).
+
+For example, consider the host `cluster1-aaa-03-sr2` and its following attributes (given in
+key:value format): `host:cluster1-aaa-03-sr2` and `rack:aaa`.
+
+Aurora makes these attributes available for matching with scheduling constraints.
+
+
+Limit Constraints
+-----------------
+
+Limit constraints allow you to control machine diversity. The below
+constraint ensures that no more than two instances of your job may run on a single host.
+Think of this as a "group by" limit.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'host': 'limit:2',
+      }
+      ...
+    )
+
+
+Likewise, you can use constraints to control rack diversity, e.g. at
+most one task per rack:
+
+    constraints = {
+      'rack': 'limit:1',
+    }
+
+Use these constraints sparingly as they can dramatically reduce Tasks' schedulability.
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+
+Value Constraints
+-----------------
+
+Value constraints can be used to express that a certain attribute with a certain value
+should be present on a Mesos agent. For example, the following job would only be
+scheduled on nodes that claim to have an `SSD` as their disk.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'disk': 'SSD',
+      }
+      ...
+    )
+
+
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+Running stateful services
+-------------------------
+
+Aurora is best suited to run stateless applications, but it also accommodates for stateful services
+like databases, or services that otherwise need to always run on the same machines.
+
+### Dedicated attribute
+
+Most Mesos attributes are arbitrary and available for custom use.  There is one exception,
+though: the `dedicated` attribute.  Aurora treats this attribute specially: only matching jobs are
+allowed to run on such machines, and matching jobs will be scheduled only on such machines.
+
+
+#### Syntax
+The dedicated attribute has semantic meaning. The format is `$role(/.*)?`. When a job is created,
+the scheduler requires that the `$role` component matches the `role` field in the job
+configuration, and will reject the job creation otherwise.  The remainder of the attribute is
+free-form. We've developed the idiom of formatting this attribute as `$role/$job`, but do not
+enforce this. For example: a job `devcluster/www-data/prod/hello` with a dedicated constraint set as
+`www-data/web.multi` will have its tasks scheduled only on Mesos agents configured with:
+`--attributes=dedicated:www-data/web.multi`.
+
+A wildcard (`*`) may be used for the role portion of the dedicated attribute, which will allow any
+owner to elect for a job to run on the host(s). For example: tasks from both
+`devcluster/www-data/prod/hello` and `devcluster/vagrant/test/hello` with a dedicated constraint
+formatted as `*/web.multi` will be scheduled only on Mesos agents configured with
+`--attributes=dedicated:*/web.multi`. This may be useful when assembling a virtual cluster of
+machines sharing the same set of traits or requirements.
+
+##### Example
+Consider the following agent command line:
+
+    mesos-slave --attributes="dedicated:db_team/redis" ...
+
+And this job configuration:
+
+    Service(
+      name = 'redis',
+      role = 'db_team',
+      constraints = {
+        'dedicated': 'db_team/redis'
+      }
+      ...
+    )
+
+The job configuration is indicating that it should only be scheduled on agents with the attribute
+`dedicated:db_team/redis`.  Additionally, Aurora will prevent any tasks that do _not_ have that
+constraint from running on those agents.
+
diff --git a/source/documentation/0.18.1/features/containers.md b/source/documentation/0.18.1/features/containers.md
new file mode 100644
index 0000000..5bf1601
--- /dev/null
+++ b/source/documentation/0.18.1/features/containers.md
@@ -0,0 +1,130 @@
+Containers
+==========
+
+Aurora supports several containerizers, notably the Mesos containerizer and the Docker
+containerizer. The Mesos containerizer uses native OS features directly to provide isolation between
+containers, while the Docker containerizer delegates container management to the Docker engine.
+
+The support for launching container images via both containerizers has to be
+[enabled by a cluster operator](../../operations/configuration/#containers).
+
+Mesos Containerizer
+-------------------
+
+The Mesos containerizer is the native Mesos containerizer solution. It allows tasks to be
+run with an array of [pluggable isolators](../resource-isolation/) and can launch tasks using
+[Docker](https://github.com/docker/docker/blob/master/image/spec/v1.md) images,
+[AppC](https://github.com/appc/spec/blob/master/SPEC.md) images, or directly on the agent host
+filesystem.
+
+The following example (available in our [Vagrant environment](../../getting-started/vagrant/))
+launches a hello world example within a `debian/jessie` Docker image:
+
+    $ cat /vagrant/examples/jobs/hello_docker_image.aurora
+    hello_loop = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = Task(
+      processes = [hello_loop],
+      resources = Resources(cpu=1, ram=1*MB, disk=8*MB)
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker_image',
+        task = task,
+        container = Mesos(image=DockerImage(name='debian', tag='jessie'))
+      )
+    ]
+
+Docker and AppC images are designated using an appropriate `image` property of the `Mesos`
+configuration object. If either `container` or `image` is left unspecified, the host filesystem
+will be used. Further details of how to specify images can be found in the
+[Reference Documentation](../../reference/configuration/#mesos-object).
+
+By default, Aurora launches processes as the Linux user with the same name as the job's role (e.g.
+`www-data` in the example above). This user has to exist on the host filesystem. If it does not
+exist within the container image, it will be created automatically. Otherwise, this user and its
+primary group have to exist in the image with matching uid/gid.
+
+For more information on the Mesos containerizer filesystem, namespace, and isolator features, visit
+[Mesos Containerizer](http://mesos.apache.org/documentation/latest/mesos-containerizer/) and
+[Mesos Container Images](http://mesos.apache.org/documentation/latest/container-image/).
+
+
+Docker Containerizer
+--------------------
+
+The Docker containerizer launches container images using the Docker engine. It may often provide
+more advanced features than the native Mesos containerizer, but it has to be installed separately
+from Mesos on each agent host.
+
+Starting with the 0.17.0 release, `image` can be specified with a `{{docker.image[name][tag]}}` binder so that
+the tag can be resolved to a concrete image digest. This ensures that the job always uses the same image
+across restarts, even if the version identified by the tag has been updated, guaranteeing that only job
+updates can mutate configuration.
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/hello_docker_engine.aurora
+    hello_loop = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = Task(
+      processes = [hello_loop],
+      resources = Resources(cpu=1, ram=1*MB, disk=8*MB)
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker',
+        task = task,
+        container = Docker(image = 'python:2.7')
+      ), Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker_engine_binding',
+        task = task,
+        container = Docker(image = '{{docker.image[library/python][2.7]}}')
+      )
+    ]
+
+Note that this feature requires a v2 Docker registry. If using a private Docker registry, its URL
+must be specified in the `clusters.json` configuration file under the key `docker_registry`.
+If not specified, `docker_registry` defaults to `https://registry-1.docker.io` (Docker Hub).
+
+Example:
+
+    # clusters.json
+    [{
+      "name": "devcluster",
+      ...
+      "docker_registry": "https://registry.example.com"
+    }]
+
+Details of how to use Docker via the Docker engine can be found in the
+[Reference Documentation](../../reference/configuration/#docker-object). Please note that in order to
+correctly execute processes inside a job, the Docker container must have Python 2.7 and potentially
+further Mesos dependencies installed. This limitation does not hold for Docker containers used via
+the Mesos containerizer.
+
+For more information on launching Docker containers through the Docker containerizer, visit
+[Docker Containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/)
diff --git a/source/documentation/0.18.1/features/cron-jobs.md b/source/documentation/0.18.1/features/cron-jobs.md
new file mode 100644
index 0000000..bcc94dc
--- /dev/null
+++ b/source/documentation/0.18.1/features/cron-jobs.md
@@ -0,0 +1,124 @@
+# Cron Jobs
+
+Aurora supports execution of scheduled jobs on a Mesos cluster using cron-style syntax.
+
+- [Overview](#overview)
+- [Collision Policies](#collision-policies)
+- [Failure recovery](#failure-recovery)
+- [Interacting with cron jobs via the Aurora CLI](#interacting-with-cron-jobs-via-the-aurora-cli)
+	- [cron schedule](#cron-schedule)
+	- [cron deschedule](#cron-deschedule)
+	- [cron start](#cron-start)
+	- [job killall, job restart, job kill](#job-killall-job-restart-job-kill)
+- [Technical Note About Syntax](#technical-note-about-syntax)
+- [Caveats](#caveats)
+	- [Failovers](#failovers)
+	- [Collision policy is best-effort](#collision-policy-is-best-effort)
+	- [Timezone Configuration](#timezone-configuration)
+
+## Overview
+
+A job is identified as a cron job by the presence of a
+`cron_schedule` attribute containing a cron-style schedule in the
+[`Job`](../../reference/configuration/#job-objects) object. Examples of cron schedules
+include "every 5 minutes" (`*/5 * * * *`), "Fridays at 17:00" (`* 17 * * FRI`), and
+"the 1st and 15th day of the month at 03:00" (`0 3 1,15 *`).
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/cron_hello_world.aurora
+    # A cron job that runs every 5 minutes.
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]
+
+## Collision Policies
+
+The `cron_collision_policy` field specifies the scheduler's behavior when a new cron job is
+triggered while an older run hasn't finished. The scheduler has two policies available:
+
+* `KILL_EXISTING`: The default policy - on a collision the old instances are killed and instances with the current
+configuration are started.
+* `CANCEL_NEW`: On a collision the new run is cancelled.
+
+Note that the use of `CANCEL_NEW` is likely a code smell - interrupted cron jobs should be able
+to recover their progress on a subsequent invocation, otherwise they risk having their work queue
+grow faster than they can process it.
+
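+For instance, to make the example job above skip a new run while a previous one is still active,
+the collision policy can be set explicitly (a minimal sketch reusing the example job):
+
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        # Skip a newly triggered run while the previous one is still active.
+        cron_collision_policy = 'CANCEL_NEW',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]
+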
+## Failure recovery
+
+Unlike with services, which Aurora will always re-execute regardless of exit status, instances of
+cron jobs retry according to the `max_task_failures` attribute of the
+[Task](../../reference/configuration/#task-object) object. To get "run-until-success" semantics,
+set `max_task_failures` to `-1`.
+
+## Interacting with cron jobs via the Aurora CLI
+
+Most interaction with cron jobs takes place using the `cron` subcommand. See `aurora cron -h`
+for up-to-date usage instructions.
+
+### cron schedule
+Schedules a new cron job on the Aurora cluster for later runs or replaces the existing cron template
+with a new one. Only future runs will be affected; any existing active tasks are left intact.
+
+    $ aurora cron schedule devcluster/www-data/test/cron_hello_world /vagrant/examples/jobs/cron_hello_world.aurora
+
+### cron deschedule
+Deschedules a cron job, preventing future runs but allowing current runs to complete.
+
+    $ aurora cron deschedule devcluster/www-data/test/cron_hello_world
+
+### cron start
+Start a cron job immediately, outside of its normal cron schedule.
+
+    $ aurora cron start devcluster/www-data/test/cron_hello_world
+
+### job killall, job restart, job kill
+Cron jobs create instances running on the cluster that you can interact with like normal Aurora
+tasks with `job kill` and `job restart`.
+
+
+## Technical Note About Syntax
+
+`cron_schedule` uses a restricted subset of BSD crontab syntax. While the
+execution engine currently uses Quartz, the schedule parsing is custom, a subset of FreeBSD
+[crontab(5)](http://www.freebsd.org/cgi/man.cgi?crontab(5)) syntax. See
+[the source](https://github.com/apache/aurora/blob/master/src/main/java/org/apache/aurora/scheduler/cron/CrontabEntry.java#L106-L124)
+for details.
+
+
+## Caveats
+
+### Failovers
+No failover recovery. Aurora does not record the latest minute it fired
+triggers for across failovers. Therefore it's possible to miss triggers
+on failover. Note that this behavior may change in the future.
+
+It's necessary to sync time between schedulers with something like `ntpd`.
+Clock skew could cause double or missed triggers in the case of a failover.
+
+### Collision policy is best-effort
+Aurora aims to always have *at least one copy* of a given instance running at a time - it's
+an AP system, meaning it chooses Availability and Partition Tolerance at the expense of
+Consistency.
+
+If your collision policy was `CANCEL_NEW` and a task has terminated but
+Aurora has not yet noticed this, Aurora will go ahead and create your new
+task.
+
+If your collision policy was `KILL_EXISTING` and a task was marked `LOST`
+but not yet GCed, Aurora will go ahead and create your new task without
+attempting to kill the old one (outside the GC interval).
+
+### Timezone Configuration
+Cron timezone is configured independently of JVM timezone with the `-cron_timezone` flag and
+defaults to UTC.
diff --git a/source/documentation/0.18.1/features/custom-executors.md b/source/documentation/0.18.1/features/custom-executors.md
new file mode 100644
index 0000000..1357c1e
--- /dev/null
+++ b/source/documentation/0.18.1/features/custom-executors.md
@@ -0,0 +1,153 @@
+Custom Executors
+================
+
+If the need arises to use a Mesos executor other than the Thermos executor, the scheduler can be
+configured to utilize a custom executor by specifying the `-custom_executor_config` flag.
+The flag must be set to the path of a valid executor configuration file.
+
+The configuration file must be a valid **JSON array** and contain, at minimum,
+one executor configuration including the name, command and resources fields and
+must be pointed to by the `-custom_executor_config` flag when the scheduler is
+started.
+
+### Array Entry
+
+Property                 | Description
+-----------------------  | ---------------------------------
+executor (required)      | Description of executor.
+task_prefix (required)   | Prefix given to tasks launched with this executor's configuration.
+volume_mounts (optional) | Volumes to be mounted in container running executor.
+
+#### executor
+
+Property                 | Description
+-----------------------  | ---------------------------------
+name (required)          | Name of the executor.
+command (required)       | How to run the executor.
+resources (required)     | Overhead to use for each executor instance.
+
+#### command
+
+Property                 | Description
+-----------------------  | ---------------------------------
+value (required)         | The command to execute.
+arguments (optional)     | A list of arguments to pass to the command.
+uris (optional)          | List of resources to download into the task sandbox.
+shell (optional)         | Run executor via shell.
+
+A note on the command property (from [mesos.proto](https://github.com/apache/mesos/blob/master/include/mesos/mesos.proto)):
+
+```
+1) If 'shell == true', the command will be launched via shell
+   (i.e., /bin/sh -c 'value'). The 'value' specified will be
+   treated as the shell command. The 'arguments' will be ignored.
+2) If 'shell == false', the command will be launched by passing
+   arguments to an executable. The 'value' specified will be
+   treated as the filename of the executable. The 'arguments'
+   will be treated as the arguments to the executable. This is
+   similar to how POSIX exec families launch processes (i.e.,
+   execlp(value, arguments(0), arguments(1), ...)).
+```
+
+##### uris (list)
+* Follows the [Mesos Fetcher schema](http://mesos.apache.org/documentation/latest/fetcher/)
+
+Property                 | Description
+-----------------------  | ---------------------------------
+value (required)         | Path to the resource needed in the sandbox.
+executable (optional)    | Change resource to be executable via chmod.
+extract (optional)       | Extract files from packed or compressed archives into the sandbox.
+cache (optional)         | Use caching mechanism provided by Mesos for resources.
+
+#### resources (list)
+
+Property             | Description
+-------------------  | ---------------------------------
+name (required)      | Name of the resource: cpus or mem.
+type (required)      | Type of resource. Should always be SCALAR.
+scalar (required)    | Value in float for cpus or int for mem (in MBs)
+
+### volume_mounts (list)
+
+Property                     | Description
+---------------------------  | ---------------------------------
+host_path (required)         | Host path to mount inside the container.
+container_path (required)    | Path inside the container where `host_path` will be mounted.
+mode (required)              | Mode in which to mount the volume, Read-Write (RW) or Read-Only (RO).
+
+A sample configuration is as follows:
+
+```json
+[
+    {
+      "executor": {
+        "name": "myExecutor",
+        "command": {
+          "value": "myExecutor.a",
+          "shell": "false",
+          "arguments": [
+            "localhost:2181",
+            "-verbose",
+            "-config myConfiguration.config"
+          ],
+          "uris": [
+            {
+              "value": "/dist/myExecutor.a",
+              "executable": true,
+              "extract": false,
+              "cache": true
+            },
+            {
+              "value": "/home/user/myConfiguration.config",
+              "executable": false,
+              "extract": false,
+              "cache": false
+            }
+          ]
+        },
+        "resources": [
+          {
+            "name": "cpus",
+            "type": "SCALAR",
+            "scalar": {
+              "value": 1.00
+            }
+          },
+          {
+            "name": "mem",
+            "type": "SCALAR",
+            "scalar": {
+              "value": 512
+            }
+          }
+        ]
+      },
+      "volume_mounts": [
+        {
+          "mode": "RO",
+          "container_path": "/path/on/container",
+          "host_path": "/path/to/host/directory"
+        },
+        {
+          "mode": "RW",
+          "container_path": "/container",
+          "host_path": "/host"
+        }
+      ],
+      "task_prefix": "my-executor-"
+    }
+]
+```
+
+It should be noted that if you do not use Thermos or a Thermos based executor, links in the scheduler's
+Web UI for tasks will not work (at least for the time being).
+Some information about launched tasks can still be accessed via the Mesos Web UI or via the Aurora Client.
+
+### Using a custom executor
+
+At this time, it is not possible to create a job that runs on a custom executor using the default
+Aurora client. To allow the scheduler to pick the correct executor, the `JobConfiguration.TaskConfig.ExecutorConfig.name`
+field must be set to match the name used in the custom executor configuration blob (e.g. to run a job using myExecutor,
+`JobConfiguration.TaskConfig.ExecutorConfig.name` must be set to `myExecutor`). Until support for modifying
+this field in Pystachio is available, the easiest way to launch jobs with custom executors is to use
+an existing custom client such as [gorealis](https://github.com/rdelval/gorealis).
diff --git a/source/documentation/0.18.1/features/job-updates.md b/source/documentation/0.18.1/features/job-updates.md
new file mode 100644
index 0000000..7e35be9
--- /dev/null
+++ b/source/documentation/0.18.1/features/job-updates.md
@@ -0,0 +1,110 @@
+Aurora Job Updates
+==================
+
+`Job` configurations can be updated at any point in their lifecycle.
+Usually updates are done incrementally using a process called a *rolling
+upgrade*, in which Tasks are upgraded in small groups, one group at a
+time.  Updates are done using various Aurora Client commands.
+
+
+Rolling Job Updates
+-------------------
+
+There are several sub-commands to manage job updates:
+
+    aurora update start <job key> <configuration file>
+    aurora update info <job key>
+    aurora update pause <job key>
+    aurora update resume <job key>
+    aurora update abort <job key>
+    aurora update list <cluster>
+
+When you `start` a job update, the command will return once it has sent the
+instructions to the scheduler.  At that point, you may view detailed
+progress for the update with the `info` subcommand, in addition to viewing
+graphical progress in the web browser.  You may also get a full listing of
+in-progress updates in a cluster with `list`.
+
+Once an update has been started, you can `pause` to keep the update but halt
+progress.  This can be useful for doing things like debugging a partially-updated
+job to determine whether you would like to proceed.  You can `resume` to
+proceed.
+
+You may `abort` a job update regardless of the state it is in. This will
+instruct the scheduler to completely abandon the job update and leave the job
+in the current (possibly partially-updated) state.
+
+For a configuration update, the Aurora Scheduler calculates required changes
+by examining the current job config state and the new desired job config.
+It then starts a *rolling batched update process* by going through every batch
+and performing these operations, in order:
+
+- If an instance is not present in the scheduler but is present in
+  the new config, then the instance is created.
+- If an instance is present in both the scheduler and the new config, then
+  the scheduler diffs both task configs. If it detects any changes, it
+  performs an instance update by killing the old config instance and adding
+  the new config instance.
+- If an instance is present in the scheduler but isn't in the new config,
+  then that instance is killed.
+
+The Aurora Scheduler continues through the instance list until all tasks are
+updated and in `RUNNING`. If the scheduler determines the update is not going
+well (based on the criteria specified in the UpdateConfig), it cancels the update.
+
+Update cancellation runs a procedure similar to the update sequence
+described above, but in reverse order. New instance configs are swapped
+with old instance configs and batch updates proceed backwards
+from the point where the update failed. E.g., (0,1,2) (3,4,5) (6,7,
+8-FAIL) results in a rollback in order (8,7,6) (5,4,3) (2,1,0).
+
+For details on how to control a job update, please see the
+[UpdateConfig](../../reference/configuration/#updateconfig-objects) configuration object.
+
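+For illustration, here is a hedged sketch of attaching such an update policy to a job via the
+`update_config` field (the parameter values are arbitrary, and `task` is assumed to be defined
+elsewhere in the configuration file):
+
+    update_config = UpdateConfig(
+      batch_size = 3,              # update three instances at a time
+      watch_secs = 45,             # watch each batch for 45s before declaring it healthy
+      max_per_shard_failures = 1,  # tolerate one failure per instance
+      max_total_failures = 0       # any further failure aborts the update
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_world',
+        update_config = update_config,
+        task = task,
+      )
+    ]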
+
+Coordinated Job Updates
+------------------------
+
+Some Aurora services may benefit from having more control over updates by explicitly
+acknowledging ("heartbeating") job update progress. This may be helpful for mission-critical
+service updates where explicit job health monitoring is vital during the entire job update
+lifecycle. Such job updates would rely on an external service (or a custom client) periodically
+pulsing an active coordinated job update via a
+[pulseJobUpdate RPC](https://github.com/apache/aurora/blob/rel/0.18.1/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+A coordinated update is defined by setting a positive
+[pulse_interval_secs](../../reference/configuration/#updateconfig-objects) value in job configuration
+file. If no pulses are received within specified interval the update will be blocked. A blocked
+update is unable to continue rolling forward (or rolling back) but retains its active status.
+It may only be unblocked by a fresh `pulseJobUpdate` call.
+
+NOTE: A coordinated update starts in `ROLL_FORWARD_AWAITING_PULSE` state and will not make any
+progress until the first pulse arrives. However, a paused update (`ROLL_FORWARD_PAUSED` or
+`ROLL_BACK_PAUSED`) is still considered active and upon resuming will immediately make progress
+provided the pulse interval has not expired.
+
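+For example, an update is made coordinated by setting a positive pulse interval in the job's
+update configuration (a minimal sketch; the external coordinator issuing `pulseJobUpdate` RPCs is
+not shown):
+
+    update_config = UpdateConfig(
+      batch_size = 2,
+      # A positive value makes the update coordinated: if no pulse arrives within
+      # 60 seconds, the update blocks until the next pulseJobUpdate call.
+      pulse_interval_secs = 60
+    )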
+
+Canary Deployments
+------------------
+
+Canary deployments are a pattern for rolling out updates to a subset of job instances,
+in order to test different code versions alongside the actual production job.
+It is a risk-mitigation strategy for job owners and commonly used in a form where
+job instance 0 runs with a different configuration than the instances 1-N.
+
+For example, consider a job with 4 instances that each
+request 1 core of cpu, 1 GB of RAM, and 1 GB of disk space as specified
+in the configuration file `hello_world.aurora`. If you want to
+update it so it requests 2 GB of RAM instead of 1, you can create a new
+configuration file to do that called `new_hello_world.aurora` and
+issue
+
+    aurora update start <job_key_value>/0-1 new_hello_world.aurora
+
+This results in instances 0 and 1 having 1 cpu, 2 GB of RAM, and 1 GB of disk space,
+while instances 2 and 3 have 1 cpu, 1 GB of RAM, and 1 GB of disk space. If instance 3
+dies and restarts, it restarts with 1 cpu, 1 GB RAM, and 1 GB disk space.
+
+So that means there are two simultaneous task configurations for the same job
+at the same time, just valid for different ranges of instances. While this isn't a recommended
+pattern, it is valid and supported by the Aurora scheduler.
diff --git a/source/documentation/0.18.1/features/mesos-fetcher.md b/source/documentation/0.18.1/features/mesos-fetcher.md
new file mode 100644
index 0000000..967a905
--- /dev/null
+++ b/source/documentation/0.18.1/features/mesos-fetcher.md
@@ -0,0 +1,46 @@
+Mesos Fetcher
+=============
+
+Mesos has support for downloading resources into the sandbox through the
+use of the [Mesos Fetcher](http://mesos.apache.org/documentation/latest/fetcher/).
+
+Aurora supports passing URIs to the Mesos Fetcher dynamically by including
+a list of URIs in job submissions.
+
+How to use
+----------
+The scheduler flag `-enable_mesos_fetcher` must be set to true.
+
+Currently only the scheduler side of this feature has been implemented,
+so a modification to the existing client or a custom Thrift client is required
+to make use of this feature.
+
+If using a custom Thrift client, the list of URIs must be included in TaskConfig
+as the `mesosFetcherUris` field.
+
+Each Mesos Fetcher URI has the following data members:
+
+|Property | Description|
+|---------|------|
+|value (required)  |Path to the resource needed in the sandbox.|
+|extract (optional)|Extract files from packed or compressed archives into the sandbox.|
+|cache (optional) | Use caching mechanism provided by Mesos for resources.|
+
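+As an illustrative sketch only (this assumes the Python Thrift bindings generated from `api.thrift`
+are importable as `gen.apache.aurora.api.ttypes`; adjust for however your custom client consumes
+the Aurora API), the URIs could be attached to a task like this:
+
+    from gen.apache.aurora.api.ttypes import MesosFetcherURI, TaskConfig
+
+    task_config = TaskConfig(
+      # ... other TaskConfig fields omitted ...
+      mesosFetcherUris={
+        # Download and unpack an archive into the sandbox, bypassing the fetcher cache.
+        MesosFetcherURI(value='https://example.com/data.tar.gz', extract=True, cache=False),
+      },
+    )
+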
+Note that this structure is very similar to the one provided for downloading
+resources needed for a [custom executor](../../operations/configuration/).
+
+This is because both features use the Mesos fetcher to retrieve resources into
+the sandbox. However, the custom executor feature uses a static set of URIs
+configured on the server side, while the Mesos Fetcher feature uses a dynamic set
+of URIs provided at the time of job submission.
+
+Security Implications
+---------------------
+There are security implications that must be taken into account when enabling this feature.
+**Enabling this feature may potentially enable any job submitting user to perform a privilege escalation.**
+
+Until a more thorough solution is created, one step that has been taken to mitigate this issue
+is to statically mark every user submitted URI as non-executable. This is in contrast to the set of URIs
+set in the custom executor feature which may mark any URI as executable.
+
+If the need arises to mark a downloaded URI as executable, please consider using the custom executor feature.
\ No newline at end of file
diff --git a/source/documentation/0.18.1/features/multitenancy.md b/source/documentation/0.18.1/features/multitenancy.md
new file mode 100644
index 0000000..c2ed26b
--- /dev/null
+++ b/source/documentation/0.18.1/features/multitenancy.md
@@ -0,0 +1,81 @@
+Multitenancy
+============
+
+Aurora is a multi-tenant system that can run jobs of multiple clients/tenants.
+Going beyond the [resource isolation on an individual host](../resource-isolation/), it is
+crucial to prevent those jobs from stepping on each other's toes.
+
+
+Job Namespaces
+--------------
+
+The namespace for jobs in Aurora follows a hierarchical structure. This is meant to make it easier
+to differentiate between different jobs. A job key consists of four parts. The four parts are
+`<cluster>/<role>/<environment>/<jobname>` in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts.
+* Environment names are namespaces.
+* Jobname is the custom name of your job.
+
+Role names correspond to user accounts. They are used for
+[authentication](../../operations/security/), as the linux user used to run jobs, and for the
+assignment of [quota](#preemption). If you don't know what accounts are available, contact your
+sysadmin.
+
+The environment component in the job key serves as a namespace. The values for
+environment are validated in the client and the scheduler so as to allow any of `devel`, `test`,
+`production`, and any value matching the regular expression `staging[0-9]*`.
+
+None of the values imply any difference in the scheduling behavior. Conventionally, the
+"environment" is set so as to indicate a certain level of stability in the behavior of the job
+by ensuring that an appropriate level of testing has been performed on the application code. e.g.
+in the case of a typical Job, releases may progress through the following phases in order of
+increasing level of stability: `devel`, `test`, `staging`, `production`.
+
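+Putting the pieces together, a job defined roughly as follows (a minimal sketch; the task
+definition is omitted) is addressed by the job key `devcluster/www-data/test/hello_world`:
+
+    jobs = [
+      Job(
+        cluster = 'devcluster',    # <cluster>
+        role = 'www-data',         # <role>
+        environment = 'test',      # <environment>
+        name = 'hello_world',      # <jobname>
+        task = task,               # defined elsewhere
+      ),
+    ]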
+
+Configuration Tiers
+-------------------
+
+Tier is a predefined bundle of task configuration options. Aurora schedules tasks and assigns them
+resources based on their tier assignment. The default scheduler tier configuration allows for
+3 tiers:
+
+ - `revocable`: The `revocable` tier requires the task to run with [revocable](../resource-isolation/#oversubscription)
+ resources.
+ - `preemptible`: Setting the task’s tier to `preemptible` allows for the possibility of that task
+ being [preempted](#preemption) by other tasks when cluster is running low on resources.
+ - `preferred`: The `preferred` tier prevents the task from using [revocable](../resource-isolation/#oversubscription)
+ resources and from being [preempted](#preemption).
+
+Since it is possible that a cluster is configured with a custom tier configuration, users should
+consult their cluster administrator to be informed of the tiers supported by the cluster. Attempts
+to schedule jobs with an unsupported tier will be rejected by the scheduler.
+
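+For example, a job opts into a tier via the `tier` field of its configuration (a minimal sketch;
+use a tier name your cluster actually supports):
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'production',
+        name = 'hello_world',
+        tier = 'preemptible',  # may be preempted when the cluster runs low on resources
+        task = task,           # defined elsewhere
+      ),
+    ]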
+
+Preemption
+----------
+
+In order to guarantee that important production jobs are always running, Aurora supports
+preemption.
+
+Let's consider we have a pending job that is a candidate for scheduling, but resource shortage
+prevents it from being scheduled. Active tasks can become the victim of preemption if:
+
+ - both candidate and victim are owned by the same role and the
+   [priority](../../reference/configuration/#job-objects) of a victim is lower than the
+   [priority](../../reference/configuration/#job-objects) of the candidate.
+ - OR a victim is a `preemptible` or `revocable` [tier](#configuration-tiers) task and the candidate
+   is a `preferred` [tier](#configuration-tiers) task.
+
+In other words, tasks from `preferred` [tier](../../reference/configuration/#job-objects) jobs may
+preempt tasks from any `preemptible` or `revocable` job. However, a `preferred` task may only be
+preempted by tasks from `preferred` jobs in the same role with higher [priority](../../reference/configuration/#job-objects).
+
+Aurora requires resource quotas for [production non-dedicated jobs](../../reference/configuration/#job-objects).
+Quota is enforced at the job role level and, when set, defines a non-preemptible pool of compute resources within
+that role. All job types (service, ad-hoc or cron) require role resource quota unless a job has a
+[dedicated constraint set](../constraints/#dedicated-attribute).
+
+To grant quota to a particular role in production, an operator can use the command
+`aurora_admin set_quota`.
diff --git a/source/documentation/0.18.1/features/resource-isolation.md b/source/documentation/0.18.1/features/resource-isolation.md
new file mode 100644
index 0000000..a5294f6
--- /dev/null
+++ b/source/documentation/0.18.1/features/resource-isolation.md
@@ -0,0 +1,181 @@
+Resource Isolation and Sizing
+==============================
+
+This document assumes Aurora and Mesos have been configured
+using our [recommended resource isolation settings](../../operations/configuration/#resource-isolation).
+
+- [Isolation](#isolation)
+- [Sizing](#sizing)
+- [Oversubscription](#oversubscription)
+
+
+Isolation
+---------
+
+Aurora is a multi-tenant system; a single software instance runs on a
+server, serving multiple clients/tenants. To share resources among
+tenants, it leverages Mesos for isolation of:
+
+* CPU
+* GPU
+* memory
+* disk space
+* ports
+
+CPU is a soft limit, and handled differently from memory and disk space.
+Too low a CPU value results in throttling your application and
+slowing it down. Memory and disk space are both hard limits; when your
+application goes over these values, it's killed.
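+
+For reference, these limits are requested per task in the `.aurora` configuration via the
+`Resources` object; the values below are placeholders:
+
+    resources = Resources(
+      cpu  = 2.0,    # soft limit: exceeding it throttles the task
+      ram  = 4*GB,   # hard limit: exceeding it gets the shard killed
+      disk = 8*GB    # hard limit: exceeding it gets the shard killed
+    )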
+
+### CPU Isolation
+
+Mesos can be configured to use a quota-based CPU scheduler (the
+*Completely Fair Scheduler*) to provide consistent and predictable performance.
+This is effectively a guarantee of resources -- you receive at least what
+you requested, but also no more than you've requested.
+
+The scheduler gives applications a CPU quota for every 100 ms interval.
+When an application uses its quota for an interval, it is throttled for
+the rest of the 100 ms. Usage resets for each interval and unused
+quota does not carry over.
+
+For example, an application specifying 4.0 CPU has access to 400 ms of
+CPU time every 100 ms. This CPU quota can be used in different ways,
+depending on the application and available resources. Consider the
+scenarios shown in this diagram.
+
+![CPU Availability](../images/CPUavailability.png)
+
+* *Scenario A*: the application can use up to 4 cores continuously for
+every 100 ms interval. It is never throttled and starts processing
+new requests immediately.
+
+* *Scenario B*: the application uses up to 8 cores (depending on
+availability) but is throttled after 50 ms. The CPU quota resets at the
+start of each new 100 ms interval.
+
+* *Scenario C*: like Scenario A, but there is a garbage collection
+event in the second interval that consumes all CPU quota. The
+application is throttled for the remaining 75 ms of that interval and
+cannot service requests until the next interval. In this example, the
+garbage collection finished in one interval but, depending on how much
+garbage needs collecting, it may take more than one interval and further
+delay service of requests.
+
+*Technical Note*: Mesos considers logical cores, also known as
+hyperthreading or SMT cores, as the unit of CPU.
+
+### Memory Isolation
+
+Mesos uses dedicated memory allocation. Your application always has
+access to the amount of memory specified in your configuration. The
+application's memory use is defined as the sum of the resident set size
+(RSS) of all processes in a shard. Each shard is considered
+independently.
+
+In other words, say you specified a memory size of 10GB. Each shard
+would receive 10GB of memory. If an individual shard's memory demands
+exceed 10GB, that shard is killed, but the other shards continue
+working.
+
+*Technical note*: Total memory size is not enforced at allocation time,
+so your application can request more than its allocation without getting
+an ENOMEM. However, it will be killed shortly after.
+
+### Disk Space
+
+Disk space used by your application is defined as the sum of the files'
+disk space in your application's directory, including the `stdout` and
+`stderr` logged from your application. Each shard is considered
+independently. You should use off-node storage for your application's
+data whenever possible.
+
+In other words, say you specified disk space size of 100MB. Each shard
+would receive 100MB of disk space. If an individual shard's disk space
+demands exceed 100MB, that shard is killed, but the other shards
+continue working.
+
+After your application finishes running, its allocated disk space is
+reclaimed. Thus, your job's final action should move any disk content
+that you want to keep, such as logs, to your home file system or other
+less transitory storage. Disk reclamation takes place an undefined
+period after the application finish time; until then, the disk contents
+are still available but you shouldn't count on them being so.
+
+*Technical note* : Disk space is not enforced at write so your
+application can write above its quota without getting an ENOSPC, but it
+will be killed shortly after. This is subject to change.
+
+### GPU Isolation
+
+GPU isolation will be supported for Nvidia devices starting from Mesos 1.0.
+Access to the allocated units will be exclusive with no sharing between tasks
+allowed (e.g. no fractional GPU allocation). For more details, see the
+[Mesos design document](https://docs.google.com/document/d/10GJ1A80x4nIEo8kfdeo9B11PIbS1xJrrB4Z373Ifkpo/edit#heading=h.w84lz7p4eexl)
+and the [Mesos agent configuration](http://mesos.apache.org/documentation/latest/configuration/).
+
+### Other Resources
+
+Other resources, such as network bandwidth, do not have any performance
+guarantees. For some resources, such as memory bandwidth, there are no
+practical sharing methods, so some application combinations collocated on
+the same host may cause contention.
+
+
+Sizing
+-------
+
+### CPU Sizing
+
+To correctly size Aurora-run Mesos tasks, specify a per-shard CPU value
+that lets the task run at its desired performance when at peak load
+distributed across all shards. Include reserve capacity of at least 50%,
+possibly more, depending on how critical your service is (or how
+confident you are about your original estimate :-)), ideally by
+increasing the number of shards to also improve resiliency. When running
+your application, observe its CPU stats over time. If consistently at or
+near your quota during peak load, you should consider increasing either
+per-shard CPU or the number of shards.
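+
+As a rough worked example of this rule of thumb (all numbers hypothetical): if a job needs about
+12 cores in total at peak load and runs 8 shards, a 50% reserve suggests roughly
+`12 / 8 * 1.5 = 2.25` CPUs per shard, e.g. `cpu = 2.25` in the task's `Resources`.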
+
+### Memory Sizing
+
+Size for your application's peak requirement. Observe the per-instance
+memory statistics over time, as memory requirements can vary over
+different periods. Remember that if your application exceeds its memory
+value, it will be killed, so you should also add a safety margin of
+around 10-20%. If you have the ability to do so, you may also want to
+put alerts on the per-instance memory.
+
+### Disk Space Sizing
+
+Size for your application's peak requirement. Rotate and discard log
+files as needed to stay within your quota. When running a Java process,
+add the maximum size of the Java heap to your disk space requirement, in
+order to account for an out of memory error dumping the heap
+into the application's sandbox space.
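+
+For example (hypothetical numbers), a Java service with a 4 GiB maximum heap and a 2 GiB log budget
+might reserve `disk = 6*GB` in its `Resources`, leaving room for a heap dump written to the sandbox
+on an out of memory error.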
+
+### GPU Sizing
+
+GPU sizing is highly dependent on your application requirements and is only limited
+by the number of physical GPU units available on a target box.
+
+
+Oversubscription
+----------------
+
+Mesos supports [oversubscription of machine resources](http://mesos.apache.org/documentation/latest/oversubscription/)
+via the concept of revocable tasks. In contrast to non-revocable tasks, revocable tasks are best-effort.
+Mesos reserves the right to throttle or even kill them if they might affect existing high-priority
+user-facing services.
+
+As of today, the only revocable resources supported by Aurora are CPU and RAM. A job can
+opt in to using those by specifying the `revocable` [Configuration Tier](../../features/multitenancy/#configuration-tiers).
+A revocable job will only be scheduled using revocable resources, even if there are plenty of
+non-revocable resources available.
+
+The Aurora scheduler must be [configured to receive revocable offers](../../operations/configuration/#resource-isolation)
+from Mesos and accept revocable jobs. If not configured properly, revocable tasks will never get
+assigned to hosts and will stay in `PENDING`.
+
+For details on how to mark a job as being revocable, see the
+[Configuration Reference](../../reference/configuration/).
diff --git a/source/documentation/0.18.1/features/service-discovery.md b/source/documentation/0.18.1/features/service-discovery.md
new file mode 100644
index 0000000..5a97eaf
--- /dev/null
+++ b/source/documentation/0.18.1/features/service-discovery.md
@@ -0,0 +1,42 @@
+Service Discovery
+=================
+
+It is possible for the Aurora executor to announce tasks into ServerSets for
+the purpose of service discovery.  ServerSets use the Zookeeper [group membership pattern](http://zookeeper.apache.org/doc/trunk/recipes.html#sc_outOfTheBox)
+of which there are several reference implementations:
+
+  - [C++](https://github.com/apache/mesos/blob/master/src/zookeeper/group.cpp)
+  - [Java](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/zookeeper/ServerSetImpl.java#L221)
+  - [Python](https://github.com/twitter/commons/blob/master/src/python/twitter/common/zookeeper/serverset/serverset.py#L51)
+
+These can also be used natively in Finagle using the [ZookeeperServerSetCluster](https://github.com/twitter/finagle/blob/master/finagle-serversets/src/main/scala/com/twitter/finagle/zookeeper/ZookeeperServerSetCluster.scala).
+
+For more information about how to configure announcing, see the [Configuration Reference](../../reference/configuration/).
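+
+As a minimal sketch (assuming a `task` object defined as in the other examples), a job typically
+opts into announcing by setting an `Announcer` object on its `Job`/`Service` definition:
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'prod',
+        name = 'hello',
+        task = task,
+        announce = Announcer(primary_port = 'http')  # announce the dynamically assigned 'http' port
+      )
+    ]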
+
+Using Mesos DiscoveryInfo
+-------------------------
+Aurora provides experimental support for populating DiscoveryInfo in Mesos. This can be used to build a
+custom service discovery system that does not rely on ZooKeeper. Please see the `Service Discovery` section in the
+[Mesos Framework Development guide](http://mesos.apache.org/documentation/latest/app-framework-development-guide/) for
+an explanation of the protobuf message in Mesos.
+
+To use this feature, enable the `--populate_discovery_info` flag on the scheduler. All jobs started by the scheduler
+afterwards will have their portmap populated to Mesos and discoverable via the `/state` endpoint on the Mesos master and agent.
+
+### Using Mesos DNS
+An example is using [Mesos-DNS](https://github.com/mesosphere/mesos-dns), which is able to generate multiple DNS
+records. With the current implementation, the example job with key `devcluster/vagrant/test/http-example` generates at
+least the following:
+
+1. An A record for `http_example.test.vagrant.aurora.mesos` (which only includes the IP address);
+2. An [SRV record](https://en.wikipedia.org/wiki/SRV_record) for
+ `_http_example.test.vagrant._tcp.aurora.mesos`, which includes the IP address and every port. This should only
+  be used if the service has one port.
+3. An SRV record `_{port-name}._http_example.test.vagrant._tcp.aurora.mesos` for each port name
+  defined. This should be used when the service has multiple ports.
+
+Things to note:
+
+1. The domain part (".mesos" in the above example) can be configured in [Mesos DNS](http://mesosphere.github.io/mesos-dns/docs/configuration-parameters.html);
+2. Right now, portmap and port aliases in the announcer object are not reflected in DiscoveryInfo, and are therefore not visible in
+   Mesos DNS records either. This is because they are only resolved in the Thermos executor.
diff --git a/source/documentation/0.18.1/features/services.md b/source/documentation/0.18.1/features/services.md
new file mode 100644
index 0000000..91889d2
--- /dev/null
+++ b/source/documentation/0.18.1/features/services.md
@@ -0,0 +1,116 @@
+Long-running Services
+=====================
+
+Jobs that are always restarted on completion, whether successful or unsuccessful,
+are called services. This is useful for long-running processes
+such as web services that should always be running, unless stopped explicitly.
+
+
+Service Specification
+---------------------
+
+A job is identified as a service by the presence of the flag
+`service=True` in the [`Job`](../../reference/configuration/#job-objects) object.
+The `Service` alias can be used as shorthand for `Job` with `service=True`.
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/hello_world.aurora
+    hello = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = SequentialTask(
+      processes = [hello],
+      resources = Resources(cpu = 1.0, ram = 128*MB, disk = 128*MB)
+    )
+
+    jobs = [
+      Service(
+        task = task,
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'prod',
+        name = 'hello'
+      )
+    ]
+
+
+Jobs without the service bit set only restart up to `max_task_failures` times and only if they
+terminated unsuccessfully either due to human error or machine failure (see the
+[`Job`](../../reference/configuration/#job-objects) object for details).
+
+
+Ports
+-----
+
+In order to be useful, most services have to bind to one or more ports. Aurora enables this
+use case via the [`thermos.ports` namespace](../../reference/configuration/#thermos-namespace), which
+allows requesting arbitrarily named ports:
+
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[http]}}'
+    )
+
+
+When this process is included in a job, the job will be allocated a port, and the command line
+will be replaced with something like:
+
+    ./run_nginx.sh -port 42816
+
+Here, 42816 happens to be the allocated port.
+
+For details on how to enable clients to discover this dynamically assigned port, see our
+[Service Discovery](../service-discovery/) documentation.
+
+
+Health Checking
+---------------
+
+Typically, the Thermos executor monitors processes within a task only by the liveness of the forked
+process. In addition to that, Aurora has support for rudimentary health checking: either via HTTP
+or via custom shell scripts.
+
+For example, simply by requesting a `health` port, a process can request to be health checked
+via repeated calls to the `/health` endpoint:
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[health]}}'
+    )
+
+Please see the
+[configuration reference](../../reference/configuration/#healthcheckconfig-objects)
+for configuration options for this feature.
+
+Starting with the 0.17.0 release, job updates rely only on task health-checks by introducing
+a `min_consecutive_successes` parameter on the HealthCheckConfig object. This parameter represents
+the number of successful health checks needed before a task is moved into the `RUNNING` state. Tasks
+that do not have enough successful health checks within the first `n` attempts, are moved to the
+`FAILED` state, where `n = ceil(initial_interval_secs/interval_secs) + max_consecutive_failures +
+min_consecutive_successes`. In order to accommodate variability during task warm up, `initial_interval_secs`
+will act as a grace period. Any health-check failures during the first `m` attempts are ignored and
+do not count towards `max_consecutive_failures`, where `m = ceil(initial_interval_secs/interval_secs)`.
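+
+As an illustration, such a configuration is expressed via the `health_check_config` field of the
+`Job` object (the values below are placeholders, not recommendations):
+
+    health_check_config = HealthCheckConfig(
+      initial_interval_secs = 15,      # grace period: failures in the first ceil(15/5) = 3 attempts are ignored
+      interval_secs = 5,
+      timeout_secs = 1,
+      max_consecutive_failures = 2,
+      min_consecutive_successes = 2    # successes required before the task is considered RUNNING
+    )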
+
+As [job updates](../job-updates/) are based only on health-checks, it is not necessary to set
+`watch_secs` to the worst-case update time; it can instead be set to 0. The scheduler considers a
+task that is in the `RUNNING` state to be healthy and proceeds to updating the next batch of instances.
+For details on how to control health checks, please see the
+[HealthCheckConfig](../../reference/configuration/#healthcheckconfig-objects) configuration object.
+Existing jobs that do not configure a health-check can fall back to using `watch_secs` to
+monitor a task before considering it healthy.
+
+You can pause health checking by touching a file inside of your sandbox, named `.healthchecksnooze`.
+As long as that file is present, health checks will be disabled, enabling users to gather core
+dumps or other performance measurements without worrying about Aurora's health check killing
+their process.
+
+WARNING: Remember to remove this file when you are done; otherwise your instance will have permanently
+disabled health checks.
diff --git a/source/documentation/0.18.1/features/sla-metrics.md b/source/documentation/0.18.1/features/sla-metrics.md
new file mode 100644
index 0000000..e35744d
--- /dev/null
+++ b/source/documentation/0.18.1/features/sla-metrics.md
@@ -0,0 +1,215 @@
+Aurora SLA Measurement
+======================
+
+- [Overview](#overview)
+- [Metric Details](#metric-details)
+  - [Platform Uptime](#platform-uptime)
+  - [Job Uptime](#job-uptime)
+  - [Median Time To Assigned (MTTA)](#median-time-to-assigned-\(mtta\))
+  - [Median Time To Starting (MTTS)](#median-time-to-starting-\(mtts\))
+  - [Median Time To Running (MTTR)](#median-time-to-running-\(mttr\))
+- [Limitations](#limitations)
+
+## Overview
+
+The primary goal of the feature is collection and monitoring of Aurora job SLA (Service Level
+Agreements) metrics that define a contractual relationship between the Aurora/Mesos platform
+and hosted services.
+
+The Aurora SLA feature is by default only enabled for service (non-cron)
+production jobs (`"production=True"` in your `.aurora` config). It can be enabled for
+non-production services by an operator via the scheduler command line flag `-sla_non_prod_metrics`.
+
+Counters that track SLA measurements are computed periodically within the scheduler.
+The individual instance metrics are refreshed every minute (configurable via
+`sla_stat_refresh_interval`). The instance counters are subsequently aggregated by
+relevant grouping types before being exported to the scheduler `/vars` endpoint (when using `vagrant`
+that would be `http://192.168.33.7:8081/vars`).
+
+
+## Metric Details
+
+### Platform Uptime
+
+*Aggregate amount of time a job spends in a non-runnable state due to platform unavailability
+or scheduling delays. This metric tracks Aurora/Mesos uptime performance and reflects on any
+system-caused downtime events (tasks LOST or DRAINED). Any user-initiated task kills/restarts
+will not degrade this metric.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_platform_uptime_percent`
+* Per cluster - `sla_cluster_platform_uptime_percent`
+
+**Units:** percent
+
+A fault in the task environment may cause Aurora and Mesos to have different views of the task state
+or to lose track of the task's existence. In such cases, the service task is marked as LOST and
+rescheduled by Aurora. For example, this may happen when the task stays in ASSIGNED or STARTING
+for too long or the Mesos agent becomes unhealthy (or disappears completely). The time between
+task entering LOST and its replacement reaching RUNNING state is counted towards platform downtime.
+
+Another example of a platform downtime event is the administrator-requested task rescheduling. This
+happens during planned Mesos agent maintenance when all agent tasks are marked as DRAINED and
+rescheduled elsewhere.
+
+To accurately calculate Platform Uptime, we must separate platform incurred downtime from user
+actions that put a service instance in a non-operational state. It is simpler to isolate
+user-incurred downtime and treat all other downtime as platform incurred.
+
+Currently, a user can cause a healthy service (task) downtime in only two ways: via `killTasks`
+or `restartShards` RPCs. For both, their affected tasks leave an audit state transition trail
+relevant to uptime calculations. By applying a special "SLA meaning" to exposed task state
+transition records, we can build a deterministic downtime trace for every given service instance.
+
+A task going through a state transition carries one of three possible SLA meanings
+(see [SlaAlgorithm.java](https://github.com/apache/aurora/blob/rel/0.18.1/src/main/java/org/apache/aurora/scheduler/sla/SlaAlgorithm.java) for
+sla-to-task-state mapping):
+
+* Task is UP: starts a period where the task is considered to be up and running from the Aurora
+  platform standpoint.
+
+* Task is DOWN: starts a period where the task cannot reach the UP state for some
+  non-user-related reason. Counts towards instance downtime.
+
+* Task is REMOVED from SLA: starts a period where the task is not expected to be UP due to
+  user initiated action or failure. We ignore this period for the uptime calculation purposes.
+
+This metric is recalculated over the last sampling period (last minute) to account for
+any UP/DOWN/REMOVED events. It ignores any UP/DOWN events not immediately adjacent to the
+sampling interval as well as adjacent REMOVED events.
+
+### Job Uptime
+
+*Percentage of the job instances considered to be in RUNNING state for the specified duration
+relative to request time. This is a purely application-side metric that considers the aggregate
+uptime of all RUNNING instances. Any user- or platform-initiated restarts directly affect
+this metric.*
+
+**Collection scope:** We currently expose job uptime values at 5 pre-defined
+percentiles (50th, 75th, 90th, 95th and 99th):
+
+* `sla_<job_key>_job_uptime_50_00_sec`
+* `sla_<job_key>_job_uptime_75_00_sec`
+* `sla_<job_key>_job_uptime_90_00_sec`
+* `sla_<job_key>_job_uptime_95_00_sec`
+* `sla_<job_key>_job_uptime_99_00_sec`
+
+**Units:** seconds
+
+You can also get customized real-time stats from the Aurora client. See `aurora sla -h` for
+more details.
+
+### Median Time To Assigned (MTTA)
+
+*Median time a job spends waiting for its tasks to be assigned to a host. This is a combined
+metric that helps track the dependency of scheduling performance on the requested resources
+(user scope) as well as the internal scheduler bin-packing algorithm efficiency (platform scope).*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtta_ms`
+* Per cluster - `sla_cluster_mtta_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.18.1/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mtta_ms`
+    * `sla_cpu_medium_mtta_ms`
+    * `sla_cpu_large_mtta_ms`
+    * `sla_cpu_xlarge_mtta_ms`
+    * `sla_cpu_xxlarge_mtta_ms`
+  * By RAM:
+    * `sla_ram_small_mtta_ms`
+    * `sla_ram_medium_mtta_ms`
+    * `sla_ram_large_mtta_ms`
+    * `sla_ram_xlarge_mtta_ms`
+    * `sla_ram_xxlarge_mtta_ms`
+  * By DISK:
+    * `sla_disk_small_mtta_ms`
+    * `sla_disk_medium_mtta_ms`
+    * `sla_disk_large_mtta_ms`
+    * `sla_disk_xlarge_mtta_ms`
+    * `sla_disk_xxlarge_mtta_ms`
+
+**Units:** milliseconds
+
+MTTA only considers instances that have already reached ASSIGNED state and ignores those
+that are still PENDING. This ensures straggler instances (e.g. with unreasonable resource
+constraints) do not affect metric curves.
+
+### Median Time To Starting (MTTS)
+
+*Median time a job waits for its tasks to reach STARTING state. This is a comprehensive metric
+reflecting on the overall time it takes for Aurora/Mesos to start initializing the sandbox
+for a task.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtts_ms`
+* Per cluster - `sla_cluster_mtts_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.18.1/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mtts_ms`
+    * `sla_cpu_medium_mtts_ms`
+    * `sla_cpu_large_mtts_ms`
+    * `sla_cpu_xlarge_mtts_ms`
+    * `sla_cpu_xxlarge_mtts_ms`
+  * By RAM:
+    * `sla_ram_small_mtts_ms`
+    * `sla_ram_medium_mtts_ms`
+    * `sla_ram_large_mtts_ms`
+    * `sla_ram_xlarge_mtts_ms`
+    * `sla_ram_xxlarge_mtts_ms`
+  * By DISK:
+    * `sla_disk_small_mtts_ms`
+    * `sla_disk_medium_mtts_ms`
+    * `sla_disk_large_mtts_ms`
+    * `sla_disk_xlarge_mtts_ms`
+    * `sla_disk_xxlarge_mtts_ms`
+
+**Units:** milliseconds
+
+MTTS only considers instances in STARTING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+### Median Time To Running (MTTR)
+
+*Median time a job waits for its tasks to reach RUNNING state. This is a comprehensive metric
+reflecting on the overall time it takes for Aurora/Mesos to start executing user content.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mttr_ms`
+* Per cluster - `sla_cluster_mttr_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.18.1/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mttr_ms`
+    * `sla_cpu_medium_mttr_ms`
+    * `sla_cpu_large_mttr_ms`
+    * `sla_cpu_xlarge_mttr_ms`
+    * `sla_cpu_xxlarge_mttr_ms`
+  * By RAM:
+    * `sla_ram_small_mttr_ms`
+    * `sla_ram_medium_mttr_ms`
+    * `sla_ram_large_mttr_ms`
+    * `sla_ram_xlarge_mttr_ms`
+    * `sla_ram_xxlarge_mttr_ms`
+  * By DISK:
+    * `sla_disk_small_mttr_ms`
+    * `sla_disk_medium_mttr_ms`
+    * `sla_disk_large_mttr_ms`
+    * `sla_disk_xlarge_mttr_ms`
+    * `sla_disk_xxlarge_mttr_ms`
+
+**Units:** milliseconds
+
+MTTR only considers instances in RUNNING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+## Limitations
+
+* The availability of Aurora SLA metrics is bound by the scheduler availability.
+
+* All metrics are calculated at a pre-defined interval (currently set at 1 minute).
+  Scheduler restarts may result in missed collections.
diff --git a/source/documentation/0.18.1/features/webhooks.md b/source/documentation/0.18.1/features/webhooks.md
new file mode 100644
index 0000000..a060975
--- /dev/null
+++ b/source/documentation/0.18.1/features/webhooks.md
@@ -0,0 +1,80 @@
+Webhooks
+========
+
+Aurora has an optional feature which allows an operator to specify a file to configure an HTTP webhook
+to receive task state change events. It can be enabled with a scheduler flag, e.g.
+`-webhook_config=/path/to/webhook.json`. At this point, webhooks are still considered *experimental*.
+
+Below is a sample configuration:
+
+```json
+{
+  "headers": {
+    "Content-Type": "application/vnd.kafka.json.v1+json",
+    "Producer-Type": "reliable"
+  },
+  "targetURL": "http://localhost:5000/",
+  "timeoutMsec": 5
+}
+```
+
+And an example of a response that you will get back:
+
+```json
+{
+    "task":
+    {
+        "cachedHashCode":0,
+        "assignedTask": {
+            "cachedHashCode":0,
+            "taskId":"vagrant-test-http_example-8-a6cf7ec5-d793-49c7-b10f-0e14ab80bfff",
+            "task": {
+                "cachedHashCode":-1819348376,
+                "job": {
+                    "cachedHashCode":803049425,
+                    "role":"vagrant",
+                    "environment":"test",
+                    "name":"http_example"
+                    },
+                "owner": {
+                    "cachedHashCode":226895216,
+                    "user":"vagrant"
+                    },
+                "isService":true,
+                "numCpus":0.1,
+                "ramMb":16,
+                "diskMb":8,
+                "priority":0,
+                "maxTaskFailures":1,
+                "production":false,
+                "resources":[
+                    {"cachedHashCode":729800451,"setField":"NUM_CPUS","value":0.1},
+                    {"cachedHashCode":552899914,"setField":"RAM_MB","value":16},
+                    {"cachedHashCode":-1547868317,"setField":"DISK_MB","value":8},
+                    {"cachedHashCode":1957328227,"setField":"NAMED_PORT","value":"http"},
+                    {"cachedHashCode":1954229436,"setField":"NAMED_PORT","value":"tcp"}
+                    ],
+                "constraints":[],
+                "requestedPorts":["http","tcp"],
+                "taskLinks":{"http":"http://%host%:%port:http%"},
+                "contactEmail":"vagrant@localhost",
+                "executorConfig": {
+                    "cachedHashCode":-1194797325,
+                    "name":"AuroraExecutor",
+                    "data": "{\"environment\": \"test\", \"health_check_config\": {\"initial_interval_secs\": 5.0, \"health_checker\": { \"http\": {\"expected_response_code\": 0, \"endpoint\": \"/health\", \"expected_response\": \"ok\"}}, \"max_consecutive_failures\": 0, \"timeout_secs\": 1.0, \"interval_secs\": 1.0}, \"name\": \"http_example\", \"service\": true, \"max_task_failures\": 1, \"cron_collision_policy\": \"KILL_EXISTING\", \"enable_hooks\": false, \"cluster\": \"devcluster\", \"task\": {\"processes\": [{\"daemon\": false, \"name\": \"echo_ports\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"echo \\\"tcp port: {{thermos.ports[tcp]}}; http port: {{thermos.ports[http]}}; alias: {{thermos.ports[alias]}}\\\"\", \"final\": false}, {\"daemon\": false, \"name\": \"stage_server\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"cp /vagrant/src/test/sh/org/apache/aurora/e2e/http_example.py .\", \"final\": false}, {\"daemon\": false, \"name\": \"run_server\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"python http_example.py {{thermos.ports[http]}}\", \"final\": false}], \"name\": \"http_example\", \"finalization_wait\": 30, \"max_failures\": 1, \"max_concurrency\": 0, \"resources\": {\"disk\": 8388608, \"ram\": 16777216, \"cpu\": 0.1}, \"constraints\": [{\"order\": [\"echo_ports\", \"stage_server\", \"run_server\"]}]}, \"production\": false, \"role\": \"vagrant\", \"contact\": \"vagrant@localhost\", \"announce\": {\"primary_port\": \"http\", \"portmap\": {\"alias\": \"http\"}}, \"lifecycle\": {\"http\": {\"graceful_shutdown_endpoint\": \"/quitquitquit\", \"port\": \"health\", \"shutdown_endpoint\": \"/abortabortabort\"}}, \"priority\": 0}"},
+                    "metadata":[],
+                    "container":{
+                        "cachedHashCode":-1955376216,
+                        "setField":"MESOS",
+                        "value":{"cachedHashCode":31}}
+                    },
+                    "assignedPorts":{},
+                    "instanceId":8
+        },
+        "status":"PENDING",
+        "failureCount":0,
+        "taskEvents":[
+            {"cachedHashCode":0,"timestamp":1464992060258,"status":"PENDING","scheduler":"aurora"}]
+        },
+        "oldState":{}}
+```
diff --git a/source/documentation/0.18.1/getting-started/overview.md b/source/documentation/0.18.1/getting-started/overview.md
new file mode 100644
index 0000000..84c5916
--- /dev/null
+++ b/source/documentation/0.18.1/getting-started/overview.md
@@ -0,0 +1,112 @@
+Aurora System Overview
+======================
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+
+Components
+----------
+
+It is important to have an understanding of the components that make up
+a functioning Aurora cluster.
+
+![Aurora Components](../images/components.png)
+
+* **Aurora scheduler**
+  The scheduler is your primary interface to the work you run in your cluster.  You will
+  instruct it to run jobs, and it will manage them in Mesos for you.  You will also frequently use
+  the scheduler's read-only web interface as a heads-up display for what's running in your cluster.
+
+* **Aurora client**
+  The client (`aurora` command) is a command line tool that exposes primitives that you can use to
+  interact with the scheduler.
+
+  Aurora also provides an admin client (`aurora_admin` command) that contains commands built for
+  cluster administrators.  You can use this tool to do things like manage user quotas and manage
+  graceful maintenance on machines in the cluster.
+
+* **Aurora executor**
+  The executor (a.k.a. Thermos executor) is responsible for carrying out the workloads described in
+  the Aurora DSL (`.aurora` files).  The executor is what actually executes user processes.  It will
+  also perform health checking of tasks and register tasks in ZooKeeper for the purposes of dynamic
+  service discovery.
+
+* **Aurora observer**
+  The observer provides browser-based access to the status of individual tasks executing on worker
+  machines.  It gives insight into the processes executing, and facilitates browsing of task sandbox
+  directories.
+
+* **ZooKeeper**
+  [ZooKeeper](http://zookeeper.apache.org) is a distributed consensus system.  In an Aurora cluster
+  it is used for reliable election of the leading Aurora scheduler and Mesos master.  It is also
+  used as a vehicle for service discovery, see [Service Discovery](../../features/service-discovery/)
+
+* **Mesos master**
+  The master is responsible for tracking worker machines and performing accounting of their
+  resources.  The scheduler interfaces with the master to control the cluster.
+
+* **Mesos agent**
+  The agent receives work assigned by the scheduler and executes it.  It interfaces with Linux
+  isolation systems like cgroups, namespaces and Docker to manage the resource consumption of tasks.
+  When a user task is launched, the agent will launch the executor (in the context of a Linux cgroup
+  or Docker container depending upon the environment), which will in turn fork user processes.
+
+  In earlier versions of Mesos and Aurora, the Mesos agent was known as the Mesos slave.
+
+
+Jobs, Tasks and Processes
+--------------------------
+
+Aurora is a Mesos framework used to schedule *jobs* onto Mesos. Mesos
+cares about individual *tasks*, but typical jobs consist of dozens or
+hundreds of task replicas. Aurora provides a layer on top of Mesos with
+its `Job` abstraction. An Aurora `Job` consists of a task template and
+instructions for creating near-identical replicas of that task (modulo
+things like "instance id" or specific port numbers which may differ from
+machine to machine).
+
+The number of tasks that make up a Job can vary. On a basic level, a Job consists of
+one task template and instructions for creating near-identical replicas of that task
+(otherwise referred to as "instances" or "shards").
+
+A task can merely be a single *process* corresponding to a single
+command line, such as `python2.7 my_script.py`. However, a task can also
+consist of many separate processes, which all run within a single
+sandbox. For example, running multiple cooperating agents together,
+such as `logrotate`, `installer`, master, or agent processes. This is
+where Thermos comes in. While Aurora provides a `Job` abstraction on
+top of Mesos `Tasks`, Thermos provides a `Process` abstraction
+underneath Mesos `Task`s and serves as part of the Aurora framework's
+executor.
+
+You define `Job`s, `Task`s, and `Process`es in a configuration file.
+Configuration files are written in Python, and make use of the
+[Pystachio](https://github.com/wickman/pystachio) templating language,
+along with specific Aurora, Mesos, and Thermos commands and methods.
+The configuration files typically end with a `.aurora` extension.
+
+Summary:
+
+* Aurora manages jobs made of tasks.
+* Mesos manages tasks made of processes.
+* Thermos manages processes.
+* All of this is defined in `.aurora` configuration files.
+
+![Aurora hierarchy](../images/aurora_hierarchy.png)
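+
+A minimal sketch of this hierarchy in a `.aurora` file (names and values are placeholders; the
+[Aurora Tutorial](../tutorial/) walks through a complete example):
+
+    hello = Process(name = 'hello', cmdline = 'echo hello world')
+
+    hello_task = Task(
+      name = 'hello_task',
+      processes = [hello],
+      resources = Resources(cpu = 0.1, ram = 16*MB, disk = 16*MB))
+
+    jobs = [
+      Job(cluster = 'devcluster',
+          role = 'www-data',
+          environment = 'devel',
+          name = 'hello',
+          task = hello_task)
+    ]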
+
+Each `Task` has a *sandbox* created when the `Task` starts and garbage
+collected when it finishes. All of a `Task`'s processes run in its
+sandbox, so processes can share state by using a shared current working
+directory.
+
+The sandbox garbage collection policy considers many factors, most
+importantly age and size. It makes a best-effort attempt to keep
+sandboxes around as long as possible post-task in order for service
+owners to inspect data and logs, should the `Task` have completed
+abnormally. But you can't design your applications assuming sandboxes
+will be around forever; instead, build log saving or other
+checkpointing mechanisms directly into your application or into your
+`Job` description.
+
diff --git a/source/documentation/0.18.1/getting-started/tutorial.md b/source/documentation/0.18.1/getting-started/tutorial.md
new file mode 100644
index 0000000..1f037fc
--- /dev/null
+++ b/source/documentation/0.18.1/getting-started/tutorial.md
@@ -0,0 +1,258 @@
+# Aurora Tutorial
+
+This tutorial shows how to use the Aurora scheduler to run (and "`printf-debug`")
+a hello world program on Mesos. This is the recommended document for new Aurora users
+to start getting up to speed on the system.
+
+- [Prerequisite](#setup-install-aurora)
+- [The Script](#the-script)
+- [Aurora Configuration](#aurora-configuration)
+- [Creating the Job](#creating-the-job)
+- [Watching the Job Run](#watching-the-job-run)
+- [Cleanup](#cleanup)
+- [Next Steps](#next-steps)
+
+
+## Prerequisite
+
+This tutorial assumes you are running [Aurora locally using Vagrant](../vagrant/).
+However, in general the instructions are also applicable to any other
+[Aurora installation](../../operations/installation/).
+
+Unless otherwise stated, all commands are to be run from the root of the aurora
+repository clone.
+
+
+## The Script
+
+Our "hello world" application is a simple Python script that loops
+forever, displaying the time every few seconds. Copy the code below and
+put it in a file named `hello_world.py` in the root of your Aurora repository clone
+(Note: this directory is the same as `/vagrant` inside the Vagrant VMs).
+
+The script has an intentional bug, which we will explain later on.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+import time
+
+def main():
+  SLEEP_DELAY = 10
+  # Python experts - ignore this blatant bug.
+  for i in xrang(100):
+    print("Hello world! The time is now: %s. Sleeping for %d secs" % (
+      time.asctime(), SLEEP_DELAY))
+    time.sleep(SLEEP_DELAY)
+
+if __name__ == "__main__":
+  main()
+```
+
+## Aurora Configuration
+
+Once we have our script/program, we need to create a *configuration
+file* that tells Aurora how to manage and launch our Job. Save the below
+code in the file `hello_world.aurora`.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+pkg_path = '/vagrant/hello_world.py'
+
+# we use a trick here to make the configuration change with
+# the contents of the file, for simplicity.  in a normal setting, packages would be
+# versioned, and the version number would be changed in the configuration.
+import hashlib
+with open(pkg_path, 'rb') as f:
+  pkg_checksum = hashlib.md5(f.read()).hexdigest()
+
+# copy hello_world.py into the local sandbox
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s . && echo %s && chmod +x hello_world.py' % (pkg_path, pkg_checksum))
+
+# run the script
+hello_world = Process(
+  name = 'hello_world',
+  cmdline = 'python -u hello_world.py')
+
+# describe the task
+hello_world_task = SequentialTask(
+  processes = [install, hello_world],
+  resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB))
+
+jobs = [
+  Service(cluster = 'devcluster',
+          environment = 'devel',
+          role = 'www-data',
+          name = 'hello_world',
+          task = hello_world_task)
+]
+```
+
+There is a lot going on in that configuration file:
+
+1. From a "big picture" viewpoint, it first defines two
+Processes. Then it defines a Task that runs the two Processes in the
+order specified in the Task definition, as well as specifying what
+computational and memory resources are available for them.  Finally,
+it defines a Job that will schedule the Task on available and suitable
+machines. This Job is the sole member of a list of Jobs; you can
+specify more than one Job in a config file.
+
+2. At the Process level, it specifies how to get your code into the
+local sandbox in which it will run. It then specifies how the code is
+actually run once the second Process starts.
+
+For more about Aurora configuration files, see the [Configuration
+Tutorial](../../reference/configuration-tutorial/) and the [Configuration
+Reference](../../reference/configuration/) (preferably after finishing this
+tutorial).
+
+
+## Creating the Job
+
+We're ready to launch our job! To do so, we use the Aurora Client to
+issue a Job creation request to the Aurora scheduler.
+
+Many Aurora Client commands take a *job key* argument, which uniquely
+identifies a Job. A job key consists of four parts, each separated by a
+"/". The four parts are  `<cluster>/<role>/<environment>/<jobname>`
+in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts existing on the agent machines. If you
+don't know what accounts are available, contact your sysadmin.
+* Environment names are namespaces; you can count on `test`, `devel`,
+`staging` and `production` existing.
+* Jobname is the custom name of your job.
+
+When comparing two job keys, if any of the four parts is different from
+its counterpart in the other key, then the two job keys identify two separate
+jobs. If all four values are identical, the job keys identify the same job.
+
+The `clusters.json` [client configuration](../../reference/client-cluster-configuration/)
+for the Aurora scheduler defines the available cluster names.
+For Vagrant, from the top-level of your Aurora repository clone, do:
+
+    $ vagrant ssh
+
+Followed by:
+
+    vagrant@aurora:~$ cat /etc/aurora/clusters.json
+
+You'll see something like the following. The `name` value shown here corresponds to a job key's cluster value.
+
+```javascript
+[{
+  "name": "devcluster",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler",
+  "auth_mechanism": "UNAUTHENTICATED",
+  "slave_run_directory": "latest",
+  "slave_root": "/var/lib/mesos"
+}]
+```
+
+The Aurora Client command that actually runs our Job is `aurora job create`. It creates a Job as
+specified by its job key and configuration file arguments and runs it.
+
+    aurora job create <cluster>/<role>/<environment>/<jobname> <config_file>
+
+Or for our example:
+
+    aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+After entering our virtual machine using `vagrant ssh`, this returns:
+
+    vagrant@aurora:~$ aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Creating job hello_world
+     INFO] Checking status of devcluster/www-data/devel/hello_world
+    Job create succeeded: job url=http://aurora.local:8081/scheduler/www-data/devel/hello_world
+
+
+## Watching the Job Run
+
+Now that our job is running, let's see what it's doing. Access the
+scheduler web interface at `http://$scheduler_hostname:$scheduler_port/scheduler`,
+or, when using `vagrant`, at `http://192.168.33.7:8081/scheduler`.
+First we see what Jobs are scheduled:
+
+![Scheduled Jobs](../images/ScheduledJobs.png)
+
+Click on your role name, which in this case is `www-data`, and you will see the Jobs associated
+with that role:
+
+![Role Jobs](../images/RoleJobs.png)
+
+If you click on your `hello_world` Job, you'll see:
+
+![hello_world Job](../images/HelloWorldJob.png)
+
+Oops, looks like our first job didn't quite work! The task is temporarily throttled for
+having failed on every attempt of the Aurora scheduler to run it. We have to figure out
+what is going wrong.
+
+On the Completed tasks tab, we see all past attempts of the Aurora scheduler to run our job.
+
+![Completed tasks tab](../images/CompletedTasks.png)
+
+We can navigate to the Task page of a failed run by clicking on the host link.
+
+![Task page](../images/TaskBreakdown.png)
+
+Once there, we see that the `hello_world` process failed. The Task page
+captures the standard error and standard output streams and makes them available.
+Clicking through to `stderr` on the failed `hello_world` process, we see what happened.
+
+![stderr page](../images/stderr.png)
+
+It looks like we made a typo in our Python script. We wanted `xrange`,
+not `xrang`. Edit the `hello_world.py` script to use the correct function
+and save it as `hello_world_v2.py`. Then update the `hello_world.aurora`
+configuration to the newest version.
+
+In order to try again, we can now instruct the scheduler to update our job:
+
+    vagrant@aurora:~$ aurora update start devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Starting update for: hello_world
+    Job update has started. View your update progress at http://aurora.local:8081/scheduler/www-data/devel/hello_world/update/8ef38017-e60f-400d-a2f2-b5a8b724e95b
+
+This time, the task comes up.
+
+![Running Job](../images/RunningJob.png)
+
+By again clicking on the host, we inspect the Task page, and see that the
+`hello_world` process is running.
+
+![Running Task page](../images/runningtask.png)
+
+We then inspect the output by clicking on `stdout` and see our process'
+output:
+
+![stdout page](../images/stdout.png)
+
+## Cleanup
+
+Now that we're done, we kill the job using the Aurora client:
+
+    vagrant@aurora:~$ aurora job killall devcluster/www-data/devel/hello_world
+     INFO] Killing tasks for job: devcluster/www-data/devel/hello_world
+     INFO] Instances to be killed: [0]
+    Successfully killed instances [0]
+    Job killall succeeded
+
+The job page now shows the `hello_world` tasks as completed.
+
+![Killed Task page](../images/killedtask.png)
+
+## Next Steps
+
+Now that you've finished this Tutorial, you should read or do the following:
+
+- [The Aurora Configuration Tutorial](../../reference/configuration-tutorial/), which provides more examples
+  and best practices for writing Aurora configurations. You should also look at
+  the [Aurora Configuration Reference](../../reference/configuration/).
+- Explore the Aurora Client - use `aurora -h`, and read the
+  [Aurora Client Commands](../../reference/client-commands/) document.
diff --git a/source/documentation/0.18.1/getting-started/vagrant.md b/source/documentation/0.18.1/getting-started/vagrant.md
new file mode 100644
index 0000000..26b9a80
--- /dev/null
+++ b/source/documentation/0.18.1/getting-started/vagrant.md
@@ -0,0 +1,154 @@
+A local Cluster with Vagrant
+============================
+
+This document shows you how to configure a complete cluster using a virtual machine. This setup
+replicates a real cluster in your development machine as closely as possible. After you complete
+the steps outlined here, you will be ready to create and run your first Aurora job.
+
+The following sections describe these steps in detail:
+
+1. [Overview](#overview)
+1. [Install VirtualBox and Vagrant](#install-virtualbox-and-vagrant)
+1. [Clone the Aurora repository](#clone-the-aurora-repository)
+1. [Start the local cluster](#start-the-local-cluster)
+1. [Log onto the VM](#log-onto-the-vm)
+1. [Run your first job](#run-your-first-job)
+1. [Rebuild components](#rebuild-components)
+1. [Shut down or delete your local cluster](#shut-down-or-delete-your-local-cluster)
+1. [Troubleshooting](#troubleshooting)
+
+
+Overview
+--------
+
+The Aurora distribution includes a set of scripts that enable you to create a local cluster in
+your development machine. These scripts use [Vagrant](https://www.vagrantup.com/) and
+[VirtualBox](https://www.virtualbox.org/) to run and configure a virtual machine. Once the
+virtual machine is running, the scripts install and initialize Aurora and any required components
+to create the local cluster.
+
+
+Install VirtualBox and Vagrant
+------------------------------
+
+First, download and install [VirtualBox](https://www.virtualbox.org/) on your development machine.
+
+Then download and install [Vagrant](https://www.vagrantup.com/). To verify that the installation
+was successful, open a terminal window and type the `vagrant` command. You should see a list of
+common commands for this tool.
+
+
+Clone the Aurora repository
+---------------------------
+
+To obtain the Aurora source distribution, clone its Git repository using the following command:
+
+     git clone git://git.apache.org/aurora.git
+
+
+Start the local cluster
+-----------------------
+
+Now change into the `aurora/` directory, which contains the Aurora source code and
+other scripts and tools:
+
+     cd aurora/
+
+To start the local cluster, type the following command:
+
+     vagrant up
+
+This command uses the configuration scripts in the Aurora distribution to:
+
+* Download a Linux system image.
+* Start a virtual machine (VM) and configure it.
+* Install the required build tools on the VM.
+* Install Aurora's requirements (like [Mesos](http://mesos.apache.org/) and
+[Zookeeper](http://zookeeper.apache.org/)) on the VM.
+* Build and install Aurora from source on the VM.
+* Start Aurora's services on the VM.
+
+This process takes several minutes to complete.
+
+You may notice a warning that guest additions in the VM don't match your version of VirtualBox.
+This should generally be harmless, but you may wish to install a vagrant plugin to take care of
+mismatches like this for you:
+
+     vagrant plugin install vagrant-vbguest
+
+With this plugin installed, whenever you `vagrant up` the plugin will upgrade the guest additions
+for you when a version mis-match is detected. You can read more about the plugin
+[here](https://github.com/dotless-de/vagrant-vbguest).
+
+To verify that Aurora is running on the cluster, visit the following URLs:
+
+* Scheduler - http://192.168.33.7:8081
+* Observer - http://192.168.33.7:1338
+* Mesos Master - http://192.168.33.7:5050
+* Mesos Agent - http://192.168.33.7:5051
+
+
+Log onto the VM
+---------------
+
+To SSH into the VM, run the following command in your development machine:
+
+     vagrant ssh
+
+To verify that Aurora is installed in the VM, type the `aurora` command. You should see a list
+of arguments and possible commands.
+
+The `/vagrant` directory on the VM is mapped to the `aurora/` local directory
+from which you started the cluster. You can edit files inside this directory in your development
+machine and access them from the VM under `/vagrant`.
+
+A pre-installed `clusters.json` file refers to your local cluster as `devcluster`, which you
+will use in client commands.
+
+
+Run your first job
+------------------
+
+Now that your cluster is up and running, you are ready to define and run your first job in Aurora.
+For more information, see the [Aurora Tutorial](../tutorial/).
+
+
+Rebuild components
+------------------
+
+If you are changing Aurora code and would like to rebuild a component, you can use the `aurorabuild`
+command on the VM to build and restart a component.  This is considerably faster than destroying
+and rebuilding your VM.
+
+`aurorabuild` accepts a list of components to build and update. To get a list of supported
+components, invoke the `aurorabuild` command with no arguments. For example, to rebuild and
+restart the client:
+
+     vagrant ssh -c 'aurorabuild client'
+
+
+Shut down or delete your local cluster
+--------------------------------------
+
+To shut down your local cluster, run the `vagrant halt` command in your development machine. To
+start it again, run the `vagrant up` command.
+
+Once you are finished with your local cluster, or if you would otherwise like to start from scratch,
+you can use the command `vagrant destroy` to turn off and delete the virtual file system.
+
+
+Troubleshooting
+---------------
+
+Most Vagrant-related problems can be fixed with the following steps:
+
+* Destroying the vagrant environment with `vagrant destroy`
+* Killing any orphaned VMs (see AURORA-499) with `virtualbox` UI or `VBoxManage` command line tool
+* Cleaning the repository of build artifacts and other intermediate output with `git clean -fdx`
+* Bringing up the vagrant environment with `vagrant up`
+
+If that still doesn't solve your problem, make sure to inspect the log files:
+
+* Scheduler: `/var/log/upstart/aurora-scheduler.log`
+* Observer: `/var/log/upstart/aurora-thermos-observer.log`
+* Mesos Master: `/var/log/mesos/mesos-master.INFO` (also see `.WARNING` and `.ERROR`)
+* Mesos Agent: `/var/log/mesos/mesos-slave.INFO` (also see `.WARNING` and `.ERROR`)
diff --git a/publish/documentation/latest/images/CPUavailability.png b/source/documentation/0.18.1/images/CPUavailability.png
similarity index 100%
copy from publish/documentation/latest/images/CPUavailability.png
copy to source/documentation/0.18.1/images/CPUavailability.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/CompletedTasks.png b/source/documentation/0.18.1/images/CompletedTasks.png
new file mode 100644
index 0000000..343e111
--- /dev/null
+++ b/source/documentation/0.18.1/images/CompletedTasks.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/HelloWorldJob.png b/source/documentation/0.18.1/images/HelloWorldJob.png
new file mode 100644
index 0000000..4977227
--- /dev/null
+++ b/source/documentation/0.18.1/images/HelloWorldJob.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/RoleJobs.png b/source/documentation/0.18.1/images/RoleJobs.png
new file mode 100644
index 0000000..95ffa49
--- /dev/null
+++ b/source/documentation/0.18.1/images/RoleJobs.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/RunningJob.png b/source/documentation/0.18.1/images/RunningJob.png
new file mode 100644
index 0000000..4e7a045
--- /dev/null
+++ b/source/documentation/0.18.1/images/RunningJob.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/ScheduledJobs.png b/source/documentation/0.18.1/images/ScheduledJobs.png
new file mode 100644
index 0000000..0be4731
--- /dev/null
+++ b/source/documentation/0.18.1/images/ScheduledJobs.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/TaskBreakdown.png b/source/documentation/0.18.1/images/TaskBreakdown.png
new file mode 100644
index 0000000..fab702c
--- /dev/null
+++ b/source/documentation/0.18.1/images/TaskBreakdown.png
Binary files differ
diff --git a/publish/documentation/latest/images/aurora_hierarchy.png b/source/documentation/0.18.1/images/aurora_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/aurora_hierarchy.png
copy to source/documentation/0.18.1/images/aurora_hierarchy.png
Binary files differ
diff --git a/publish/assets/img/aurora_logo.png b/source/documentation/0.18.1/images/aurora_logo.png
similarity index 100%
copy from publish/assets/img/aurora_logo.png
copy to source/documentation/0.18.1/images/aurora_logo.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/components.odg b/source/documentation/0.18.1/images/components.odg
new file mode 100644
index 0000000..a0f3b28
--- /dev/null
+++ b/source/documentation/0.18.1/images/components.odg
Binary files differ
diff --git a/source/documentation/0.18.1/images/components.png b/source/documentation/0.18.1/images/components.png
new file mode 100644
index 0000000..e8228c0
--- /dev/null
+++ b/source/documentation/0.18.1/images/components.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/debug-client-test.png b/source/documentation/0.18.1/images/debug-client-test.png
new file mode 100644
index 0000000..8b67a10
--- /dev/null
+++ b/source/documentation/0.18.1/images/debug-client-test.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/debugging-client-test.png b/source/documentation/0.18.1/images/debugging-client-test.png
new file mode 100644
index 0000000..df7b7e3
--- /dev/null
+++ b/source/documentation/0.18.1/images/debugging-client-test.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/killedtask.png b/source/documentation/0.18.1/images/killedtask.png
new file mode 100644
index 0000000..bb1bc92
--- /dev/null
+++ b/source/documentation/0.18.1/images/killedtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/lifeofatask.png b/source/documentation/0.18.1/images/lifeofatask.png
similarity index 100%
copy from publish/documentation/latest/images/lifeofatask.png
copy to source/documentation/0.18.1/images/lifeofatask.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png b/source/documentation/0.18.1/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
new file mode 100644
index 0000000..ea9866e
--- /dev/null
+++ b/source/documentation/0.18.1/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png b/source/documentation/0.18.1/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
new file mode 100644
index 0000000..c9539df
--- /dev/null
+++ b/source/documentation/0.18.1/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/presentations/02_19_2015_aurora_at_twitter_thumb.png b/source/documentation/0.18.1/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
new file mode 100644
index 0000000..99674c7
--- /dev/null
+++ b/source/documentation/0.18.1/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/presentations/02_28_2015_apache_aurora_thumb.png b/source/documentation/0.18.1/images/presentations/02_28_2015_apache_aurora_thumb.png
new file mode 100644
index 0000000..6e0e1fb
--- /dev/null
+++ b/source/documentation/0.18.1/images/presentations/02_28_2015_apache_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png b/source/documentation/0.18.1/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
new file mode 100644
index 0000000..f81a128
--- /dev/null
+++ b/source/documentation/0.18.1/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/presentations/03_25_2014_introduction_to_aurora_thumb.png b/source/documentation/0.18.1/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
new file mode 100644
index 0000000..6ebce80
--- /dev/null
+++ b/source/documentation/0.18.1/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/presentations/04_30_2015_monolith_to_microservices_thumb.png b/source/documentation/0.18.1/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
new file mode 100644
index 0000000..f8a3db1
--- /dev/null
+++ b/source/documentation/0.18.1/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/presentations/08_21_2014_past_present_future_thumb.png b/source/documentation/0.18.1/images/presentations/08_21_2014_past_present_future_thumb.png
new file mode 100644
index 0000000..deaebe7
--- /dev/null
+++ b/source/documentation/0.18.1/images/presentations/08_21_2014_past_present_future_thumb.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png b/source/documentation/0.18.1/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
new file mode 100644
index 0000000..61ea6d4
--- /dev/null
+++ b/source/documentation/0.18.1/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/presentations/09_20_2015_twitter_production_scale_thumb.png b/source/documentation/0.18.1/images/presentations/09_20_2015_twitter_production_scale_thumb.png
new file mode 100644
index 0000000..9f1f4af
--- /dev/null
+++ b/source/documentation/0.18.1/images/presentations/09_20_2015_twitter_production_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png b/source/documentation/0.18.1/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
new file mode 100644
index 0000000..a8bb199
--- /dev/null
+++ b/source/documentation/0.18.1/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png b/source/documentation/0.18.1/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
new file mode 100644
index 0000000..ac5c6dc
--- /dev/null
+++ b/source/documentation/0.18.1/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/runningtask.png b/source/documentation/0.18.1/images/runningtask.png
new file mode 100644
index 0000000..9e7e31a
--- /dev/null
+++ b/source/documentation/0.18.1/images/runningtask.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/stderr.png b/source/documentation/0.18.1/images/stderr.png
new file mode 100644
index 0000000..6076a84
--- /dev/null
+++ b/source/documentation/0.18.1/images/stderr.png
Binary files differ
diff --git a/source/documentation/0.18.1/images/stdout.png b/source/documentation/0.18.1/images/stdout.png
new file mode 100644
index 0000000..d614097
--- /dev/null
+++ b/source/documentation/0.18.1/images/stdout.png
Binary files differ
diff --git a/publish/documentation/latest/images/storage_hierarchy.png b/source/documentation/0.18.1/images/storage_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/storage_hierarchy.png
copy to source/documentation/0.18.1/images/storage_hierarchy.png
Binary files differ
diff --git a/source/documentation/0.18.1/index.html.md b/source/documentation/0.18.1/index.html.md
new file mode 100644
index 0000000..fc414c4
--- /dev/null
+++ b/source/documentation/0.18.1/index.html.md
@@ -0,0 +1,79 @@
+## Introduction
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+We encourage you to ask questions on the [Aurora user list](http://aurora.apache.org/community/) or
+the `#aurora` IRC channel on `irc.freenode.net`.
+
+
+## Getting Started
+Information for everyone new to Apache Aurora.
+
+ * [Aurora System Overview](getting-started/overview/)
+ * [Hello World Tutorial](getting-started/tutorial/)
+ * [Local cluster with Vagrant](getting-started/vagrant/)
+
+## Features
+Description of important Aurora features.
+
+ * [Containers](features/containers/)
+ * [Cron Jobs](features/cron-jobs/)
+ * [Custom Executors](features/custom-executors/)
+ * [Job Updates](features/job-updates/)
+ * [Multitenancy](features/multitenancy/)
+ * [Resource Isolation](features/resource-isolation/)
+ * [Scheduling Constraints](features/constraints/)
+ * [Services](features/services/)
+ * [Service Discovery](features/service-discovery/)
+ * [SLA Metrics](features/sla-metrics/)
+ * [Webhooks](features/webhooks/)
+
+## Operators
+For those that wish to manage and fine-tune an Aurora cluster.
+
+ * [Installation](operations/installation/)
+ * [Configuration](operations/configuration/)
+ * [Upgrades](operations/upgrades/)
+ * [Troubleshooting](operations/troubleshooting/)
+ * [Monitoring](operations/monitoring/)
+ * [Security](operations/security/)
+ * [Storage](operations/storage/)
+ * [Backup](operations/backup-restore/)
+
+## Reference
+The complete reference of commands, configuration options, and scheduler internals.
+
+ * [Task lifecycle](reference/task-lifecycle/)
+ * Configuration (`.aurora` files)
+    - [Configuration Reference](reference/configuration/)
+    - [Configuration Tutorial](reference/configuration-tutorial/)
+    - [Configuration Best Practices](reference/configuration-best-practices/)
+    - [Configuration Templating](reference/configuration-templating/)
+ * Aurora Client
+    - [Client Commands](reference/client-commands/)
+    - [Client Hooks](reference/client-hooks/)
+    - [Client Cluster Configuration](reference/client-cluster-configuration/)
+ * [Scheduler Configuration](reference/scheduler-configuration/)
+ * [Observer Configuration](reference/observer-configuration/)
+ * [Endpoints](reference/scheduler-endpoints/)
+
+## Additional Resources
+ * [Tools integrating with Aurora](additional-resources/tools/)
+ * [Presentation videos and slides](additional-resources/presentations/)
+
+## Developers
+All the information you need to start modifying Aurora and contributing back to the project.
+
+ * [Contributing to the project](contributing/)
+ * [Committer's Guide](development/committers-guide/)
+ * [Design Documents](development/design-documents/)
+ * Developing the Aurora components:
+     - [Client](development/client/)
+     - [Scheduler](development/scheduler/)
+     - [Scheduler UI](development/ui/)
+     - [Thermos](development/thermos/)
+     - [Thrift structures](development/thrift/)
+
+
diff --git a/source/documentation/0.18.1/operations/backup-restore.md b/source/documentation/0.18.1/operations/backup-restore.md
new file mode 100644
index 0000000..b9e7d3a
--- /dev/null
+++ b/source/documentation/0.18.1/operations/backup-restore.md
@@ -0,0 +1,91 @@
+# Recovering from a Scheduler Backup
+
+**Be sure to read the entire page before attempting to restore from a backup, as it may have
+unintended consequences.**
+
+## Summary
+
+The restoration procedure replaces the existing (possibly corrupted) Mesos replicated log with an
+earlier, backed up, version and requires all schedulers to be taken down temporarily while
+restoring. Once completed, the scheduler state resets to what it was when the backup was created.
+This means any jobs/tasks created or updated after the backup are unknown to the scheduler and will
+be killed shortly after the cluster restarts. All other tasks continue operating as normal.
+
+Usually, it is a bad idea to restore a backup that is not extremely recent (i.e. older than a few
+hours). This is because the scheduler will expect the cluster to look exactly as the backup does,
+so any tasks that have been rescheduled since the backup was taken will be killed.
+
+The instructions below have been verified in the [Vagrant environment](../../getting-started/vagrant/) and,
+with minor syntax/path changes, should be applicable to any Aurora cluster.
+
+## Preparation
+
+Follow these steps to prepare the cluster for restoring from a backup:
+
+* Stop all scheduler instances
+
+* Consider blocking external traffic on the port defined by `-http_port` for all schedulers to
+prevent users from interacting with the scheduler during the restoration process. This helps
+troubleshooting by reducing scheduler log noise and prevents users from making changes that will
+be erased once the backup snapshot is restored.
+
+* Configure `aurora_admin` access to run all commands listed in
+  [Restore from backup](#restore-from-backup) section locally on the leading scheduler:
+  * Make sure the [clusters.json](../../reference/client-cluster-configuration/) file is configured to
+    access the scheduler directly. Set the `scheduler_uri` setting and remove `zk`. Since the leader can get
+    re-elected during the restore steps, consider doing this on all scheduler replicas.
+  * Depending on your particular security approach you will need to either turn off scheduler
+    authorization by removing the scheduler's `-http_authentication_mechanism` flag or make sure that
+    direct scheduler access is properly authorized. E.g.: in the case of Kerberos you will need to make
+    a `/etc/hosts` file change to map your local IP to the scheduler URL configured in the keytabs:
+
+        <local_ip> <scheduler_domain_in_keytabs>
+
+* The next steps are required to put the scheduler into a partially disabled state where it can still
+accept storage recovery requests but is unable to schedule or change task states. This may be
+accomplished by updating the following scheduler configuration options (see the sketch after this list):
+  * Set `-mesos_master_address` to a non-existent zk address. This prevents the scheduler from
+    registering with Mesos. E.g.: `-mesos_master_address=zk://localhost:1111/mesos/master`
+  * Set `-max_registration_delay` to a sufficiently long interval to prevent a registration timeout
+    and, as a result, scheduler suicide. E.g.: `-max_registration_delay=360mins`
+  * Make sure the `-reconciliation_initial_delay` option is set high enough (e.g.: `365days`) to
+    prevent accidental task GC. This is important as the scheduler will attempt to reconcile the cluster
+    state and will kill all tasks when restarted with an empty Mesos replicated log.
+
+* Restart all schedulers
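+
+A minimal sketch of the temporary flag overrides described above (the address and intervals repeat
+the examples from the list; treat them as illustrative values to adapt, not required settings):
+
+    # Added temporarily to the scheduler flags for the duration of the restore
+    -mesos_master_address=zk://localhost:1111/mesos/master   # intentionally unreachable
+    -max_registration_delay=360mins
+    -reconciliation_initial_delay=365days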
+
+## Cleanup and re-initialize Mesos replicated log
+
+Get rid of the corrupted files and re-initialize Mesos replicated log:
+
+* Stop schedulers
+* Delete all files under `-native_log_file_path` on all schedulers
+* Initialize Mesos replica's log file: `sudo mesos-log initialize --path=<-native_log_file_path>`
+* Start schedulers
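+
+A concrete sketch of this sequence, assuming the default paths from the
+[installation guide](../installation/) (verify `/var/lib/aurora/scheduler/db` against your own
+`-native_log_file_path` value and use the service commands matching your init system):
+
+    sudo systemctl stop aurora                   # or: sudo stop aurora-scheduler
+    sudo rm -rf /var/lib/aurora/scheduler/db
+    sudo -u aurora mkdir -p /var/lib/aurora/scheduler/db
+    sudo -u aurora mesos-log initialize --path=/var/lib/aurora/scheduler/db
+    sudo systemctl start aurora                  # or: sudo start aurora-scheduler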
+
+## Restore from backup
+
+At this point the scheduler is ready to rehydrate from the backup:
+
+* Identify the leading scheduler by:
+  * examining the `scheduler_lifecycle_LEADER_AWAITING_REGISTRATION` metric at the scheduler
+    `/vars` endpoint. The leader will report `1`; all other replicas report `0`.
+  * examining scheduler logs
+  * or examining the ZooKeeper registration under the path defined by `-zk_endpoints`
+    and `-serverset_path`
+
+* Locate the desired backup file, copy it to the leading scheduler's `-backup_dir` folder and stage
+recovery by running the following command on the leader:
+`aurora_admin scheduler_stage_recovery --bypass-leader-redirect <cluster> scheduler-backup-<yyyy-MM-dd-HH-mm>`
+
+* At this point, the recovery snapshot is staged and available for manual verification/modification
+via `aurora_admin scheduler_print_recovery_tasks --bypass-leader-redirect` and
+`scheduler_delete_recovery_tasks --bypass-leader-redirect` commands.
+See `aurora_admin help <command>` for usage details.
+
+* Commit recovery. This instructs the scheduler to overwrite the existing Mesos replicated log with
+the provided backup snapshot and initiate a mandatory failover:
+`aurora_admin scheduler_commit_recovery --bypass-leader-redirect <cluster>`
+
+## Cleanup
+Undo any modification done during [Preparation](#preparation) sequence.
diff --git a/source/documentation/0.18.1/operations/configuration.md b/source/documentation/0.18.1/operations/configuration.md
new file mode 100644
index 0000000..89c529d
--- /dev/null
+++ b/source/documentation/0.18.1/operations/configuration.md
@@ -0,0 +1,339 @@
+# Scheduler Configuration
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+Examples are available under `examples/scheduler/`. For a list of available Aurora flags and their
+documentation, see [Scheduler Configuration Reference](../../reference/scheduler-configuration/).
+
+
+## A Note on Configuration
+Like Mesos, Aurora uses command-line flags for runtime configuration. As such, the Aurora
+"configuration file" is typically a `scheduler.sh` shell script of the following form:
+
+    #!/bin/bash
+    AURORA_HOME=/usr/local/aurora-scheduler
+
+    # Flags controlling the JVM.
+    JAVA_OPTS=(
+      -Xmx2g
+      -Xms2g
+      # GC tuning, etc.
+    )
+
+    # Flags controlling the scheduler.
+    AURORA_FLAGS=(
+      # Port for client RPCs and the web UI
+      -http_port=8081
+      # Log configuration, etc.
+    )
+
+    # Environment variables controlling libmesos
+    export JAVA_HOME=...
+    export GLOG_v=1
+    export LIBPROCESS_PORT=8083
+    export LIBPROCESS_IP=192.168.33.7
+
+    JAVA_OPTS="${JAVA_OPTS[*]}" exec "$AURORA_HOME/bin/aurora-scheduler" "${AURORA_FLAGS[@]}"
+
+That way Aurora's current flags are visible in `ps` and in the `/vars` admin endpoint.
+
+
+## JVM Configuration
+
+JVM settings are dependent on your environment and cluster size. They might require
+custom tuning. As a starting point, we recommend:
+
+* Ensure the initial (`-Xms`) and maximum (`-Xmx`) heap size are identical to prevent heap resizing
+  at runtime.
+* Either `-XX:+UseConcMarkSweepGC` or `-XX:+UseG1GC -XX:+UseStringDeduplication` are
+  sane defaults for the garbage collector.
+* `-Djava.net.preferIPv4Stack=true` makes sense in most cases as well.
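+
+Applied to the `scheduler.sh` sketch above, these recommendations could look like the following
+(heap sizes are illustrative assumptions; tune them to your cluster):
+
+    JAVA_OPTS=(
+      -Xmx2g
+      -Xms2g
+      -XX:+UseG1GC
+      -XX:+UseStringDeduplication
+      -Djava.net.preferIPv4Stack=true
+    )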
+
+
+## Network Configuration
+
+By default, Aurora binds to all interfaces and auto-discovers its hostname. To reduce ambiguity,
+it helps to hardcode them:
+
+    -http_port=8081
+    -ip=192.168.33.7
+    -hostname="aurora1.us-east1.example.org"
+
+Two environment variables control the IP and port used for communication with the Mesos master
+and for the replicated log used by Aurora:
+
+    export LIBPROCESS_PORT=8083
+    export LIBPROCESS_IP=192.168.33.7
+
+It is important that those can be reached from all Mesos master and Aurora scheduler instances.
+
+
+## Replicated Log Configuration
+
+Aurora schedulers use ZooKeeper to discover log replicas and elect a leader. Only one scheduler is
+leader at a given time - the other schedulers follow log writes and prepare to take over as leader
+but do not communicate with the Mesos master. Either 3 or 5 schedulers are recommended in a
+production deployment depending on failure tolerance and they must have persistent storage.
+
+Below is a summary of scheduler storage configuration flags that either don't have default values
+or require attention before deploying in a production environment.
+
+### `-native_log_quorum_size`
+Defines the Mesos replicated log quorum size. In a cluster with `N` schedulers, the flag
+`-native_log_quorum_size` should be set to `floor(N/2) + 1`. So in a cluster with 1 scheduler
+it should be set to `1`, in a cluster with 3 it should be set to `2`, and in a cluster of 5 it
+should be set to `3`.
+
+  Number of schedulers (N) | ```-native_log_quorum_size``` setting (```floor(N/2) + 1```)
+  ------------------------ | -------------------------------------------------------------
+  1                        | 1
+  3                        | 2
+  5                        | 3
+  7                        | 4
+
+*Incorrectly setting this flag will cause data corruption to occur!*
+
+### `-native_log_file_path`
+Location of the Mesos replicated log files. For optimal and consistent performance, consider
+allocating a dedicated disk (preferably SSD) for the replicated log. Ensure that this disk is not
+used by anything else (e.g. no process logging) and in particular that it is a real disk
+and not just a partition.
+
+Even when a dedicated disk is used, switching the Linux kernel I/O scheduler from `CFQ` to `deadline`
+can further improve storage performance in Aurora ([see this ticket for details](https://issues.apache.org/jira/browse/AURORA-1211)).
+
+### `-native_log_zk_group_path`
+ZooKeeper path used for Mesos replicated log quorum discovery.
+
+See [code](https://github.com/apache/aurora/blob/rel/0.18.1/src/main/java/org/apache/aurora/scheduler/log/mesos/MesosLogStreamModule.java) for
+other available Mesos replicated log configuration options and default values.
+
+### Changing the Quorum Size
+Special care needs to be taken when changing the size of the Aurora scheduler quorum.
+Since Aurora uses a Mesos replicated log, similar steps need to be followed as when
+[changing the Mesos quorum size](http://mesos.apache.org/documentation/latest/operational-guide).
+
+As a preparation, increase `-native_log_quorum_size` on each existing scheduler and restart them.
+When updating from 3 to 5 schedulers, the quorum size would grow from 2 to 3.
+
+When starting the new schedulers, set `-native_log_quorum_size` to the new value. Failing to
+first increase the quorum size on running schedulers can in some cases result in corruption
+or truncation of the replicated log used by Aurora. In that case, see the documentation on
+[recovering from backup](../backup-restore/).
+
+
+## Backup Configuration
+
+Configuration options for the Aurora scheduler backup manager.
+
+* `-backup_interval`: The interval on which the scheduler writes local storage backups.
+   The default is every hour.
+* `-backup_dir`: Directory to write backups to. As stated above, this should not be co-located on the
+   same disk as the replicated log.
+* `-max_saved_backups`: Maximum number of backups to retain before deleting the oldest backup(s).
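+
+A possible combination of these flags (the values are illustrative assumptions; pick a backup
+directory on a disk other than the one holding the replicated log):
+
+    -backup_interval=1hrs
+    -backup_dir=/var/lib/aurora/backups
+    -max_saved_backups=48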
+
+
+## Resource Isolation
+
+For proper CPU, memory, and disk isolation as mentioned in our [end-user documentation](../../features/resource-isolation/),
+we recommend adding the following isolators to the `--isolation` flag of the Mesos agent:
+
+* `cgroups/cpu`
+* `cgroups/mem`
+* `disk/du`
+
+In addition, we recommend setting the following [agent flags](http://mesos.apache.org/documentation/latest/configuration/):
+
+* `--cgroups_limit_swap` to enforce limits on both memory and swap instead of just memory.
+  Alternatively, you could disable swap on your agent hosts.
+* `--cgroups_enable_cfs` to enable hard limits on CPU resources via the CFS bandwidth limiting
+  feature.
+* `--enforce_container_disk_quota` to enable disk quota enforcement for containers.
+
+To enable the optional GPU support in Mesos, please see the GPU related flags in the
+[Mesos configuration](http://mesos.apache.org/documentation/latest/configuration/).
+To enable the corresponding feature in Aurora, you have to start the scheduler with the
+flag
+
+    -allow_gpu_resource=true
+
+If you want to use revocable resources, first follow the
+[Mesos oversubscription documentation](http://mesos.apache.org/documentation/latest/oversubscription/)
+and then set this Aurora scheduler flag to allow receiving revocable Mesos offers:
+
+    -receive_revocable_resources=true
+
+Both CPUs and RAM are supported as revocable resources. The former is enabled by default;
+the latter needs to be enabled via:
+
+    -enable_revocable_ram=true
+
+Unless you want to use the [default](https://github.com/apache/aurora/blob/rel/0.18.1/src/main/resources/org/apache/aurora/scheduler/tiers.json)
+tier configuration, you will also have to specify a file path:
+
+    -tier_config=path/to/tiers/config.json
+
+
+## Multi-Framework Setup
+
+Aurora holds onto Mesos offers in order to provide efficient scheduling and
+[preemption](../../features/multitenancy/#preemption). This is problematic in multi-framework
+environments as Aurora might starve other frameworks.
+
+With a downside of increased scheduling latency, Aurora can be configured to be more cooperative:
+
+* Lowering `-min_offer_hold_time` (e.g. to `1mins`) can ensure unused offers are returned to
+  Mesos more frequently.
+* Increasing `-offer_filter_duration` (e.g. to `30secs`) will instruct Mesos
+  not to re-offer rejected resources for the given duration.
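+
+For example, combining the values suggested above:
+
+    -min_offer_hold_time=1mins
+    -offer_filter_duration=30secs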
+
+Setting a [minimum amount of resources](http://mesos.apache.org/documentation/latest/quota/) for
+each Mesos role can furthermore help to ensure no framework is starved entirely.
+
+
+## Containers
+
+Both the Mesos and Docker containerizers require configuration of the Mesos agent.
+
+### Mesos Containerizer
+
+The minimal agent configuration requires enabling Docker and Appc image support for the Mesos
+containerizer:
+
+    --containerizers=mesos
+    --image_providers=appc,docker
+    --isolation=filesystem/linux,docker/runtime  # as an addition to your other isolators
+
+Further details can be found in the corresponding [Mesos documentation](http://mesos.apache.org/documentation/latest/container-image/).
+
+### Docker Containerizer
+
+The [Docker containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/)
+requires that the Docker engine be installed on each agent host. In addition, it must be enabled on the
+Mesos agents by launching them with the option:
+
+    --containerizers=mesos,docker
+
+If you would like to run a container with a read-only filesystem, it may also be necessary to use
+the scheduler flag `-thermos_home_in_sandbox` in order to set `HOME` to the sandbox
+before the executor runs. This makes sure that the executor/runner PEX extraction happens
+inside the sandbox instead of the container filesystem root.
+
+If you would like to supply your own parameters to `docker run` when launching jobs in docker
+containers, you may use the following flags:
+
+    -allow_docker_parameters
+    -default_docker_parameters
+
+`-allow_docker_parameters` controls whether or not users may pass their own configuration parameters
+through the job configuration files. If set to `false` (the default), the scheduler will reject
+jobs with custom parameters. *NOTE*: this setting should be used with caution as it allows any job
+owner to specify any parameters they wish, including those that may introduce security concerns
+(`privileged=true`, for example).
+
+`-default_docker_parameters` allows a cluster operator to specify a universal set of parameters that
+should be used for every container that does not have parameters explicitly configured at the job
+level. The argument accepts a multimap format:
+
+    -default_docker_parameters="read-only=true,tmpfs=/tmp,tmpfs=/run"
+
+### Common Options
+
+The following Aurora options work for both containerizers.
+
+The scheduler flag `-global_container_mounts` allows mounting paths from the host (i.e. the agent machine)
+into all containers on that host. The format is a comma-separated list of `host_path:container_path[:mode]`
+tuples. For example, `-global_container_mounts=/opt/secret_keys_dir:/mnt/secret_keys_dir:ro` mounts
+`/opt/secret_keys_dir` from the agents into all launched containers. Valid modes are `ro` and `rw`.
+
+
+## Thermos Process Logs
+
+### Log destination
+By default, Thermos will write process stdout/stderr to log files in the sandbox. Process object
+configuration allows specifying alternate log file destinations like streamed stdout/stderr or
+suppression of all log output. Default behavior can be configured for the entire cluster with the
+following flag (through the `-thermos_executor_flags` argument to the Aurora scheduler):
+
+    --runner-logger-destination=both
+
+The `both` configuration sends logs to files and also streams them to the parent stdout/stderr outputs.
+
+See [Configuration Reference](../../reference/configuration/#logger) for all destination options.
+
+### Log rotation
+By default, Thermos will not rotate the stdout/stderr logs from child processes and they will grow
+without bound. An individual user may change this behavior via configuration on the Process object,
+but it may also be desirable to change the default configuration for the entire cluster.
+In order to enable rotation by default, the following flags can be applied to Thermos (through the
+`-thermos_executor_flags` argument to the Aurora scheduler):
+
+    --runner-logger-mode=rotate
+    --runner-rotate-log-size-mb=100
+    --runner-rotate-log-backups=10
+
+In the above example, each instance of the Thermos runner will rotate stderr/stdout logs once they
+reach 100 MiB in size and keep a maximum of 10 backups. If a user has provided a custom setting for
+their process, it will override these default settings.
+
+
+## Thermos Executor Wrapper
+
+If you need to do computation before starting the Thermos executor (for example, setting a different
+`--announcer-hostname` parameter for every executor), then the Thermos executor should be invoked
+inside a wrapper script. In such a case, the Aurora scheduler should be started with
+`-thermos_executor_path` pointing to the wrapper script and `-thermos_executor_resources` set to a
+comma separated string of all the resources that should be copied into the sandbox (including the
+original Thermos executor). Ensure the wrapper script does not access resources outside of the
+sandbox, as when the script is run from within a Docker container those resources may not exist.
+
+For example, to wrap the executor inside a simple wrapper, the scheduler is started like this:
+`-thermos_executor_path=/path/to/wrapper.sh -thermos_executor_resources=/usr/share/aurora/bin/thermos_executor.pex`
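+
+A minimal `wrapper.sh` sketch (this assumes the original executor is listed in
+`-thermos_executor_resources` as shown above and is therefore copied into the sandbox next to the
+wrapper; the hostname lookup is an illustrative assumption):
+
+    #!/bin/bash
+    # Compute a per-host announcer hostname, then hand off to the real executor in the sandbox.
+    exec ./thermos_executor.pex --announcer-hostname="$(hostname -f)" "$@"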
+
+## Custom Executors
+
+The scheduler can be configured to utilize a custom executor by specifying the `-custom_executor_config` flag.
+The flag must be set to the path of a valid executor configuration file.
+
+For more information on this feature please see the custom executors [documentation](../../features/custom-executors/).
+
+## A note on increasing executor overhead
+
+Increasing executor overhead on an existing cluster, whether it be for custom executors or for Thermos,
+will result in degraded preemption performance until all tasks which began life with the previous,
+lower-overhead executor configuration are preempted/restarted.
+
+## Controlling MTTA via Update Affinity
+
+When there is high resource contention in your cluster you may experience noticeably elevated job update
+times, as well as high task churn across the cluster. This is due to Aurora's first-fit scheduling
+algorithm. To alleviate this, you can enable update affinity where the Scheduler will make a best-effort
+attempt to reuse the same agent for the updated task (so long as the resources for the job are not being
+increased).
+
+To enable this in the Scheduler, you can set the following options:
+
+    --enable_update_affinity=true
+    --update_affinity_reservation_hold_time=3mins
+
+You will need to tune the hold time to match the behavior you see in your cluster. If you have extremely
+high update throughput, you might have to extend it as processing updates could easily add significant
+delays between scheduling attempts. You may also have to tune scheduling parameters to achieve the
+throughput you need in your cluster. Some relevant settings (with defaults) are:
+
+    --max_schedule_attempts_per_sec=40
+    --initial_schedule_penalty=1secs
+    --max_schedule_penalty=1mins
+    --scheduling_max_batch_size=3
+    --max_tasks_per_schedule_attempt=5
+
+There are metrics exposed by the Scheduler which can provide guidance on where the bottleneck is.
+Example metrics to look at:
+
+    - schedule_attempts_blocks (if this number is greater than 0, then task throughput is hitting
+                                limits controlled by --max_schedule_attempts_per_sec)
+    - scheduled_task_penalty_* (metrics around scheduling penalties for tasks, if the numbers here are high
+                                then you could have high contention for resources)
+
+Most likely you'll run into limits with the number of update instances that can be processed per minute
+before you run into any other limits. So if your total work done per minute starts to exceed 2k instances,
+you may need to extend `--update_affinity_reservation_hold_time`.
diff --git a/source/documentation/0.18.1/operations/installation.md b/source/documentation/0.18.1/operations/installation.md
new file mode 100644
index 0000000..12db004
--- /dev/null
+++ b/source/documentation/0.18.1/operations/installation.md
@@ -0,0 +1,256 @@
+# Installing Aurora
+
+Source and binary distributions can be found on our
+[downloads](https://aurora.apache.org/downloads/) page.  Installing from binary packages is
+recommended for most.
+
+- [Installing the scheduler](#installing-the-scheduler)
+- [Installing worker components](#installing-worker-components)
+- [Installing the client](#installing-the-client)
+- [Installing Mesos](#installing-mesos)
+- [Troubleshooting](#troubleshooting)
+
+If our binary packages don't suit you, our package build toolchain makes it easy to build your
+own packages. See the [instructions](https://github.com/apache/aurora-packaging) to learn how.
+
+
+## Machine profiles
+
+Given that many of these components communicate over the network, there are numerous ways you could
+assemble them to create an Aurora cluster.  The simplest way is to think in terms of three machine
+profiles:
+
+### Coordinator
+**Components**: ZooKeeper, Aurora scheduler, Mesos master
+
+A small number of machines (typically 3 or 5) responsible for cluster orchestration.  In most cases
+it is fine to co-locate these components in anything but very large clusters (> 1000 machines).
+Beyond that point, operators will likely want to manage these services on separate machines.
+In particular, you will want to use separate ZooKeeper ensembles for leader election and
+service discovery. Otherwise a service discovery error or outage can take down the entire cluster.
+
+In practice, 5 coordinators have been shown to reliably manage clusters with tens of thousands of
+machines.
+
+### Worker
+**Components**: Aurora executor, Aurora observer, Mesos agent
+
+The bulk of the cluster, where services will actually run.
+
+### Client
+**Components**: Aurora client, Aurora admin client
+
+Any machines that users submit jobs from.
+
+
+## Installing the scheduler
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        sudo start mesos-master
+
+2. Install ZooKeeper
+
+        sudo apt-get install -y zookeeperd
+
+3. Install the Aurora scheduler
+
+        sudo add-apt-repository -y ppa:openjdk-r/ppa
+        sudo apt-get update
+        sudo apt-get install -y openjdk-8-jre-headless wget
+
+        sudo update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-scheduler_0.17.0_amd64.deb
+        sudo dpkg -i aurora-scheduler_0.17.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-master
+
+2. Install ZooKeeper
+
+        sudo rpm -Uvh https://archive.cloudera.com/cdh4/one-click-install/redhat/6/x86_64/cloudera-cdh-4-0.x86_64.rpm
+        sudo yum install -y java-1.8.0-openjdk-headless zookeeper-server
+
+        sudo service zookeeper-server init
+        sudo systemctl start zookeeper-server
+
+3. Install the Aurora scheduler
+
+        sudo yum install -y wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-scheduler-0.17.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-scheduler-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Finalizing
+By default, the scheduler will start in an uninitialized mode.  This is because external
+coordination is necessary to be certain operator error does not result in a quorum of schedulers
+starting up and believing their databases are empty when in fact they should be re-joining a
+cluster.
+
+Because of this, a fresh install of the scheduler will need intervention to start up.  First,
+stop the scheduler service.
+
+* Ubuntu: `sudo stop aurora-scheduler`
+* CentOS: `sudo systemctl stop aurora`
+
+Now initialize the database:
+
+    sudo -u aurora mkdir -p /var/lib/aurora/scheduler/db
+    sudo -u aurora mesos-log initialize --path=/var/lib/aurora/scheduler/db
+
+Now you can start the scheduler back up.
+
+* Ubuntu: `sudo start aurora-scheduler`
+* CentOS: `sudo systemctl start aurora`
+
+
+## Installing worker components
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo apt-get install -y python2.7 wget
+
+        # NOTE: This appears to be a missing dependency of the mesos deb package and is needed
+        # for the python mesos native bindings.
+        sudo apt-get -y install libcurl4-nss-dev
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-executor_0.17.0_amd64.deb
+        sudo dpkg -i aurora-executor_0.17.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo yum install -y python2 wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-executor-0.17.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-executor-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Worker Configuration
+The executor typically does not require configuration.  Command line arguments can
+be passed to the executor via the scheduler's `-thermos_executor_flags` argument.
+
+The observer needs to be configured to look at the correct mesos directory in order to find task
+sandboxes. You should first find the Mesos working directory by looking for the Mesos agent
+`--work_dir` flag. You should see something like:
+
+        ps -eocmd | grep "mesos-slave" | grep -v grep | tr ' ' '\n' | grep "\--work_dir"
+        --work_dir=/var/lib/mesos
+
+If the flag is not set, you can view the default value like so:
+
+        mesos-slave --help
+        Usage: mesos-slave [options]
+
+          ...
+          --work_dir=VALUE      Directory path to place framework work directories
+                                (default: /tmp/mesos)
+          ...
+
+The value you find for `--work_dir`, `/var/lib/mesos` in this example, should match the Aurora
+observer value for `--mesos-root`.  You can look for that setting in a similar way on a worker
+node by grepping for `thermos_observer` and `--mesos-root`.  If the flag is not set, you can view
+the default value like so:
+
+        thermos_observer -h
+        Options:
+          ...
+          --mesos-root=MESOS_ROOT
+                                The mesos root directory to search for Thermos
+                                executor sandboxes [default: /var/lib/mesos]
+          ...
+
+In this case the default is `/var/lib/mesos` and we have a match. If there is no match, you can
+either adjust the Mesos agent start script(s) and restart the agent(s) or else adjust the
+Aurora observer start scripts and restart the observers.  To adjust the Aurora observer:
+
+#### Ubuntu Trusty
+
+    sudo sh -c 'echo "MESOS_ROOT=/tmp/mesos" >> /etc/default/thermos'
+
+#### CentOS 7
+
+Make an edit to add the `--mesos-root` flag resulting in something like:
+
+    grep -A5 OBSERVER_ARGS /etc/sysconfig/thermos
+    OBSERVER_ARGS=(
+      --port=1338
+      --mesos-root=/tmp/mesos
+      --log_to_disk=NONE
+      --log_to_stderr=google:INFO
+    )
+
+
+## Installing the client
+### Ubuntu Trusty
+
+    sudo apt-get install -y python2.7 wget
+
+    wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-tools_0.17.0_amd64.deb
+    sudo dpkg -i aurora-tools_0.17.0_amd64.deb
+
+### CentOS 7
+
+    sudo yum install -y python2 wget
+
+    wget -c https://apache.bintray.com/aurora/centos-7/aurora-tools-0.17.0-1.el7.centos.aurora.x86_64.rpm
+    sudo yum install -y aurora-tools-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Mac OS X
+
+    brew upgrade
+    brew install aurora-cli
+
+### Client Configuration
+Client configuration lives in a JSON file that describes the clusters available and how to reach
+them.  By default this file is at `/etc/aurora/clusters.json`.
+
+Jobs may be submitted to the scheduler using the client, and are described with
+[job configurations](../../reference/configuration/) expressed in `.aurora` files.  Typically you will
+maintain a single job configuration file to describe one or more deployment environments (e.g.
+dev, test, prod) for a production job.
+
+
+## Installing Mesos
+Mesos uses a single package for the Mesos master and agent.  As a result, the package dependencies
+are identical for both.
+
+### Mesos on Ubuntu Trusty
+
+    sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF
+    DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]')
+    CODENAME=$(lsb_release -cs)
+
+    echo "deb http://repos.mesosphere.io/${DISTRO} ${CODENAME} main" | \
+      sudo tee /etc/apt/sources.list.d/mesosphere.list
+    sudo apt-get -y update
+
+    # Use `apt-cache showpkg mesos | grep [version]` to find the exact version.
+    sudo apt-get -y install mesos=1.1.0-2.0.107.ubuntu1404_amd64.deb
+
+### Mesos on CentOS 7
+
+    sudo rpm -Uvh https://repos.mesosphere.io/el/7/noarch/RPMS/mesosphere-el-repo-7-1.noarch.rpm
+    sudo yum -y install mesos-1.1.0
+
+
+## Troubleshooting
+
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions in our [Troubleshooting guide](../troubleshooting/) to help get you moving.
diff --git a/source/documentation/0.18.1/operations/monitoring.md b/source/documentation/0.18.1/operations/monitoring.md
new file mode 100644
index 0000000..2ceda62
--- /dev/null
+++ b/source/documentation/0.18.1/operations/monitoring.md
@@ -0,0 +1,181 @@
+# Monitoring your Aurora cluster
+
+Before you start running important services in your Aurora cluster, it's important to set up
+monitoring and alerting of Aurora itself.  Most of your monitoring can be against the scheduler,
+since it will give you a global view of what's going on.
+
+## Reading stats
+The scheduler exposes a *lot* of instrumentation data via its HTTP interface. You can get a quick
+peek at the first few of these in our vagrant image:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars | head'
+    async_tasks_completed 1004
+    attribute_store_fetch_all_events 15
+    attribute_store_fetch_all_events_per_sec 0.0
+    attribute_store_fetch_all_nanos_per_event 0.0
+    attribute_store_fetch_all_nanos_total 3048285
+    attribute_store_fetch_all_nanos_total_per_sec 0.0
+    attribute_store_fetch_one_events 3391
+    attribute_store_fetch_one_events_per_sec 0.0
+    attribute_store_fetch_one_nanos_per_event 0.0
+    attribute_store_fetch_one_nanos_total 454690753
+
+These values are served as `Content-Type: text/plain`, with each line containing a space-separated metric
+name and value. Values may be integers, doubles, or strings (note: strings are static, others
+may be dynamic).
+
+If your monitoring infrastructure prefers JSON, the scheduler exports that as well:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars.json | python -mjson.tool | head'
+    {
+        "async_tasks_completed": 1009,
+        "attribute_store_fetch_all_events": 15,
+        "attribute_store_fetch_all_events_per_sec": 0.0,
+        "attribute_store_fetch_all_nanos_per_event": 0.0,
+        "attribute_store_fetch_all_nanos_total": 3048285,
+        "attribute_store_fetch_all_nanos_total_per_sec": 0.0,
+        "attribute_store_fetch_one_events": 3409,
+        "attribute_store_fetch_one_events_per_sec": 0.0,
+        "attribute_store_fetch_one_nanos_per_event": 0.0,
+
+This will be the same data as above, served with `Content-Type: application/json`.
+
+## Viewing live stat samples on the scheduler
+The scheduler uses the Twitter commons stats library, which keeps an internal time-series database
+of exported variables - nearly everything in `/vars` is available for instant graphing.  This is
+useful for debugging, but is not a replacement for an external monitoring system.
+
+You can view these graphs on a scheduler at `/graphview`.  It supports some composition and
+aggregation of values, which can be invaluable when triaging a problem.  For example, if you have
+the scheduler running in vagrant, check out these links:
+[simple graph](http://192.168.33.7:8081/graphview?query=jvm_uptime_secs)
+[complex composition](http://192.168.33.7:8081/graphview?query=rate\(scheduler_log_native_append_nanos_total\)%2Frate\(scheduler_log_native_append_events\)%2F1e6)
+
+### Counters and gauges
+Among numeric stats, there are two fundamental types of stats exported: _counters_ and _gauges_.
+Counters are guaranteed to be monotonically-increasing for the lifetime of a process, while gauges
+may decrease in value.  Aurora uses counters to represent things like the number of times an event
+has occurred, and gauges to capture things like the current length of a queue.  Counters are a
+natural fit for accurate composition into [rate ratios](http://en.wikipedia.org/wiki/Rate_ratio)
+(useful for sample-resistant latency calculation), while gauges are not.
+
+# Alerting
+
+## Quickstart
+If you are looking for just bare-minimum alerting to get something in place quickly, set up alerting
+on `framework_registered` and `task_store_LOST`. These will give you a decent picture of overall
+health.
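+
+A quick manual spot check of those two stats (a sketch only, not a substitute for a real alerting
+system):
+
+    curl -s localhost:8081/vars | grep -E '^(framework_registered|task_store_LOST) '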
+
+## A note on thresholds
+One of the most difficult things in monitoring is choosing alert thresholds. With many of these
+stats, there is no value we can offer as a threshold that will be guaranteed to work for you. It
+will depend on the size of your cluster, number of jobs, churn of tasks in the cluster, etc. We
+recommend you start with a strict value after viewing a small amount of collected data, and then
+adjust thresholds as you see fit. Feel free to ask us if you would like to validate that your alerts
+and thresholds make sense.
+
+## Important stats
+
+### `jvm_uptime_secs`
+Type: integer counter
+
+The number of seconds the JVM process has been running. Comes from
+[RuntimeMXBean#getUptime()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/RuntimeMXBean.html#getUptime\(\))
+
+Detecting resets (decreasing values) on this stat will tell you that the scheduler is failing to
+stay alive.
+
+Look at the scheduler logs to identify the reason the scheduler is exiting.
+
+### `system_load_avg`
+Type: double gauge
+
+The current load average of the system for the last minute. Comes from
+[OperatingSystemMXBean#getSystemLoadAverage()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/OperatingSystemMXBean.html?is-external=true#getSystemLoadAverage\(\)).
+
+A high sustained value suggests that the scheduler machine may be over-utilized.
+
+Use standard unix tools like `top` and `ps` to track down the offending process(es).
+
+### `process_cpu_cores_utilized`
+Type: double gauge
+
+The current number of CPU cores in use by the JVM process. This should not exceed the number of
+logical CPU cores on the machine. Derived from
+[OperatingSystemMXBean#getProcessCpuTime()](http://docs.oracle.com/javase/7/docs/jre/api/management/extension/com/sun/management/OperatingSystemMXBean.html)
+
+A high sustained value indicates that the scheduler is overworked. Due to current internal design
+limitations, if this value is sustained at `1`, there is a good chance the scheduler is under water.
+
+There are two main inputs that tend to drive this figure: task scheduling attempts and status
+updates from Mesos.  You may see activity in the scheduler logs to give an indication of where
+time is being spent.  Beyond that, it really takes good familiarity with the code to effectively
+triage this.  We suggest engaging with an Aurora developer.
+
+### `task_store_LOST`
+Type: integer gauge
+
+The number of tasks stored in the scheduler that are in the `LOST` state, and have been rescheduled.
+
+If this value is increasing at a high rate, it is a sign of trouble.
+
+There are many sources of `LOST` tasks in Mesos: the scheduler, master, agent, and executor can all
+trigger this.  The first step is to look in the scheduler logs for `LOST` to identify where the
+state changes are originating.
+
+### `scheduler_resource_offers`
+Type: integer counter
+
+The number of resource offers that the scheduler has received.
+
+For a healthy scheduler, this value must be increasing over time.
+
+Assuming the scheduler is up and otherwise healthy, you will want to check if the master thinks it
+is sending offers. You should also look at the master's web interface to see if it has a large
+number of outstanding offers that it is waiting to be returned.
+
+### `framework_registered`
+Type: binary integer counter
+
+Will be `1` for the leading scheduler that is registered with the Mesos master, `0` for passive
+schedulers.
+
+A sustained period without a `1` (or where `sum() != 1`) warrants investigation.
+
+If there is no leading scheduler, look in the scheduler and master logs for why.  If there are
+multiple schedulers claiming leadership, this suggests a split brain and warrants filing a critical
+bug.
+
+### `rate(scheduler_log_native_append_nanos_total)/rate(scheduler_log_native_append_events)`
+Type: rate ratio of integer counters
+
+This composes two counters to compute a windowed figure for the latency of replicated log writes.
+
+A hike in this value suggests disk bandwidth contention.
+
+Look in scheduler logs for any reported oddness with saving to the replicated log. Also use
+standard tools like `vmstat` and `iotop` to identify whether the disk has become slow or
+over-utilized. We suggest using a dedicated disk for the replicated log to mitigate this.
+
+### `timed_out_tasks`
+Type: integer counter
+
+Tracks the number of times the scheduler has given up while waiting
+(for `-transient_task_state_timeout`) to hear back about a task that is in a transient state
+(e.g. `ASSIGNED`, `KILLING`), and has moved to `LOST` before rescheduling.
+
+This value is currently known to increase occasionally when the scheduler fails over
+([AURORA-740](https://issues.apache.org/jira/browse/AURORA-740)). However, any large spike in this
+value warrants investigation.
+
+The scheduler will log when it times out a task. You should trace the task ID of the timed out
+task into the master, agent, and/or executors to determine where the message was dropped.
+
+### `http_500_responses_events`
+Type: integer counter
+
+The total number of HTTP 500 status responses sent by the scheduler. Includes API and asset serving.
+
+An increase warrants investigation.
+
+Look in scheduler logs to identify why the scheduler returned a 500, there should be a stack trace.
diff --git a/source/documentation/0.18.1/operations/security.md b/source/documentation/0.18.1/operations/security.md
new file mode 100644
index 0000000..1fdf10f
--- /dev/null
+++ b/source/documentation/0.18.1/operations/security.md
@@ -0,0 +1,362 @@
+Securing your Aurora Cluster
+============================
+
+Aurora integrates with [Apache Shiro](http://shiro.apache.org/) to provide security
+controls for its API. In addition to providing some useful features out of the box, Shiro
+also allows Aurora cluster administrators to adapt the security system to their organization’s
+existing infrastructure. The announcer in the Aurora thermos executor also supports security
+controls for talking to ZooKeeper.
+
+
+- [Enabling Security](#enabling-security)
+- [Authentication](#authentication)
+	- [HTTP Basic Authentication](#http-basic-authentication)
+		- [Server Configuration](#server-configuration)
+		- [Client Configuration](#client-configuration)
+	- [HTTP SPNEGO Authentication (Kerberos)](#http-spnego-authentication-kerberos)
+		- [Server Configuration](#server-configuration-1)
+		- [Client Configuration](#client-configuration-1)
+- [Authorization](#authorization)
+	- [Using an INI file to define security controls](#using-an-ini-file-to-define-security-controls)
+		- [Caveats](#caveats)
+- [Implementing a Custom Realm](#implementing-a-custom-realm)
+	- [Packaging a realm module](#packaging-a-realm-module)
+- [Announcer Authentication](#announcer-authentication)
+    - [ZooKeeper authentication configuration](#zookeeper-authentication-configuration)
+    - [Executor settings](#executor-settings)
+- [Scheduler HTTPS](#scheduler-https)
+- [Known Issues](#known-issues)
+
+# Enabling Security
+
+There are two major components of security:
+[authentication and authorization](http://en.wikipedia.org/wiki/Authentication#Authorization).  A
+cluster administrator may choose the approach used for each, and may also implement custom
+mechanisms for either.  Later sections describe the options available. To enable authentication
+for the announcer, see [Announcer Authentication](#announcer-authentication).
+
+
+# Authentication
+
+The scheduler must be configured with instructions for how to process authentication
+credentials at a minimum.  There are currently two built-in authentication schemes -
+[HTTP Basic Authentication](http://en.wikipedia.org/wiki/Basic_access_authentication), and
+[SPNEGO](http://en.wikipedia.org/wiki/SPNEGO) (Kerberos).
+
+## HTTP Basic Authentication
+
+Basic Authentication is a very quick way to add *some* security.  It is supported
+by all major browsers and HTTP client libraries with minimal work.  However,
+before relying on Basic Authentication you should be aware of the [security
+considerations](http://tools.ietf.org/html/rfc2617#section-4).
+
+### Server Configuration
+
+At a minimum you need to set the following command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_realm_modules=INI_AUTHNZ
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+[users]
+sally = apple, admin
+
+[roles]
+admin = *
+```
+
+The details of the security.ini file are explained below. Note that this file contains plaintext,
+unhashed passwords.
+
+### Client Configuration
+
+To configure the client for HTTP Basic authentication, add an entry to ~/.netrc with your credentials
+
+```
+% cat ~/.netrc
+# ...
+
+machine aurora.example.com
+login sally
+password apple
+
+# ...
+```
+
+No changes are required to `clusters.json`.
+
+## HTTP SPNEGO Authentication (Kerberos)
+
+### Server Configuration
+At a minimum you need to set the following command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=NEGOTIATE
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+% cat path/to/security.ini
+[users]
+sally = _, admin
+
+[roles]
+admin = *
+```
+
+What's going on here? First, Aurora must be configured to request Kerberos credentials when presented with an
+unauthenticated request. This is achieved by setting
+
+```
+-http_authentication_mechanism=NEGOTIATE
+```
+
+Next, a Realm module must be configured to **authenticate** the current request using the Kerberos
+credentials that were requested. Aurora ships with a realm module that can do this
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN[,...]
+```
+
+The Kerberos5Realm requires a keytab file and a server principal name. The principal name will usually
+be in the form `HTTP/aurora.example.com@EXAMPLE.COM`.
+
+```
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+```
+
+The Kerberos5 realm module is authentication-only. For scheduler security to work you must also
+enable a realm module that provides an Authorizer implementation. For example, to do this using the
+IniShiroRealmModule:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+```
+
+You can then configure authorization using a security.ini file as described below
+(the password field is ignored). You must configure the realm module with the path to this file:
+
+```
+-shiro_ini_path=path/to/security.ini
+```
+
+### Client Configuration
+To use Kerberos on the client-side you must build Kerberos-enabled client binaries. Do this with
+
+```
+./pants binary src/main/python/apache/aurora/kerberos:kaurora
+./pants binary src/main/python/apache/aurora/kerberos:kaurora_admin
+```
+
+You must also configure each cluster where you've enabled Kerberos on the scheduler
+to use Kerberos authentication. Do this by setting `auth_mechanism` to `KERBEROS`
+in `clusters.json`.
+
+```
+% cat ~/.aurora/clusters.json
+{
+    "devcluser": {
+        "auth_mechanism": "KERBEROS",
+        ...
+    },
+    ...
+}
+```
+
+# Authorization
+Given a means to authenticate the entity a client claims they are, we need to define what privileges they have.
+
+## Using an INI file to define security controls
+
+The simplest security configuration for Aurora is an INI file on the scheduler.  For small
+clusters, or clusters where the users and access controls change relatively infrequently, this is
+likely the preferred approach.  However you may want to avoid this approach if access permissions
+are rapidly changing, or if your access control information already exists in another system.
+
+You can enable INI-based configuration with following scheduler command line arguments:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_ini_path=path/to/security.ini
+```
+
+*note* As the argument name reveals, this is using Shiro’s
+[IniRealm](http://shiro.apache.org/configuration.html#Configuration-INIConfiguration) behind
+the scenes.
+
+The INI file will contain two sections - users and roles.  Here’s an example for what might
+be in security.ini:
+
+```
+[users]
+sally = apple, admin
+jim = 123456, accounting
+becky = letmein, webapp
+larry = 654321, accounting
+steve = password
+
+[roles]
+admin = *
+accounting = thrift.AuroraAdmin:setQuota
+webapp = thrift.AuroraSchedulerManager:*:webapp
+```
+
+The users section defines user credentials and the role(s) they are members of.  These lines
+are of the format `<user> = <password>[, <role>...]`.  As you probably noticed, the passwords are
+stored in plaintext, so read access to this file should be restricted.
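+
+For example, assuming the scheduler runs as a dedicated `aurora` user, the file can be locked down
+with standard file permissions (user name and path are illustrative):
+
+```
+chown aurora:aurora path/to/security.ini
+chmod 600 path/to/security.ini
+```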
+
+In this configuration, each user has different privileges for actions in the cluster because
+of the roles they are a part of:
+
+* admin is granted all privileges
+* accounting may adjust the amount of resource quota for any role
+* webapp represents a collection of jobs that make up a service, and its members may create and modify any jobs owned by it
+
+### Caveats
+You might find documentation on the Internet suggesting there are additional sections in `shiro.ini`,
+like `[main]` and `[urls]`. These are not supported by Aurora as it uses a different mechanism to configure
+those parts of Shiro. Think of Aurora's `security.ini` as a subset with only `[users]` and `[roles]` sections.
+
+## Implementing Delegated Authorization
+
+It is possible to leverage Shiro's `runAs` feature by implementing a custom Servlet Filter that provides
+the capability and passing its fully qualified class name to the command line argument
+`-shiro_after_auth_filter`. The filter is registered in the same filter chain as the Shiro auth filters
+and is placed after them, which ensures it is only invoked after the Shiro filters have had a chance to
+authenticate the request.
+
+# Implementing a Custom Realm
+
+Since Aurora’s security is backed by [Apache Shiro](https://shiro.apache.org), you can implement a
+custom [Realm](http://shiro.apache.org/realm.html) to define organization-specific security behavior.
+
+In addition to using Shiro's standard APIs to implement a Realm you can link against Aurora to
+access the type-safe Permissions Aurora uses. See the Javadoc for `org.apache.aurora.scheduler.spi`
+for more information.
+
+## Packaging a realm module
+Package your custom Realm(s) with a Guice module that exposes a `Set<Realm>` multibinding.
+
+```java
+package com.example;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.multibindings.Multibinder;
+import org.apache.shiro.realm.Realm;
+
+public class MyRealmModule extends AbstractModule {
+  @Override
+  public void configure() {
+    Realm myRealm = new MyRealm();
+
+    Multibinder.newSetBinder(binder(), Realm.class).addBinding().toInstance(myRealm);
+  }
+
+  static class MyRealm implements Realm {
+    // Realm implementation.
+  }
+}
+```
+
+To use your module in the scheduler, include it as a realm module based on its fully-qualified
+class name:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ,com.example.MyRealmModule
+```
+
+
+# Announcer Authentication
+The Thermos executor can be configured to authenticate with ZooKeeper and include
+an [ACL](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl)
+on the nodes it creates, which will specify
+the privileges of clients to perform different actions on these nodes.  This
+feature is enabled by specifying an ACL configuration file to the executor with the
+`--announcer-zookeeper-auth-config` command line argument.
+
+When this feature is _not_ enabled, nodes created by the executor will have 'world/all' permission
+(`ZOO_OPEN_ACL_UNSAFE`).  In most production environments, operators should specify an ACL and
+limit access.
+
+## ZooKeeper Authentication Configuration
+The configuration file must be formatted as JSON with the following schema:
+
+```json
+{
+  "auth": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>"
+    }
+  ],
+  "acl": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>",
+      "permissions": {
+        "read": <bool>,
+        "write": <bool>,
+        "create": <bool>,
+        "delete": <bool>,
+        "admin": <bool>
+      }
+    }
+  ]
+}
+```
+
+The `scheme`
+defines the encoding of the credential field.  Note that these fields are passed directly to
+ZooKeeper (except in the case of _digest_ scheme, where the executor will hash and encode
+the credential appropriately before passing it to ZooKeeper). In addition to `acl`, a list of
+authentication credentials must be provided in `auth` to use for the connection.
+
+All properties of the `permissions` object will default to `false` if not provided.
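+
+For illustration, a configuration using the _digest_ scheme might look like the following
+(the username and password are placeholders):
+
+```json
+{
+  "auth": [
+    { "scheme": "digest", "credential": "announcer:changeme" }
+  ],
+  "acl": [
+    {
+      "scheme": "digest",
+      "credential": "announcer:changeme",
+      "permissions": {
+        "read": true,
+        "write": true,
+        "create": true,
+        "delete": true,
+        "admin": true
+      }
+    }
+  ]
+}
+```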
+
+## Executor settings
+To enable the executor to authenticate against ZK, `--announcer-zookeeper-auth-config` should be
+set to the path of the configuration file.
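+
+For example (the path is a placeholder):
+
+```
+--announcer-zookeeper-auth-config=path/to/zk_auth.json
+```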
+
+
+# Scheduler HTTPS
+
+The Aurora scheduler does not provide native HTTPS support ([AURORA-343](https://issues.apache.org/jira/browse/AURORA-343)).
+It is therefore recommended to deploy it behind an HTTPS capable reverse proxy such as nginx or Apache2.
+
+A simple setup is to launch both the reverse proxy and the Aurora scheduler on the same port, but
+bind the reverse proxy to the public IP of the host and the scheduler to localhost:
+
+    -ip=127.0.0.1
+    -http_port=8081
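+
+For illustration, a minimal nginx server block for such a setup might look like this (host name and
+certificate paths are placeholders):
+
+    server {
+      listen 443 ssl;
+      server_name aurora.example.com;
+      ssl_certificate     /etc/ssl/certs/aurora.example.com.pem;
+      ssl_certificate_key /etc/ssl/private/aurora.example.com.key;
+      location / {
+        proxy_pass http://127.0.0.1:8081;
+      }
+    }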
+
+If your clients connect to the scheduler via [`proxy_url`](../../reference/scheduler-configuration/),
+you can update it to `https`. If you use the ZooKeeper based discovery instead, the scheduler
+needs to be launched via
+
+    -serverset_endpoint_name=https
+
+in order to announce its HTTPS support within ZooKeeper.
+
+
+# Known Issues
+
+While the APIs and SPIs we ship with are stable as of 0.8.0, we are aware of several possible
+incremental improvements. Please follow or vote on the relevant tickets, or send patches.
+
+Relevant tickets:
+* [AURORA-1248](https://issues.apache.org/jira/browse/AURORA-1248): Client retries 4xx errors
+* [AURORA-1279](https://issues.apache.org/jira/browse/AURORA-1279): Remove kerberos-specific build targets
+* [AURORA-1293](https://issues.apache.org/jira/browse/AURORA-1291): Consider defining a JSON format in place of INI
+* [AURORA-1179](https://issues.apache.org/jira/browse/AURORA-1179): Supported hashed passwords in security.ini
+* [AURORA-1295](https://issues.apache.org/jira/browse/AURORA-1295): Support security for the ReadOnlyScheduler service
diff --git a/source/documentation/0.18.1/operations/storage.md b/source/documentation/0.18.1/operations/storage.md
new file mode 100644
index 0000000..fc3a1c3
--- /dev/null
+++ b/source/documentation/0.18.1/operations/storage.md
@@ -0,0 +1,96 @@
+# Aurora Scheduler Storage
+
+- [Overview](#overview)
+- [Storage Semantics](#storage-semantics)
+  - [Reads, writes, modifications](#reads-writes-modifications)
+    - [Read lifecycle](#read-lifecycle)
+    - [Write lifecycle](#write-lifecycle)
+  - [Atomicity, consistency and isolation](#atomicity-consistency-and-isolation)
+  - [Population on restart](#population-on-restart)
+
+
+## Overview
+
+The Aurora scheduler maintains data that needs to be persisted to survive failovers and restarts.
+For example:
+
+* Task configurations and scheduled task instances
+* Job update configurations and update progress
+* Production resource quotas
+* Mesos resource offer host attributes
+
+Aurora solves its persistence needs by leveraging the
+[Mesos implementation of a Paxos replicated log](http://mesos.apache.org/documentation/latest/replicated-log-internals/)
+[[1]](https://ramcloud.stanford.edu/~ongaro/userstudy/paxos.pdf)
+[[2]](http://en.wikipedia.org/wiki/State_machine_replication) with a key-value
+[LevelDB](https://github.com/google/leveldb) storage as persistence media.
+
+Conceptually, it can be represented by the following major components:
+
+* Volatile storage: in-memory cache of all available data. Implemented via in-memory
+[H2 Database](http://www.h2database.com/html/main.html) and accessed via
+[MyBatis](http://mybatis.github.io/mybatis-3/).
+* Log manager: interface between Aurora storage and Mesos replicated log. The default schema format
+is [thrift](https://github.com/apache/thrift). Data is stored in serialized binary form.
+* Snapshot manager: all data is periodically persisted to the Mesos replicated log in a single snapshot.
+This establishes periodic recovery checkpoints and speeds up volatile storage recovery on
+restart.
+* Backup manager: as a precaution, snapshots are periodically written out into backup files.
+This solves a [disaster recovery problem](../backup-restore/)
+in case of a complete loss or corruption of Mesos log files.
+
+![Storage hierarchy](../images/storage_hierarchy.png)
+
+
+## Storage Semantics
+
+This section covers implementation details of the Aurora storage system. Understanding them can be useful
+when investigating performance issues.
+
+### Reads, writes, modifications
+
+All services in Aurora access data via a set of predefined store interfaces (aka stores) logically
+grouped by the type of data they serve. Every interface defines a specific set of operations allowed
+on the data, thus abstracting away both storage access and the actual persistence implementation. The
+latter is especially important given the general immutability of persisted data: with the Mesos
+replicated log as the underlying persistence solution, data can be read and written easily, but not
+modified. All modifications are simulated by saving new versions of the modified objects. This property,
+together with general performance considerations, justifies the existence of the volatile in-memory store.
+
+#### Read lifecycle
+
+There are two types of reads available in Aurora: consistent and weakly-consistent. The difference
+is explained [below](#atomicity-consistency-and-isolation).
+
+All reads are served from the volatile storage, which makes them cheap operations
+from a performance standpoint. The majority of the volatile stores are backed by the
+in-memory H2 database. This allows for rich schema definitions, queries and relationships that
+key-value storage is unable to match.
+
+#### Write lifecycle
+
+Writes are more involved operations since, in addition to updating the volatile store, data has to be
+appended to the replicated log. Data is not available for reads until fully acknowledged by both the
+replicated log and volatile storage.
+
+### Atomicity, consistency and isolation
+
+Aurora uses [write-ahead logging](http://en.wikipedia.org/wiki/Write-ahead_logging) to ensure
+consistency between replicated and volatile storage. In Aurora, data is first written into the
+replicated log and only then updated in the volatile store.
+
+Aurora storage uses read-write locks to serialize data mutations and provide a consistent view of the
+available data. The `Storage` interface exposes 3 major types of operations:
+
+* `consistentRead` - access is locked using the reader's lock and provides a consistent view on read
+* `weaklyConsistentRead` - access is lock-less. Delivers the best contention performance but may result
+in stale reads
+* `write` - access is fully serialized using the writer's lock. Operation success requires both the
+volatile and replicated writes to succeed.
+
+The consistency of the volatile store is enforced via H2 transactional isolation.
+
+### Population on restart
+
+Any time a scheduler restarts, it restores its volatile state from the most recent position recorded
+in the replicated log by restoring the snapshot and replaying individual log entries on top to fully
+recover the state up to the last write.
diff --git a/source/documentation/0.18.1/operations/troubleshooting.md b/source/documentation/0.18.1/operations/troubleshooting.md
new file mode 100644
index 0000000..a7d4ef2
--- /dev/null
+++ b/source/documentation/0.18.1/operations/troubleshooting.md
@@ -0,0 +1,106 @@
+# Troubleshooting
+
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions here to help get you moving.
+
+## Replicated log not initialized
+
+### Symptoms
+- Scheduler RPCs and web interface claim `Storage is not READY`
+- Scheduler log repeatedly prints messages like
+
+  ```
+  I1016 16:12:27.234133 26081 replica.cpp:638] Replica in EMPTY status
+  received a broadcasted recover request
+  I1016 16:12:27.234256 26084 recover.cpp:188] Received a recover response
+  from a replica in EMPTY status
+  ```
+
+### Solution
+When you create a new cluster, you need to inform a quorum of schedulers that they are safe to
+consider their database to be empty by [initializing](../installation/#finalizing) the
+replicated log. This is done to prevent the scheduler from modifying the cluster state in the event
+of multiple simultaneous disk failures or, more likely, misconfiguration of the replicated log path.
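+
+As a sketch, assuming the scheduler runs as an `aurora` user and its `-native_log_file_path` points at
+`/var/lib/aurora/scheduler/db` (both are placeholders for your own setup), initialization on a quorum of
+schedulers looks roughly like:
+
+    sudo -u aurora mkdir -p /var/lib/aurora/scheduler/db
+    sudo -u aurora mesos-log initialize --path=/var/lib/aurora/scheduler/db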
+
+
+## No distinct leader elected
+
+### Symptoms
+Either no scheduler or multiple schedulers believe themselves to be leading.
+
+### Solution
+Verify the [network configuration](../configuration/#network-configuration) of the Aurora
+scheduler is correct:
+
+* The `LIBPROCESS_IP:LIBPROCESS_PORT` endpoints must be reachable from all coordinator nodes running
+  a scheduler or a Mesos master.
+* Hostname lookups have to resolve to public IPs rather than local ones that cannot be reached
+  from another node.
+
+In addition, double-check the [quorum settings](../configuration/#replicated-log-configuration) of the
+replicated log.
+
+
+## Scheduler not registered
+
+### Symptoms
+Scheduler log contains
+
+    Framework has not been registered within the tolerated delay.
+
+### Solution
+Double-check that the scheduler is configured correctly to reach the Mesos master. If you are registering
+the master in ZooKeeper, make sure the command line argument passed to the master:
+
+    --zk=zk://$ZK_HOST:2181/mesos/master
+
+is the same as the one on the scheduler:
+
+    -mesos_master_address=zk://$ZK_HOST:2181/mesos/master
+
+
+## Scheduler not running
+
+### Symptoms
+The scheduler process commits suicide regularly. This happens under error conditions, but
+also on purpose at regular intervals.
+
+### Solution
+Aurora is meant to be run under supervision. You have to configure a supervisor like
+[Monit](http://mmonit.com/monit/), [supervisord](http://supervisord.org/), or systemd to run the
+scheduler and restart it whenever it fails or exits on purpose.
+
+Aurora supports an active health checking protocol on its admin HTTP interface - if a `GET /health`
+times out or returns anything other than `200 OK` the scheduler process is unhealthy and should be
+restarted.
+
+For example, monit can be configured with
+
+    if failed port 8081 send "GET /health HTTP/1.0\r\n" expect "OK\n" with timeout 2 seconds for 10 cycles then restart
+
+assuming you set `-http_port=8081`.
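+
+As an illustration, a minimal systemd unit to supervise the scheduler could look like the following
+(the `ExecStart` path and user are placeholders for however you launch the scheduler):
+
+    [Unit]
+    Description=Apache Aurora scheduler
+
+    [Service]
+    User=aurora
+    ExecStart=/usr/local/bin/run-aurora-scheduler.sh
+    Restart=always
+    RestartSec=5
+
+    [Install]
+    WantedBy=multi-user.target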
+
+
+## Executor crashing or hanging
+
+### Symptoms
+Launched task instances never transition to `STARTING` or `RUNNING` but immediately transition
+to `FAILED` or `LOST`.
+
+### Solution
+The executor might be failing due to unknown internal errors such as a missing native dependency
+of the Mesos executor library. Open the Mesos UI and navigate to the failing
+task in question. Inspect the various log files in order to learn about what is going on.
+
+
+## Observer does not discover tasks
+
+### Symptoms
+The observer UI does not list any tasks. When navigating from the scheduler UI to the state of
+a particular task instance the observer returns `Error: 404 Not Found`.
+
+### Solution
+The observer refreshes its internal state every couple of seconds. If waiting a few seconds
+does not resolve the issue, check that the `--mesos-root` setting of the observer and the
+`--work_dir` option of the Mesos agent are in sync. For details, see our
+[Install instructions](../installation/#worker-configuration).
diff --git a/source/documentation/0.18.1/operations/upgrades.md b/source/documentation/0.18.1/operations/upgrades.md
new file mode 100644
index 0000000..635faf5
--- /dev/null
+++ b/source/documentation/0.18.1/operations/upgrades.md
@@ -0,0 +1,41 @@
+# Upgrading Aurora
+
+Aurora can be updated from one version to the next without any downtime or restarts of running
+jobs. The same holds true for Mesos.
+
+Generally speaking, Mesos and Aurora strive for a +1/-1 version compatibility, i.e. all components
+are meant to be forward and backwards compatible for at least one version. This implies it
+does not really matter in which order updates are carried out.
+
+Exceptions to this rule are documented in the [Aurora release-notes](../../../RELEASE-NOTES/)
+and the [Mesos upgrade instructions](https://mesos.apache.org/documentation/latest/upgrades/).
+
+
+## Instructions
+
+To upgrade Aurora, follow these steps:
+
+1. Update the first scheduler instance by updating its software and restarting its process.
+2. Wait until the scheduler is up and its [Replicated Log](../configuration/#replicated-log-configuration)
+   has caught up with the other schedulers in the cluster. The log has caught up if `log/recovered` has
+   the value `1`. You can check the metric via `curl LIBPROCESS_IP:LIBPROCESS_PORT/metrics/snapshot`,
+   where IP and port refer to the [libmesos configuration](../configuration/#network-configuration)
+   settings of the scheduler instance (see the example below this list).
+3. Proceed with the next scheduler until all instances are updated.
+4. Update the Aurora executor deployed to the compute nodes of your cluster. Jobs will continue
+   running with the old version of the executor, and will only be launched by the new one once
+   they are restarted eventually due to natural cluster churn.
+5. Distribute the new Aurora client to your users.
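+
+For example, the check in step 2 might look like the following, with `LIBPROCESS_IP` and
+`LIBPROCESS_PORT` replaced by the scheduler's configured values:
+
+    curl -s http://LIBPROCESS_IP:LIBPROCESS_PORT/metrics/snapshot | grep '"log/recovered"'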
+
+
+## Best Practices
+
+Even though not absolutely mandatory, we advise adhering to the following rules:
+
+* Never skip any major or minor releases when updating. If you have to catch up several releases you
+  have to deploy all intermediary versions. Skipping bugfix releases is acceptable though.
+* Verify all updates on a test cluster before touching your production deployments.
+* To minimize the number of failovers during updates, update the currently leading scheduler
+  instance last.
+* Update the Aurora executor on a subset of compute nodes as a canary before deploying the change to
+  the whole fleet.
diff --git a/source/documentation/0.18.1/reference/client-cluster-configuration.md b/source/documentation/0.18.1/reference/client-cluster-configuration.md
new file mode 100644
index 0000000..ff7a3ac
--- /dev/null
+++ b/source/documentation/0.18.1/reference/client-cluster-configuration.md
@@ -0,0 +1,99 @@
+# Client Cluster Configuration
+
+A cluster configuration file is used by the Aurora client to describe the Aurora clusters with
+which it can communicate. Ultimately this allows client users to reference clusters with short names
+like us-east and eu.
+
+A cluster configuration is formatted as JSON.  The simplest cluster configuration is one that
+communicates with a single (non-leader-elected) scheduler.  For example:
+
+    [{
+      "name": "example",
+      "scheduler_uri": "http://localhost:55555",
+    }]
+
+
+A configuration for a leader-elected scheduler would contain something like:
+
+    [{
+      "name": "example",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler"
+    }]
+
+
+The following properties may be set:
+
+  **Property**             | **Type** | **Description**
+  :------------------------| :------- | :--------------
+   **name**                | String   | Cluster name (Required)
+   **slave_root**          | String   | Path to Mesos agent work dir (Required)
+   **slave_run_directory** | String   | Name of Mesos agent run dir (Required)
+   **zk**                  | String   | Hostname of ZooKeeper instance used to resolve Aurora schedulers.
+   **zk_port**             | Integer  | Port of ZooKeeper instance used to locate Aurora schedulers (Default: 2181)
+   **scheduler_zk_path**   | String   | ZooKeeper path under which scheduler instances are registered.
+   **scheduler_uri**       | String   | URI of Aurora scheduler instance.
+   **proxy_url**           | String   | Used by the client to format URLs for display.
+   **auth_mechanism**      | String   | The authentication mechanism to use when communicating with the scheduler. (Default: UNAUTHENTICATED)
+   **docker_registry**     | String   | Used by the client to resolve docker tags.
+
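+For example, a fuller entry for a leader-elected cluster, combining several of the properties above
+(all values are illustrative), might look like:
+
+    [{
+      "name": "example",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler",
+      "slave_root": "/var/lib/mesos",
+      "slave_run_directory": "latest",
+      "auth_mechanism": "UNAUTHENTICATED",
+      "proxy_url": "http://aurora.example.com:8081"
+    }]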
+
+## Details
+
+### `name`
+
+The name of the Aurora cluster represented by this entry. This name will be the `cluster` portion of
+any job keys identifying jobs running within the cluster.
+
+### `slave_root`
+
+The path on the Mesos agents where executing tasks can be found. It is used in combination with the
+`slave_run_directory` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This value should match the value passed to `mesos-slave`
+as `-work_dir`.
+
+### `slave_run_directory`
+
+The name of the directory where the task run can be found. This is used in combination with the
+`slave_root` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This should almost always be set to `latest`.
+
+### `zk`
+
+The hostname of the ZooKeeper instance used to resolve the Aurora scheduler. Aurora uses ZooKeeper
+to elect a leader. The client will connect to this ZooKeeper instance to determine the current
+leader. This host should match the host passed to the scheduler as `-zk_endpoints`.
+
+### `zk_port`
+
+The port on which the ZooKeeper instance is running. If not set this will default to the standard
+ZooKeeper port of 2181. This port should match the port in the host passed to the scheduler as
+`-zk_endpoints`.
+
+### `scheduler_zk_path`
+
+The path on the ZooKeeper instance under which the Aurora serverset is registered. This value should
+match the value passed to the scheduler as `-serverset_path`.
+
+### `scheduler_uri`
+
+The URI of the scheduler. This would be used in place of the ZooKeeper related configuration above
+in circumstances where direct communication with a single scheduler is needed (e.g. testing
+environments). It is strongly advised to **never** use this property for production deploys.
+
+### `proxy_url`
+
+If `proxy_url` is set, its value will be used as the base URL instead of the hostname of the leading
+scheduler. In that scenario the value for `proxy_url` would be, for example, the URL of your VIP in a
+load balancer or a round-robin DNS name.
+
+### `auth_mechanism`
+
+The identifier of an authentication mechanism that the client should use when communicating with the
+scheduler. Support for values other than `UNAUTHENTICATED` requires a matching scheduler-side
+[security configuration](../../operations/security/).
+
+### `docker_registry`
+
+The URI of the Docker Registry that will be used by the Aurora client to resolve docker tags to concrete
+image ids, when using the docker binding helper, like `{{docker.image[name][tag]}}`.
diff --git a/source/documentation/0.18.1/reference/client-commands.md b/source/documentation/0.18.1/reference/client-commands.md
new file mode 100644
index 0000000..49b45ca
--- /dev/null
+++ b/source/documentation/0.18.1/reference/client-commands.md
@@ -0,0 +1,339 @@
+Aurora Client Commands
+======================
+
+- [Introduction](#introduction)
+- [Cluster Configuration](#cluster-configuration)
+- [Job Keys](#job-keys)
+- [Modifying Aurora Client Commands](#modifying-aurora-client-commands)
+- [Regular Jobs](#regular-jobs)
+    - [Creating and Running a Job](#creating-and-running-a-job)
+    - [Running a Command On a Running Job](#running-a-command-on-a-running-job)
+    - [Killing a Job](#killing-a-job)
+    - [Adding Instances](#adding-instances)
+    - [Updating a Job](#updating-a-job)
+        - [Coordinated job updates](#coordinated-job-updates)
+    - [Renaming a Job](#renaming-a-job)
+    - [Restarting Jobs](#restarting-jobs)
+- [Cron Jobs](#cron-jobs)
+- [Comparing Jobs](#comparing-jobs)
+- [Viewing/Examining Jobs](#viewingexamining-jobs)
+    - [Listing Jobs](#listing-jobs)
+    - [Inspecting a Job](#inspecting-a-job)
+    - [Versions](#versions)
+    - [Checking Your Quota](#checking-your-quota)
+    - [Finding a Job on Web UI](#finding-a-job-on-web-ui)
+    - [Getting Job Status](#getting-job-status)
+    - [Opening the Web UI](#opening-the-web-ui)
+    - [SSHing to a Specific Task Machine](#sshing-to-a-specific-task-machine)
+    - [SCPing with Specific Task Machines](#scping-with-specific-task-machines)
+    - [Templating Command Arguments](#templating-command-arguments)
+
+Introduction
+------------
+
+Once you have written an `.aurora` configuration file that describes
+your Job and its parameters and functionality, you interact with Aurora
+using Aurora Client commands. This document describes all of these commands
+and how and when to use them. All Aurora Client commands start with
+`aurora`, followed by the name of the specific command and its
+arguments.
+
+*Job keys* are a very common argument to Aurora commands, as well as the
+gateway to useful information about a Job. Before using Aurora, you
+should read the next section which describes them in detail. The section
+after that briefly describes how you can modify the behavior of certain
+Aurora Client commands, linking to a detailed document about how to do
+that.
+
+This is followed by the Regular Jobs section, which describes the basic
+Client commands for creating, running, and manipulating Aurora Jobs.
+After that are sections on Comparing Jobs and Viewing/Examining Jobs. In
+other words, various commands for getting information and metadata about
+Aurora Jobs.
+
+Cluster Configuration
+---------------------
+
+The client must be able to find a configuration file that specifies available clusters. This file
+declares shorthand names for clusters, which are in turn referenced by job configuration files
+and client commands.
+
+The client will load at most two configuration files, making both of their defined clusters
+available. The first is intended to be a system-installed cluster file, using the path specified in
+the environment variable `AURORA_CONFIG_ROOT`, defaulting to `/etc/aurora/clusters.json` if the
+environment variable is not set. The second is a user-installed file, located at
+`~/.aurora/clusters.json`.
+
+For more details on cluster configuration see the
+[Client Cluster Configuration](../client-cluster-configuration/) documentation.
+
+Job Keys
+--------
+
+A job key is a unique system-wide identifier for an Aurora-managed
+Job, for example `cluster1/web-team/test/experiment204`. It is a 4-tuple
+consisting of, in order, *cluster*, *role*, *environment*, and
+*jobname*, separated by slashes (`/`). Cluster is the name of an Aurora
+cluster. Role is the Unix service account under which the Job
+runs. Environment is a namespace component like `devel`, `test`,
+`prod`, or `stagingN.` Jobname is the Job's name.
+
+The combination of all four values uniquely specifies the Job. If any
+one value is different from that of another job key, the two job keys
+refer to different Jobs. For example, job key
+`cluster1/tyg/prod/workhorse` is different from
+`cluster1/tyg/prod/workcamel` is different from
+`cluster2/tyg/prod/workhorse` is different from
+`cluster2/foo/prod/workhorse` is different from
+`cluster1/tyg/test/workhorse.`
+
+Role names are user accounts existing on the agent machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
+Modifying Aurora Client Commands
+--------------------------------
+
+For certain Aurora Client commands, you can define hook methods that run
+either before or after an action that takes place during the command's
+execution, as well as based on whether the action finished successfully or failed
+during execution. Basically, a hook is code that lets you extend the
+command's actions. The hook executes on the client side, specifically on
+the machine executing Aurora commands.
+
+Hooks can be associated with these Aurora Client commands:
+
+  - `job create`
+  - `job kill`
+  - `job restart`
+
+The process for writing and activating them is complex enough
+that we explain it in a devoted document, [Hooks for Aurora Client API](../client-hooks/).
+
+Regular Jobs
+------------
+
+This section covers Aurora commands related to running, killing,
+renaming, updating, and restarting a basic Aurora Job.
+
+### Creating and Running a Job
+
+    aurora job create <job key> <configuration file>
+
+Creates and then runs a Job with the specified job key based on a `.aurora` configuration file.
+The configuration file may also contain and activate hook definitions.
+
+### Running a Command On a Running Job
+
+    aurora task run CLUSTER/ROLE/ENV/NAME[/INSTANCES] <cmd>
+
+Runs a shell command on all machines currently hosting shards of a
+single Job.
+
+`run` supports the same command line wildcards used to populate a Job's
+commands; i.e. anything in the `{{mesos.*}}` and `{{thermos.*}}`
+namespaces.
+
+### Killing a Job
+
+    aurora job killall CLUSTER/ROLE/ENV/NAME
+
+Kills all Tasks associated with the specified Job, blocking until all
+are terminated. Defaults to killing all instances in the Job.
+
+The `<configuration file>` argument for `kill` is optional. Use it only
+if it contains hook definitions and activations that affect the
+kill command.
+
+### Adding Instances
+
+    aurora job add CLUSTER/ROLE/ENV/NAME/INSTANCE <count>
+
+Adds `<count>` instances to the existing job. The configuration of the new instances is derived from
+an active job instance pointed to by the `/INSTANCE` part of the job specification. This command is
+a simpler way to scale out an existing job when an instance with desired task configuration
+already exists. Use `aurora update start` to add instances with a new (updated) configuration.
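+
+For example, assuming the `devcluster/www-data/prod/hello` job used elsewhere in this document, the
+following clones the configuration of instance 0 into two additional instances:
+
+    aurora job add devcluster/www-data/prod/hello/0 2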
+
+### Updating a Job
+
+You can manage job updates using the `aurora update` command.  Please see
+[the Job Update documentation](../../features/job-updates/) for more details.
+
+
+### Renaming a Job
+
+Renaming is a tricky operation as downstream clients must be informed of
+the new name. A conservative approach
+to renaming suitable for production services is:
+
+1.  Modify the Aurora configuration file to change the role,
+    environment, and/or name as appropriate to the standardized naming
+    scheme.
+2.  Check that only these naming components have changed
+    with `aurora diff`.
+
+        aurora job diff CLUSTER/ROLE/ENV/NAME <job_configuration>
+
+3.  Create the (identical) job at the new key. You may need to request a
+    temporary quota increase.
+
+        aurora job create CLUSTER/ROLE/ENV/NEW_NAME <job_configuration>
+
+4.  Migrate all clients over to the new job key. Update all links and
+    dashboards. Ensure that both job keys run identical versions of the
+    code while in this state.
+5.  After verifying that all clients have successfully moved over, kill
+    the old job.
+
+        aurora job killall CLUSTER/ROLE/ENV/NAME
+
+6.  If you received a temporary quota increase, be sure to let the
+    powers that be know you no longer need the additional capacity.
+
+### Restarting Jobs
+
+`restart` restarts all shards of the Job identified by the given job key:
+
+    aurora job restart CLUSTER/ROLE/ENV/NAME[/INSTANCES]
+
+Restarts are controlled on the client side, so aborting
+the `job restart` command halts the restart operation.
+
+**Note**: `job restart` only applies its command line arguments and neither uses nor
+is affected by `update.config`. Restarting
+does ***not*** involve a configuration change. To update the
+configuration, use `update.config`.
+
+The `--config` argument for restart is optional. Use it only
+if it contains hook definitions and activations that affect the
+`job restart` command.
+
+Cron Jobs
+---------
+
+You can manage cron jobs using the `aurora cron` command.  Please see
+[the Cron Jobs Feature](../../features/cron-jobs/) for more details.
+
+Comparing Jobs
+--------------
+
+    aurora job diff CLUSTER/ROLE/ENV/NAME <job configuration>
+
+Compares a job configuration against a running job. By default the diff
+is determined using `diff`, though you may choose an alternate diff
+program by specifying the `DIFF_VIEWER` environment variable.
+
+Viewing/Examining Jobs
+----------------------
+
+Above we discussed creating, killing, and updating Jobs. Here we discuss
+how to view and examine Jobs.
+
+### Listing Jobs
+
+    aurora config list <job configuration>
+
+Lists all Jobs registered with the Aurora scheduler in the named cluster for the named role.
+
+### Inspecting a Job
+
+    aurora job inspect CLUSTER/ROLE/ENV/NAME <job configuration>
+
+`inspect` verifies that its specified job can be parsed from a
+configuration file, and displays the parsed configuration.
+
+### Checking Your Quota
+
+    aurora quota get CLUSTER/ROLE
+
+Prints the production quota allocated to the role in the given
+cluster. Only non-[dedicated](../../features/constraints/#dedicated-attribute)
+[production](../configuration/#job-objects) jobs consume quota.
+
+### Finding a Job on Web UI
+
+When you create a job, part of the output response contains a URL that goes
+to the job's scheduler UI page. For example:
+
+    vagrant@precise64:~$ aurora job create devcluster/www-data/prod/hello /vagrant/examples/jobs/hello_world.aurora
+    INFO] Creating job hello
+    INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+    INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+You can go to the scheduler UI page for this job via `http://precise64:8081/scheduler/www-data/prod/hello`.
+You can go to the overall scheduler UI page by truncating that URL at `scheduler`: `http://precise64:8081/scheduler`.
+
+Once you click through to a role page, you see Jobs arranged
+separately by pending jobs, active jobs and finished jobs.
+Jobs are arranged by role, typically a service account for
+production jobs and user accounts for test or development jobs.
+
+### Getting Job Status
+
+    aurora job status <job_key>
+
+Returns the status of recent tasks associated with the
+`job_key` specified Job in its supplied cluster. Typically this includes
+a mix of active tasks (running or assigned) and inactive tasks
+(successful, failed, and lost.)
+
+### Opening the Web UI
+
+Use the Job's web UI scheduler URL or the `aurora status` command to find out on which
+machines individual tasks are scheduled. You can open the web UI via the
+`open` command line command if invoked from your machine:
+
+    aurora job open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+If only the cluster is specified, it goes directly to that cluster's
+scheduler main page. If the role is specified, it goes to the top-level
+role page. If the full job key is specified, it goes directly to the job
+page where you can inspect individual tasks.
+
+### SSHing to a Specific Task Machine
+
+    aurora task ssh <job_key> <shard number>
+
+You can have the Aurora client ssh directly to the machine that has been
+assigned a particular Job/shard number. This may be useful for quickly
+diagnosing issues such as performance issues or abnormal behavior on a
+particular machine.
+
+### SCPing with Specific Task Machines
+
+    aurora task scp [<cluster>/<role>/<env>/<job_name>/<instance_id>]:source [<cluster>/<role>/<env>/<job_name>/<instance_id>]:dest
+
+You can have the Aurora client copy file(s)/folder(s) to, from, and between
+individual tasks. The sandbox folder serves as the relative root and is the
+same folder you see when you browse `chroot` from the Scheduler task UI. You
+can also use absolute paths (like for `/tmp`), but tilde expansion is not
+supported. Currently, this command is only fully supported for Mesos
+containers. Users may use this to copy files from Docker containers but they
+cannot copy files to them.
+
+### Templating Command Arguments
+
+    aurora task run [-e] [-t THREADS] <job_key> -- <<command-line>>
+
+Given a job specification, run the supplied command on all hosts and
+return the output. You may use the standard Mustache templating rules:
+
+- `{{thermos.ports[name]}}` substitutes the specific named port of the
+  task assigned to this machine
+- `{{mesos.instance}}` substitutes the shard id of the job's task
+  assigned to this machine
+- `{{thermos.task_id}}` substitutes the task id of the job's task
+  assigned to this machine
+
+For example, the following type of pattern can be a powerful diagnostic
+tool:
+
+    aurora task run -t5 cluster1/tyg/devel/seizure -- \
+      'curl -s -m1 localhost:{{thermos.ports[http]}}/vars | grep uptime'
+
+By default, the command runs in the Task's sandbox. The `-e` option can
+run the command in the executor's sandbox. This is mostly useful for
+Aurora administrators.
+
+You can parallelize the runs by using the `-t` option.
diff --git a/source/documentation/0.18.1/reference/client-hooks.md b/source/documentation/0.18.1/reference/client-hooks.md
new file mode 100644
index 0000000..ee64193
--- /dev/null
+++ b/source/documentation/0.18.1/reference/client-hooks.md
@@ -0,0 +1,228 @@
+# Hooks for Aurora Client API
+
+You can execute hook methods around Aurora API Client methods when they are called by the Aurora Command Line commands.
+
+Explaining how hooks work is a bit tricky because of some indirection about what they apply to. Basically, a hook is code that executes when a particular Aurora Client API method runs, letting you extend the method's actions. The hook executes on the client side, specifically on the machine executing Aurora commands.
+
+The catch is that hooks are associated with Aurora Client API methods, which users don't directly call. Instead, users call Aurora Command Line commands, which call Client API methods during their execution. Since which hooks run depend on which Client API methods get called, you will need to know which Command Line commands call which API methods. Later on, there is a table showing the various associations.
+
+**Terminology Note**: From now on, "method(s)" refer to Client API methods, and "command(s)" refer to Command Line commands.
+
+- [Hook Types](#hook-types)
+- [Execution Order](#execution-order)
+- [Hookable Methods](#hookable-methods)
+- [Activating and Using Hooks](#activating-and-using-hooks)
+- [.aurora Config File Settings](#aurora-config-file-settings)
+- [Command Line](#command-line)
+- [Hooks Protocol](#hooks-protocol)
+  - [pre_ Methods](#pre_-methods)
+  - [err_ Methods](#err_-methods)
+  - [post_ Methods](#post_-methods)
+- [Generic Hooks](#generic-hooks)
+- [Hooks Process Checklist](#hooks-process-checklist)
+
+
+## Hook Types
+
+Hooks have three basic types, differing by when they run with respect to their associated method.
+
+`pre_<method_name>`: When its associated method is called, the `pre_` hook executes first, then the called method. If the `pre_` hook fails, the method never runs. Later code that expected the method to succeed may be affected by this, which can result in the Aurora client terminating.
+
+Note that a `pre_` hook can error-trap internally so it does not
+return `False`. Designers/contributors of new `pre_` hooks should
+consider whether or not to error-trap them. You can error trap at the
+highest level very generally and always pass the `pre_` hook by
+returning `True`. For example:
+
+    def pre_create(...):
+      do_something()  # if do_something fails with an exception, the create_job is not attempted!
+      return True
+
+    # However...
+    def pre_create(...):
+      try:
+        do_something()  # may cause exception
+      except Exception:  # generic error trap will catch it
+        pass  # and ignore the exception
+      return True  # create_job will run in any case!
+
+`post_<method_name>`: A `post_` hook executes after its associated method successfully finishes running. If it fails, the already executed method is unaffected. A `post_` hook's error is trapped, and any later operations are unaffected.
+
+`err_<method_name>`: Executes only when its associated method returns a status other than OK or throws an exception. If an `err_` hook fails, the already executed method is unaffected. An `err_` hook's error is trapped, and any later operations are unaffected.
+
+## Execution Order
+
+A command with `pre_`, `post_`, and `err_` hooks defined and activated for its called method executes in the following order when the method successfully executes:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and successfully finishes
+6. `post_` method hook runs
+7. Command code executes
+8. Command execution ends
+
+The following is what happens when, for the same command and hooks, the method associated with the command suffers an error and does not successfully finish executing:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and fails
+6. `err_` method hook runs
+7. Command Code executes (if `err_` method does not end the command execution)
+8. Command execution ends
+
+Note that the `post_` and `err_` hooks for the same method can never both run for a single execution of that method.
+
+## Hookable Methods
+
+You can associate `pre_`, `post_`, and `err_` hooks with the following methods. Since you do not directly interact with the methods, but rather the Aurora Command Line commands that call them, for each method we also list the command(s) that can call the method. Note that a different method or methods may be called by a command depending on how the command's other code executes. Similarly, multiple commands can call the same method. We also list the methods' argument signatures, which are used by their associated hooks. <a name="Chart"></a>
+
+  Aurora Client API Method | Client API Method Argument Signature | Aurora Command Line Command
+  -------------------------| ------------------------------------- | ---------------------------
+  ```create_job``` | ```self```, ```config``` | ```job create```, ```runtask```
+  ```restart``` | ```self```, ```job_key```, ```shards```, ```update_config```, ```health_check_interval_seconds``` | ```job restart```
+  ```kill_job``` | ```self```, ```job_key```, ```shards=None``` |  ```job kill```
+  ```start_cronjob``` | ```self```, ```job_key``` | ```cron start```
+  ```start_job_update``` | ```self```, ```config```, ```instances=None``` | ```update start```
+
+Some specific examples:
+
+* `pre_create_job` executes when a `create_job` method is called, and before the `create_job` method itself executes.
+
+* `post_cancel_update` executes after a `cancel_update` method has successfully finished running.
+
+* `err_kill_job` executes when the `kill_job` method is called, but doesn't successfully finish running.
+
+## Activating and Using Hooks
+
+By default, hooks are inactive. If you do not want to use hooks, you do not need to make any changes to your code. If you do want to use hooks, you will need to alter your `.aurora` config file to activate them both for the configuration as a whole as well as for individual `Job`s. And, of course, you will need to define in your config file what happens when a particular hook executes.
+
+## .aurora Config File Settings
+
+You can define a top-level `hooks` variable in any `.aurora` config file. `hooks` is a list of all objects that define hooks used by `Job`s defined in that config file. If you do not want to define any hooks for a configuration, `hooks` is optional.
+
+    hooks = [Object_with_defined_hooks1, Object_with_defined_hooks2]
+
+Be careful when assembling a config file using `include` on multiple smaller config files. If there are multiple files that assign a value to `hooks`, only the last assignment made will stick. For example, if `x.aurora` has `hooks = [a, b, c]` and `y.aurora` has `hooks = [d, e, f]` and `z.aurora` has, in this order, `include x.aurora` and `include y.aurora`, the `hooks` value will be `[d, e, f]`.
+
+Also, for any `Job` that you want to use hooks with, its `Job` definition in the `.aurora` config file must set an `enable_hooks` flag to `True` (it defaults to `False`). By default, hooks are disabled and you must enable them for `Job`s of your choice.
+
+To summarize, to use hooks for a particular job, you must both activate hooks for your config file as a whole, and for that job. Activating hooks only for individual jobs won't work, nor will only activating hooks for your config file as a whole. You must also specify the hooks' defining object in the `hooks` variable.
+
+Recall that `.aurora` config files are written in Pystachio. So the following turns on hooks for production jobs at cluster1 and cluster2, but leaves them off for similar jobs with a defined user role. Of course, you also need to list the objects that define the hooks in your config file's `hooks` variable.
+
+    jobs = [
+            Job(enable_hooks = True, cluster = c, env = 'prod') for c in ('cluster1', 'cluster2')
+           ]
+    jobs.extend(
+       Job(cluster = c, env = 'prod', role = getpass.getuser()) for c in ('cluster1', 'cluster2'))
+       # Hooks disabled for these jobs
+
+## Command Line
+
+All Aurora Command Line commands now accept an `.aurora` config file as an optional parameter (some, of course, accept it as a required parameter). Whenever a command has a `.aurora` file parameter, any hooks specified and activated in the `.aurora` file can be used. For example:
+
+    aurora job restart cluster1/role/env/app myapp.aurora
+
+The command activates any hooks specified and activated in `myapp.aurora`. For the `restart` command, that is the only thing the `myapp.aurora` parameter does. So, if the command was the following, since there is no `.aurora` config file to specify any hooks, no hooks on the `restart` command can run.
+
+    aurora job restart cluster1/role/env/app
+
+## Hooks Protocol
+
+Any object defined in the `.aurora` config file can define hook methods. You should define your hook methods within a class, and then use the class name as a value in the `hooks` list in your config file.
+
+Note that you can define other methods in the class that its hook methods can call; all the logic of a hook does not have to be in its definition.
+
+The following example defines a class containing a `pre_kill_job` hook definition that calls another method defined in the class.
+
+    # Defines a method pre_kill_job
+    class KillConfirmer(object):
+      def confirm(self, msg):
+        return raw_input(msg).lower() == 'yes'
+
+      def pre_kill_job(self, job_key, shards=None):
+        shards = ('shards %s' % shards) if shards is not None else 'all shards'
+        return self.confirm('Are you sure you want to kill %s (%s)? (yes/no): '
+                            % (job_key, shards))
+
+### pre_ Methods
+
+`pre_` methods have the signature:
+
+    pre_<API method name>(self, <associated method's signature>)
+
+`pre_` methods have the same signature as their associated method, with the addition of `self` as the first parameter. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `pre_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+If this method returns False, the API command call aborts.
+
+### err_ Methods
+
+`err_` methods have the signature:
+
+    err_<API method name>(self, exc, <associated method's signature>)
+
+`err_` methods have the same signature as their associated method, with the addition of a first parameter `self` and a second parameter `exc`. `exc` is either a result with responseCode other than `ResponseCode.OK` or an `Exception`. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `err`_ methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+`err_` method return codes are ignored.
+
+### post_ Methods
+
+`post_` methods have the signature:
+
+    post_<API method name>(self, result, <associated method signature>)
+
+`post_` method parameters are `self`, then `result`, followed by the same parameter signature as their associated method. `result` is the result of the associated method call. See the [chart](#chart) above for the mapping of parameters to methods. When writing `post_` methods, you can use the `*` and `**` syntax to designate that all unspecified arguments are passed in a list to the `*`ed parameter and all unspecified named arguments with values are passed as name/value pairs to the `**`ed parameter.
+
+`post_` method return codes are ignored.
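+
+For example, a hypothetical `post_create_job` hook (the class and log message are illustrative) that
+records the outcome of a successful `create_job` call might look like:
+
+    from twitter.common import log
+
+    class CreateAuditor(object):
+      # Runs after create_job succeeds; the return value is ignored.
+      def post_create_job(self, result, config):
+        log.info('create_job finished with result: %s' % result)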
+
+## Generic Hooks
+
+There are seven Aurora API Methods which any of the three hook types can attach to. Thus, there are 21 possible hook/method combinations for a single `.aurora` config file. Say that you define `pre_` and `post_` hooks for the `restart` method. That leaves 19 undefined hook/method combinations; `err_restart` and the 3 `pre_`, `post_`, and `err_` hooks for each of the other 6 hookable methods. You can define what happens when any of these otherwise undefined 19 hooks execute via a generic hook, whose signature is:
+
+    generic_hook(self, hook_config, event, method_name, result_or_err, args*, kw**)
+
+where:
+
+* `hook_config` is a named tuple of `config` (the Pystachio `config` object) and `job_key`.
+
+* `event` is one of `pre`, `err`, or `post`, indicating which type of hook the generic hook is standing in for. For example, assume no specific hooks were defined for the `restart` API command. If `generic_hook` is defined and activated, and `restart` is called, `generic_hook` will effectively run as `pre_restart`, `post_restart`, and `err_restart`. You can use a selection statement on this value so that `generic_hook` will act differently based on whether it is standing in for a `pre_`, `post_`, or `err_` hook.
+
+* `method_name` is the Client API method name whose execution is causing this execution of the `generic_hook`.
+
+* `args*`, `kw**` are the API method arguments and keyword arguments respectively.
+* `result_or_err` is a tri-state parameter taking one of these three values:
+  1. `None` for `pre_` hooks
+  2. `result` for `post_` hooks
+  3. `exc` for `err_` hooks
+
+Example:
+
+    # Overrides the standard do-nothing generic_hook by adding a log writing operation.
+    from twitter.common import log
+
+    class Logger(object):
+      '''Adds to the log every time a hookable API method is called'''
+      def generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw):
+        log.info('%s: %s_%s of %s'
+                 % (self.__class__.__name__, event, method_name, hook_config.job_key))
+
+## Hooks Process Checklist
+
+1. In your `.aurora` config file, add a `hooks` variable. Note that you may want to define a `.aurora` file only for hook definitions and then include this file in multiple other config files that you want to use the same hooks.
+
+    hooks = []
+
+2. In the `hooks` variable, list all objects that define hooks used by `Job`s defined in this config:
+
+    hooks = [Object_hook_definer1, Object_hook_definer2]
+
+3. For each job that uses hooks in this config file, add `enable_hooks = True` to the `Job` definition. Note that this is necessary even if you only want to use the generic hook.
+
+4. Write your `pre_`, `post_`, and `err_` hook definitions as part of an object definition in your `.aurora` config file.
+
+5. If desired, write your `generic_hook` definition as part of an object definition in your `.aurora` config file. Remember, the object must be listed as a member of `hooks`.
+
+6. If your Aurora command line command does not otherwise take an `.aurora` config file argument, add the appropriate `.aurora` file as an argument in order to define and activate the configuration's hooks.
diff --git a/source/documentation/0.18.1/reference/configuration-best-practices.md b/source/documentation/0.18.1/reference/configuration-best-practices.md
new file mode 100644
index 0000000..9ce7fcb
--- /dev/null
+++ b/source/documentation/0.18.1/reference/configuration-best-practices.md
@@ -0,0 +1,187 @@
+Aurora Configuration Best Practices
+===================================
+
+Use As Few .aurora Files As Possible
+------------------------------------
+
+When creating your `.aurora` configuration, try to keep all versions of
+a particular job within the same `.aurora` file. For example, if you
+have separate jobs for `cluster1`, `cluster1` staging, `cluster1`
+testing, and `cluster2`, keep them as close together as possible.
+
+Constructs shared across multiple jobs owned by your team (e.g.
+team-level defaults or structural templates) can be split into separate
+`.aurora` files and included via the `include` directive.
+
+
+Avoid Boilerplate
+------------------
+
+If you see repetition or find yourself copy and pasting any parts of
+your configuration, it's likely an opportunity for templating. Take the
+example below:
+
+`redundant.aurora` contains:
+
+    download = Process(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'rm -rf Python-2.7.3 && tar xjf Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    build = Process(
+      name = 'build',
+      cmdline = 'pushd Python-2.7.3 && ./configure && make && popd',
+      max_failures = 1)
+
+    email = Process(
+      name = 'email',
+      cmdline = 'echo Success | mail feynman@tmc.com',
+      max_failures = 5,
+      min_duration = 1)
+
+    build_python = Task(
+      name = 'build_python',
+      processes = [download, unpack, build, email],
+      constraints = [Constraint(order = ['download', 'unpack', 'build', 'email'])])
+
+As you'll notice, there's a lot of repetition in the `Process`
+definitions. For example, almost every process sets a `max_failures`
+limit to 5 and a `min_duration` to 1. This is an opportunity for factoring
+into a common process template.
+
+Furthermore, the Python version is repeated everywhere. This can be
+bound via structural templating as described in the [Advanced Binding](../configuration-templating/#AdvancedBinding)
+section.
+
+`less_redundant.aurora` contains:
+
+    class Python(Struct):
+      version = Required(String)
+      base = Default(String, 'Python-{{version}}')
+      package = Default(String, '{{base}}.tar.bz2')
+
+    ReliableProcess = Process(
+      max_failures = 5,
+      min_duration = 1)
+
+    download = ReliableProcess(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/{{python.version}}/{{python.package}}')
+
+    unpack = ReliableProcess(
+      name = 'unpack',
+      cmdline = 'rm -rf {{python.base}} && tar xjf {{python.package}}')
+
+    build = ReliableProcess(
+      name = 'build',
+      cmdline = 'pushd {{python.base}} && ./configure && make && popd',
+      max_failures = 1)
+
+    email = ReliableProcess(
+      name = 'email',
+      cmdline = 'echo Success | mail {{role}}@foocorp.com')
+
+    build_python = SequentialTask(
+      name = 'build_python',
+      processes = [download, unpack, build, email]).bind(python = Python(version = "2.7.3"))
+
+
+Thermos Uses bash, But Thermos Is Not bash
+-------------------------------------------
+
+#### Bad
+
+Many tiny Processes make for harder-to-manage configurations.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'rcp user@my_machine:my_application .'
+    )
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+#### Good
+
+Each `cmdline` runs in a bash subshell, so you have the full power of
+bash. Chaining commands with `&&` or `||` is almost always the right
+thing to do.
+
+Also, for Tasks that are simply a list of processes that run one after
+another, consider using the `SequentialTask` helper, which applies a
+linear ordering constraint for you.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'rcp user@my_machine:my_application . && unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+
+Rarely Use Functions In Your Configurations
+-------------------------------------------
+
+90% of the time you define a function in a `.aurora` file, you're
+probably Doing It Wrong(TM).
+
+#### Bad
+
+    def get_my_task(name, user, cpu, ram, disk):
+      return Task(
+        name = name,
+        user = user,
+        processes = [STAGE_PROCESS, RUN_PROCESS],
+        constraints = order(STAGE_PROCESS, RUN_PROCESS),
+        resources = Resources(cpu = cpu, ram = ram, disk = disk)
+      )
+
+    task_one = get_my_task('task_one', 'feynman', 1.0, 32*MB, 1*GB)
+    task_two = get_my_task('task_two', 'feynman', 2.0, 64*MB, 1*GB)
+
+#### Good
+
+This one is more idiomatic. Forced keyword arguments prevent accidents,
+e.g. constructing a task with "32*MB" when you mean 32 MB of RAM and not
+of disk. Less proliferation of task-construction techniques makes for an
+easier-to-read, quicker-to-understand, and more composable
+configuration.
+
+    TASK_TEMPLATE = SequentialTask(
+      user = 'wickman',
+      processes = [STAGE_PROCESS, RUN_PROCESS],
+    )
+
+    task_one = TASK_TEMPLATE(
+      name = 'task_one',
+      resources = Resources(cpu = 1.0, ram = 32*MB, disk = 1*GB)
+    )
+
+    task_two = TASK_TEMPLATE(
+      name = 'task_two',
+      resources = Resources(cpu = 2.0, ram = 64*MB, disk = 1*GB)
+    )
diff --git a/source/documentation/0.18.1/reference/configuration-templating.md b/source/documentation/0.18.1/reference/configuration-templating.md
new file mode 100644
index 0000000..16d5c9d
--- /dev/null
+++ b/source/documentation/0.18.1/reference/configuration-templating.md
@@ -0,0 +1,306 @@
+Aurora Configuration Templating
+===============================
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a templating library called
+*Pystachio*, a powerful tool for configuration specification and reuse.
+
+[Aurora Configuration Reference](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the *Aurora+Thermos Configuration
+Reference* without `import` statements - the Aurora config loader
+injects them automatically. Other than that the `.aurora` format
+works like any other Python script.
+
+
+Templating 1: Binding in Pystachio
+----------------------------------
+
+Pystachio uses the visually distinctive {{}} to indicate template
+variables. These are often called "mustache variables" after the
+similarly appearing variables in the Mustache templating system and
+because the curly braces resemble mustaches.
+
+If you are familiar with the Mustache system, note that Pystachio
+templates differ significantly: they have no nesting, joining, or
+inheritance semantics. On the other hand, templates are evaluated
+iteratively, which affords some level of indirection.
+
+Let's start with the simplest template: text with one
+variable, in this case `name`:
+
+    Hello {{name}}
+
+If we evaluate this as is, we'd get back:
+
+    Hello
+
+If a template variable doesn't have a value, when evaluated it's
+replaced with nothing. If we add a binding to give it a value:
+
+    { "name" : "Tom" }
+
+We'd get back:
+
+    Hello Tom
+
+Every Pystachio object has an associated `.bind` method that can bind
+values to {{}} variables. Bindings are not immediately evaluated.
+Instead, they are evaluated only when the interpolated value of the
+object is necessary, e.g. for performing equality or serializing a
+message over the wire.
+
+Objects with and without mustache templated variables behave
+differently:
+
+    >>> Float(1.5)
+    Float(1.5)
+
+    >>> Float('{{x}}.5')
+    Float({{x}}.5)
+
+    >>> Float('{{x}}.5').bind(x = 1)
+    Float(1.5)
+
+    >>> Float('{{x}}.5').bind(x = 1) == Float(1.5)
+    True
+
+    >>> contextual_object = String('{{metavar{{number}}}}').bind(
+    ... metavar1 = "first", metavar2 = "second")
+
+    >>> contextual_object
+    String({{metavar{{number}}}})
+
+    >>> contextual_object.bind(number = 1)
+    String(first)
+
+    >>> contextual_object.bind(number = 2)
+    String(second)
+
+You usually bind simple key-to-value pairs, but you can also bind three
+other objects: lists, dictionaries, and structurals. These will be
+described in detail later.
+
+
+### Structurals in Pystachio / Aurora
+
+Most Aurora/Thermos users don't ever (knowingly) interact with `String`,
+`Float`, or `Integer` Pystachio objects directly. Instead, they interact
+with derived structural (`Struct`) objects that are collections of
+fundamental and structural objects. The structural object components are
+called *attributes*. Aurora's most used structural objects are `Job`,
+`Task`, and `Process`:
+
+    class Process(Struct):
+      cmdline = Required(String)
+      name = Required(String)
+      max_failures = Default(Integer, 1)
+      daemon = Default(Boolean, False)
+      ephemeral = Default(Boolean, False)
+      min_duration = Default(Integer, 5)
+      final = Default(Boolean, False)
+
+Construct default objects by following the object's type with (). If you
+want an attribute to have a value different from its default, include
+the attribute name and value inside the parentheses.
+
+    >>> Process()
+    Process(daemon=False, max_failures=1, ephemeral=False,
+      min_duration=5, final=False)
+
+Attribute values can be template variables, which then receive specific
+values when creating the object.
+
+    >>> Process(cmdline = 'echo {{message}}')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo {{message}}, final=False)
+
+    >>> Process(cmdline = 'echo {{message}}').bind(message = 'hello world')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+A powerful binding property is that all of an object's children inherit its
+bindings:
+
+    >>> List(Process)([
+    ... Process(name = '{{prefix}}_one'),
+    ... Process(name = '{{prefix}}_two')
+    ... ]).bind(prefix = 'hello')
+    ProcessList(
+      Process(daemon=False, name=hello_one, max_failures=1, ephemeral=False, min_duration=5, final=False),
+      Process(daemon=False, name=hello_two, max_failures=1, ephemeral=False, min_duration=5, final=False)
+      )
+
+Remember that an Aurora Job contains Tasks which contain Processes. A
+Job level binding is inherited by its Tasks and all their Processes.
+Similarly a Task level binding is available to that Task and its
+Processes but is *not* visible at the Job level (inheritance is a
+one-way street.)
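+
+As a rough sketch (the names and values below are illustrative, not taken
+from the text above), a Job-level binding is visible all the way down in a
+Process `cmdline`:
+
+    greeter = Process(name = 'greeter', cmdline = 'echo {{greeting}}')
+
+    greeter_task = Task(
+      name = 'greeter',
+      processes = [greeter],
+      resources = Resources(cpu = 0.1, ram = 16*MB, disk = 16*MB))
+
+    greeter_job = Job(
+      name = 'greeter',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = 'devel',
+      task = greeter_task
+    ).bind(greeting = 'hello world')  # inherited by the Task and its Process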
+
+#### Mustaches Within Structurals
+
+When you define a `Struct` schema, one powerful, but confusing, feature
+is that all of that structure's attributes are Mustache variables within
+the enclosing scope *once they have been populated*.
+
+For example, when `Process` is defined above, all its attributes such as
+{{`name`}}, {{`cmdline`}}, {{`max_failures`}} etc. are immediately
+defined as Mustache variables, implicitly bound into the `Process`, and
+inherited by all child objects once they are defined.
+
+Thus, you can do the following:
+
+    >>> Process(name = "installer", cmdline = "echo {{name}} is running")
+    Process(daemon=False, name=installer, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo installer is running, final=False)
+
+WARNING: This binding only takes place in one direction. For example,
+the following does NOT work and does not set the `Process` `name`
+attribute's value.
+
+    >>> Process().bind(name = "installer")
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5, final=False)
+
+The following is also not possible and results in an infinite loop that
+attempts to resolve `Process.name`.
+
+    >>> Process(name = '{{name}}').bind(name = 'installer')
+
+Do not confuse Structural attributes with bound Mustache variables.
+Attributes are implicitly converted to Mustache variables but not vice
+versa.
+
+### Templating 2: Structurals Are Factories
+
+#### A Second Way of Templating
+
+A second templating method is both as powerful as the aforementioned and
+often confused with it. This method is due to automatic conversion of
+Struct attributes to Mustache variables as described above.
+
+Suppose you create a Process object:
+
+    >>> p = Process(name = "process_one", cmdline = "echo hello world")
+
+    >>> p
+    Process(daemon=False, name=process_one, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+This `Process` object, "`p`", can be used wherever a `Process` object is
+needed. It can also be reused by changing the value(s) of its
+attribute(s). Here we change its `name` attribute from `process_one` to
+`process_two`.
+
+    >>> p(name = "process_two")
+    Process(daemon=False, name=process_two, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+Template creation is a common use for this technique:
+
+    >>> Daemon = Process(daemon = True)
+    >>> logrotate = Daemon(name = 'logrotate', cmdline = './logrotate conf/logrotate.conf')
+    >>> mysql = Daemon(name = 'mysql', cmdline = 'bin/mysqld --safe-mode')
+
+### Advanced Binding
+
+As described above, `.bind()` binds simple strings or numbers to
+Mustache variables. In addition to Structural types formed by combining
+atomic types, Pystachio has two container types, `List` and `Map`, which
+can also be bound via `.bind()`.
+
+#### Bind Syntax
+
+The `bind()` function can take Python dictionaries or `kwargs`
+interchangeably (when `**kwargs` appears in a function definition, `kwargs`
+receives a Python dictionary containing all keyword arguments after the
+formal parameter list).
+
+    >>> String('{{foo}}').bind(foo = 'bar') == String('{{foo}}').bind({'foo': 'bar'})
+    True
+
+Bindings done "closer" to the object in question take precedence:
+
+    >>> p = Process(name = '{{context}}_process')
+    >>> t = Task().bind(context = 'global')
+    >>> t(processes = [p, p.bind(context = 'local')])
+    Task(processes=ProcessList(
+      Process(daemon=False, name=global_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5),
+      Process(daemon=False, name=local_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5)
+    ))
+
+#### Binding Complex Objects
+
+##### Lists
+
+    >>> fibonacci = List(Integer)([1, 1, 2, 3, 5, 8, 13])
+    >>> String('{{fib[4]}}').bind(fib = fibonacci)
+    String(5)
+
+##### Maps
+
+    >>> first_names = Map(String, String)({'Kent': 'Clark', 'Wayne': 'Bruce', 'Prince': 'Diana'})
+    >>> String('{{first[Kent]}}').bind(first = first_names)
+    String(Clark)
+
+##### Structurals
+
+    >>> String('{{p.cmdline}}').bind(p = Process(cmdline = "echo hello world"))
+    String(echo hello world)
+
+### Structural Binding
+
+Use structural templates when binding more than two or three individual
+values at the Job or Task level. For fewer values, standard
+key-to-value binding is sufficient.
+
+Structural binding is a very powerful pattern and is most useful in
+Aurora/Thermos for doing Structural configuration. For example, you can
+define a job profile. The following profile uses `HDFS`, the Hadoop
+Distributed File System, to designate a file's location. `HDFS` does
+not come with Aurora, so you'll need to either install it separately
+or change the way the dataset is designated.
+
+    class Profile(Struct):
+      version = Required(String)
+      environment = Required(String)
+      dataset = Default(String, 'hdfs://home/aurora/data/{{environment}}')
+
+    PRODUCTION = Profile(version = 'live', environment = 'prod')
+    DEVEL = Profile(version = 'latest',
+                    environment = 'devel',
+                    dataset = 'hdfs://home/aurora/data/test')
+    TEST = Profile(version = 'latest', environment = 'test')
+
+    JOB_TEMPLATE = Job(
+      name = 'application',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = '{{profile.environment}}',
+      task = SequentialTask(
+        name = 'task',
+        resources = Resources(cpu = 2, ram = 4*GB, disk = 8*GB),
+        processes = [
+          Process(name = 'main',
+                  cmdline = 'java -jar application.jar -hdfsPath {{profile.dataset}}')
+        ]
+      )
+    )
+
+    jobs = [
+      JOB_TEMPLATE(instances = 100).bind(profile = PRODUCTION),
+      JOB_TEMPLATE.bind(profile = DEVEL),
+      JOB_TEMPLATE.bind(profile = TEST),
+    ]
+
+In this case, a custom structural `Profile` is created to self-document
+the configuration to some degree. This also allows some schema
+"type-checking" and default self-substitution, e.g. in
+`Profile.dataset` above.
+
+So rather than a `.bind()` with a half-dozen substituted variables, you
+can bind a single object that has sensible defaults stored in a single
+place.
diff --git a/source/documentation/0.18.1/reference/configuration-tutorial.md b/source/documentation/0.18.1/reference/configuration-tutorial.md
new file mode 100644
index 0000000..c170d47
--- /dev/null
+++ b/source/documentation/0.18.1/reference/configuration-tutorial.md
@@ -0,0 +1,531 @@
+Aurora Configuration Tutorial
+=============================
+
+How to write Aurora configuration files, including feature descriptions
+and best practices. When writing a configuration file, make use of
+`aurora job inspect`. It takes the same job key and configuration file
+arguments as `aurora job create` or `aurora update start`. It first ensures the
+configuration parses, then outputs it in human-readable form.
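+
+For example, to inspect the `hello_world` job defined near the end of this
+document (substitute your own cluster, role, and environment):
+
+    aurora job inspect cluster1/$USER/test/hello_world hello_world.aurora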
+
+You should read this after going through the general [Aurora Tutorial](../../getting-started/tutorial/).
+
+- [The Basics](#the-basics)
+	- [Use Bottom-To-Top Object Ordering](#use-bottom-to-top-object-ordering)
+- [An Example Configuration File](#an-example-configuration-file)
+- [Defining Process Objects](#defining-process-objects)
+- [Getting Your Code Into The Sandbox](#getting-your-code-into-the-sandbox)
+- [Defining Task Objects](#defining-task-objects)
+	- [SequentialTask: Running Processes in Parallel or Sequentially](#sequentialtask-running-processes-in-parallel-or-sequentially)
+	- [SimpleTask](#simpletask)
+	- [Combining tasks](#combining-tasks)
+- [Defining Job Objects](#defining-job-objects)
+- [The jobs List](#the-jobs-list)
+- [Basic Examples](#basic-examples)
+
+
+The Basics
+----------
+
+To run a job on Aurora, you must specify a configuration file that tells
+Aurora what it needs to know to schedule the job, what Mesos needs to
+run the tasks the job is made up of, and what Thermos needs to run the
+processes that make up the tasks. This file must have
+a `.aurora` suffix.
+
+A configuration file defines a collection of objects, along with parameter
+values for their attributes. An Aurora configuration file contains the
+following three types of objects:
+
+- Job
+- Task
+- Process
+
+A configuration also specifies a list of `Job` objects assigned
+to the variable `jobs`.
+
+- jobs (list of defined Jobs to run)
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a type-checked dictionary
+templating library called *Pystachio*, a powerful tool for
+configuration specification and reuse. Pystachio objects are tailored
+via {{}} surrounded templates.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the [*Aurora Configuration
+Reference*](../configuration/), without `import` statements - the
+Aurora config loader injects them automatically. Other than that, an `.aurora`
+file works like any other Python script.
+
+[*Aurora Configuration Reference*](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+### Use Bottom-To-Top Object Ordering
+
+A well-structured configuration starts with structural templates (if
+any). Structural templates encapsulate in their attributes all the
+differences between Jobs in the configuration that are not directly
+manipulated at the `Job` level, but typically at the `Process` or `Task`
+level - for example, when certain processes are invoked with slightly
+different settings or input.
+
+After structural templates, define, in order, `Process`es, `Task`s, and
+`Job`s.
+
+Structural template names should be *UpperCamelCased* and their
+instantiations are typically *UPPER\_SNAKE\_CASED*. `Process`, `Task`,
+and `Job` names are typically *lower\_snake\_cased*. Indentation is typically 2
+spaces.
+
+An Example Configuration File
+-----------------------------
+
+The following is a typical configuration file. Don't worry if there are
+parts you don't understand yet, but you may want to refer back to this
+as you read about its individual parts. Note that names surrounded by
+curly braces {{}} are template variables, which the system replaces with
+bound values for the variables.
+
+    # --- templates here ---
+	class Profile(Struct):
+	  package_version = Default(String, 'live')
+	  java_binary = Default(String, '/usr/lib/jvm/java-1.7.0-openjdk/bin/java')
+	  extra_jvm_options = Default(String, '')
+	  parent_environment = Default(String, 'prod')
+	  parent_serverset = Default(String,
+                                 '/foocorp/service/bird/{{parent_environment}}/bird')
+
+	# --- processes here ---
+	main = Process(
+	  name = 'application',
+	  cmdline = '{{profile.java_binary}} -server -Xmx1792m '
+	            '{{profile.extra_jvm_options}} '
+	            '-jar application.jar '
+	            '-upstreamService {{profile.parent_serverset}}'
+	)
+
+	# --- tasks ---
+	base_task = SequentialTask(
+	  name = 'application',
+	  processes = [
+	    Process(
+	      name = 'fetch',
+	      cmdline = 'curl -O https://packages.foocorp.com/{{profile.package_version}}/application.jar'),
+	  ]
+	)
+
+        # not always necessary but often useful to have separate task
+        # resource classes
+        staging_task = base_task(resources =
+                         Resources(cpu = 1.0,
+                                   ram = 2048*MB,
+                                   disk = 1*GB))
+	production_task = base_task(resources =
+                            Resources(cpu = 4.0,
+                                      ram = 2560*MB,
+                                      disk = 10*GB))
+
+	# --- job template ---
+	job_template = Job(
+	  name = 'application',
+	  role = 'myteam',
+	  contact = 'myteam-team@foocorp.com',
+	  instances = 20,
+	  service = True,
+	  task = production_task
+	)
+
+	# -- profile instantiations (if any) ---
+	PRODUCTION = Profile()
+	STAGING = Profile(
+	  extra_jvm_options = '-Xloggc:gc.log',
+	  parent_environment = 'staging'
+	)
+
+	# -- job instantiations --
+	jobs = [
+          job_template(cluster = 'cluster1', environment = 'prod')
+	               .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster2', environment = 'prod')
+	                .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster1',
+                        environment = 'staging',
+			service = False,
+			task = staging_task,
+			instances = 2)
+			.bind(profile = STAGING),
+	]
+
+## Defining Process Objects
+
+Processes are handled by the Thermos system. A process is a single
+executable step run as a part of an Aurora task, which consists of a
+bash-executable statement.
+
+The key (and required) `Process` attributes are:
+
+-   `name`: Any string which is a valid Unix filename (no slashes,
+    NULLs, or leading periods). The `name` value must be unique relative
+    to other Processes in a `Task`.
+-   `cmdline`: A command line run in a bash subshell, so you can use
+    bash scripts. Nothing is supplied for command-line arguments,
+    so `$*` is unspecified.
+
+Many tiny processes make managing configurations more difficult. For
+example, the following is a bad way to define processes.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip'
+    )
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+Since `cmdline` runs in a bash subshell, you can chain commands
+with `&&` or `||`.
+
+When defining a `Task` that is just a list of Processes run in a
+particular order, use `SequentialTask`, as described in the [*Defining*
+`Task` *Objects*](#defining-task-objects) section. The following simplifies and combines the
+above multiple `Process` definitions into just two.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip && '
+                'unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+`Process` also has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#process-objects).
+
+
+## Getting Your Code Into The Sandbox
+
+When using Aurora, you need to get your executable code into its "sandbox", specifically
+the Task sandbox where the code executes for the Processes that make up that Task.
+
+Each Task has a sandbox created when the Task starts and garbage
+collected when it finishes. All of a Task's processes run in its
+sandbox, so processes can share state by using a shared current
+working directory.
+
+Typically, you save this code somewhere. You then need to define a Process
+in your `.aurora` configuration file that fetches the code from that somewhere
+to where the agent can see it. For a public cloud, that can be anywhere public on
+the Internet, such as S3. For a private cloud internal storage, you need to put in
+on an accessible HDFS cluster or similar storage.
+
+The template for this Process is:
+
+    <name> = Process(
+      name = '<name>',
+      cmdline = '<command to copy and extract code archive into current working directory>'
+    )
+
+Note: Be sure the extracted code archive has an executable.
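+
+A minimal sketch of such a fetch Process, assuming a hypothetical HTTP
+location for the archive:
+
+    fetch_app = Process(
+      name = 'fetch_app',
+      # the URL is a placeholder; point it at wherever your archive actually lives
+      cmdline = 'curl -O https://packages.example.com/myteam/app.tar.gz && tar xf app.tar.gz')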
+
+## Getting Environment Variables Into The Sandbox
+
+Every time a process is forked, the Thermos executor checks for the existence of the
+`.thermos_profile` file; if it exists, the file is sourced.
+You can use this mechanism to pass environment variables to the sandbox.
+
+An example for this Process is:
+
+    setup_env = Process(
+      name = 'setup',
+      cmdline = '''cat <<EOF > .thermos_profile
+                   export RESULT=hello
+                   EOF'''
+    )
+
+    read_env = Process(
+      name = 'read',
+      cmdline = 'echo $RESULT'
+    )
+
+## Defining Task Objects
+
+Tasks are handled by Mesos. A task is a collection of processes that
+runs in a shared sandbox. It's the fundamental unit Aurora uses to
+schedule the datacenter; essentially what Aurora does is find places
+in the cluster to run tasks.
+
+The key (and required) parts of a Task are:
+
+-   `name`: A string giving the Task's name. By default, if a Task is
+    not given a name, it inherits the first name in its Process list.
+
+-   `processes`: An unordered list of Process objects bound to the Task.
+    The value of the optional `constraints` attribute affects the
+    contents as a whole. Currently, the only constraint, `order`, determines if
+    the processes run in parallel or sequentially.
+
+-   `resources`: A `Resource` object defining the Task's resource
+    footprint. A `Resource` object has three attributes:
+    -   `cpu`: A Float, the fractional number of cores the Task requires.
+    -   `ram`: An Integer, RAM bytes the Task requires.
+    -   `disk`: An Integer, disk bytes the Task requires.
+
+A basic Task definition looks like:
+
+    Task(
+        name="hello_world",
+        processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+        resources=Resources(cpu = 1.0,
+                            ram = 1*GB,
+                            disk = 1*GB))
+
+A Task has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#task-object).
+
+
+### SequentialTask: Running Processes in Parallel or Sequentially
+
+By default, a Task with several Processes runs them in parallel. There
+are two ways to run Processes sequentially:
+
+-   Include an `order` constraint in the Task definition's `constraints`
+    attribute whose arguments specify the processes' run order:
+
+        Task( ... processes=[process1, process2, process3],
+              constraints = order(process1, process2, process3), ...)
+
+-   Use `SequentialTask` instead of `Task`; it automatically runs
+    processes in the order specified in the `processes` attribute. No
+    `constraint` parameter is needed:
+
+        SequentialTask( ... processes=[process1, process2, process3] ...)
+
+### SimpleTask
+
+For quickly creating simple tasks, use the `SimpleTask` helper. It
+creates a basic task from a provided name and command line using a
+default set of resources. For example, in a `.aurora` configuration
+file:
+
+    SimpleTask(name="hello_world", command="echo hello world")
+
+is equivalent to
+
+    Task(name="hello_world",
+         processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+         resources=Resources(cpu = 1.0,
+                             ram = 1*GB,
+                             disk = 1*GB))
+
+The simplest idiomatic Job configuration thus becomes:
+
+    import os
+    hello_world_job = Job(
+      task=SimpleTask(name="hello_world", command="echo hello world"),
+      role=os.getenv('USER'),
+      cluster="cluster1")
+
+When written to `hello_world.aurora`, you invoke it with a simple
+`aurora job create cluster1/$USER/test/hello_world hello_world.aurora`.
+
+### Combining tasks
+
+`Tasks.concat` (synonym: `concat_tasks`) and
+`Tasks.combine` (synonym: `combine_tasks`) merge multiple Task definitions
+into a single Task. It may be easier to define complex Jobs
+as smaller constituent Tasks. But since a Job only includes a single
+Task, the subtasks must be combined before using them in a Job.
+Smaller Tasks can also be reused between Jobs, instead of having to
+repeat their definition for multiple Jobs.
+
+With both methods, the merged Task takes the first Task's name. The
+difference between the two is the result Task's process ordering.
+
+-   `Tasks.combine` runs its subtasks' processes in no particular order.
+    The new Task's resource consumption is the sum of all its subtasks'
+    consumption.
+
+-   `Tasks.concat` runs its subtasks in the order supplied, with each
+    subtask's processes run serially between tasks. It is analogous to
+    the `order` constraint helper, except at the Task level instead of
+    the Process level. The new Task's resource consumption is the
+    maximum value specified by any subtask for each Resource attribute
+    (cpu, ram and disk).
+
+For example, given the following:
+
+    setup_task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper],
+      # It is important to note that {{Tasks.concat}} has
+      # no effect on the ordering of the processes within a task;
+      # hence the necessity of the {{order}} statement below
+      # (otherwise, the order in which {{download_interpreter}}
+      # and {{update_zookeeper}} run will be non-deterministic)
+      constraints=order(download_interpreter, update_zookeeper),
+      ...
+    )
+
+    run_task = SequentialTask(
+      ...
+      processes=[download_application, start_application],
+      ...
+    )
+
+    combined_task = Tasks.concat(setup_task, run_task)
+
+The `Tasks.concat` command merges the two Tasks into a single Task and
+ensures all processes in `setup_task` run before the processes
+in `run_task`. Conceptually, the task is reduced to:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper,
+                        download_application, start_application),
+      ...
+    )
+
+In the case of `Tasks.combine`, the two schedules run in parallel:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper) +
+                        order(download_application, start_application),
+      ...
+    )
+
+In the latter case, each of the two sequences may operate in parallel.
+Of course, this may not be the intended behavior (for example, if
+the `start_application` Process implicitly relies
+upon `download_interpreter`). Make sure you understand the difference
+between using one or the other.
+
+## Defining Job Objects
+
+A job is a group of identical tasks that Aurora can run in a Mesos cluster.
+
+A `Job` object is defined by the values of several attributes, some
+required and some optional. The required attributes are:
+
+-   `task`: Task object to bind to this job. Note that a Job can
+    only take a single Task.
+
+-   `role`: Job's role account; in other words, the user account to run
+    the job as on a Mesos cluster machine. A common value is
+    `os.getenv('USER')`, which uses a Python call to get the user who
+    submits the job request. The other common value is the service
+    account that runs the job, e.g. `www-data`.
+
+-   `environment`: Job's environment, typical values
+    are `devel`, `test`, or `prod`.
+
+-   `cluster`: Aurora cluster to schedule the job in, defined in
+    `/etc/aurora/clusters.json` or `~/.clusters.json`. You can specify
+    jobs where the only difference is the `cluster`, then at run time
+    only run the Job whose job key includes your desired cluster's name.
+
+You usually see a `name` parameter. By default, `name` inherits its
+value from the Job's associated Task object, but you can override this
+default. For these four parameters, a Job definition might look like:
+
+    foo_job = Job( name = 'foo', cluster = 'cluster1',
+              role = os.getenv('USER'), environment = 'prod',
+              task = foo_task)
+
+In addition to the required attributes, there are several optional
+attributes. Details can be found in the [Aurora Configuration Reference](../configuration/#job-objects).
+
+
+## The jobs List
+
+At the end of your `.aurora` file, you need to specify a list of the
+file's defined Jobs. For example, the following exports the jobs `job1`,
+`job2`, and `job3`.
+
+    jobs = [job1, job2, job3]
+
+This allows the aurora client to invoke commands on those jobs, such as
+starting, updating, or killing them.
+
+
+
+Basic Examples
+==============
+
+These are provided to give a basic understanding of simple Aurora jobs.
+
+### hello_world.aurora
+
+Put the following in a file named `hello_world.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    import os
+    hello_world_process = Process(name = 'hello_world', cmdline = 'echo hello world')
+
+    hello_world_task = Task(
+      resources = Resources(cpu = 0.1, ram = 16 * MB, disk = 16 * MB),
+      processes = [hello_world_process])
+
+    hello_world_job = Job(
+      cluster = 'cluster1',
+      role = os.getenv('USER'),
+      task = hello_world_task)
+
+    jobs = [hello_world_job]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world hello_world.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world
+
+### Environment Tailoring
+
+Put the following in a file named `hello_world_productionized.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    include('hello_world.aurora')
+
+    production_resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 2 * GB)
+    staging_resources = Resources(cpu = 0.1, ram = 32 * MB, disk = 512 * MB)
+    hello_world_template = hello_world_job(
+        name = "hello_world-{{cluster}}",
+        task = hello_world_task(resources = production_resources))
+
+    jobs = [
+      # production jobs
+      hello_world_template(cluster = 'cluster1', instances = 25),
+      hello_world_template(cluster = 'cluster2', instances = 15),
+
+      # staging jobs
+      hello_world_template(
+        cluster = 'local',
+        instances = 1,
+        task = hello_world_task(resources = staging_resources)),
+    ]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world-cluster1 hello_world_productionized.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world-cluster1
\ No newline at end of file
diff --git a/source/documentation/0.18.1/reference/configuration.md b/source/documentation/0.18.1/reference/configuration.md
new file mode 100644
index 0000000..a6bf4b3
--- /dev/null
+++ b/source/documentation/0.18.1/reference/configuration.md
@@ -0,0 +1,614 @@
+Aurora Configuration Reference
+==============================
+
+Don't know where to start? The Aurora configuration schema is very
+powerful, and configurations can become quite complex for advanced use
+cases.
+
+For examples of simple configurations to get something up and running
+quickly, check out the [Tutorial](../../getting-started/tutorial/). When you feel comfortable with the basics, move
+on to the [Configuration Tutorial](../configuration-tutorial/) for more in-depth coverage of
+configuration design.
+
+- [Process Schema](#process-schema)
+    - [Process Objects](#process-objects)
+- [Task Schema](#task-schema)
+    - [Task Object](#task-object)
+    - [Constraint Object](#constraint-object)
+    - [Resource Object](#resource-object)
+- [Job Schema](#job-schema)
+    - [Job Objects](#job-objects)
+    - [UpdateConfig Objects](#updateconfig-objects)
+    - [HealthCheckConfig Objects](#healthcheckconfig-objects)
+    - [Announcer Objects](#announcer-objects)
+    - [Container Objects](#container)
+    - [LifecycleConfig Objects](#lifecycleconfig-objects)
+- [Specifying Scheduling Constraints](#specifying-scheduling-constraints)
+- [Template Namespaces](#template-namespaces)
+    - [mesos Namespace](#mesos-namespace)
+    - [thermos Namespace](#thermos-namespace)
+
+
+Process Schema
+==============
+
+Process objects consist of required `name` and `cmdline` attributes. You can customize Process
+behavior with its optional attributes. Remember, Processes are handled by Thermos.
+
+### Process Objects
+
+  **Attribute Name**  | **Type**    | **Description**
+  ------------------- | :---------: | ---------------------------------
+   **name**           | String      | Process name (Required)
+   **cmdline**        | String      | Command line (Required)
+   **max_failures**   | Integer     | Maximum process failures (Default: 1)
+   **daemon**         | Boolean     | When True, this is a daemon process. (Default: False)
+   **ephemeral**      | Boolean     | When True, this is an ephemeral process. (Default: False)
+   **min_duration**   | Integer     | Minimum duration between process restarts in seconds. (Default: 15)
+   **final**          | Boolean     | When True, this process is a finalizing one that should run last. (Default: False)
+   **logger**         | Logger      | Struct defining the log behavior for the process. (Default: Empty)
+
+#### name
+
+The name is any valid UNIX filename string (specifically no
+slashes, NULLs or leading periods). Within a Task object, each Process name
+must be unique.
+
+#### cmdline
+
+The command line run by the process. The command line is invoked in a bash
+subshell, so it can involve full-blown bash scripts. However, nothing is
+supplied for command-line arguments so `$*` is unspecified.
+
+#### max_failures
+
+The maximum number of failures (non-zero exit statuses) this process can
+have before being marked permanently failed and not retried. If a
+process permanently fails, Thermos looks at the failure limit of the task
+containing the process (usually 1) to determine if the task has
+failed as well.
+
+Setting `max_failures` to 0 makes the process retry
+indefinitely until it achieves a successful (zero) exit status.
+It retries at most once every `min_duration` seconds to prevent
+an effective denial of service attack on the coordinating Thermos scheduler.
+
+#### daemon
+
+By default, Thermos processes are non-daemon. If `daemon` is set to True, a
+successful (zero) exit status does not prevent future process runs.
+Instead, the process reinvokes after `min_duration` seconds.
+However, the maximum failure limit still applies. A combination of
+`daemon=True` and `max_failures=0` causes a process to retry
+indefinitely regardless of exit status. This should be avoided
+for very short-lived processes because of the accumulation of
+checkpointed state for each process run. When running in Mesos
+specifically, `max_failures` is capped at 100.
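+
+For illustration, a sketch of a process that is re-run continuously
+regardless of exit status (the command and values are made up):
+
+    # re-runs at most once every 30 seconds, indefinitely
+    keepalive = Process(
+      name = 'keepalive',
+      cmdline = 'curl -sf http://localhost:8080/ping',
+      daemon = True,
+      max_failures = 0,
+      min_duration = 30)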
+
+#### ephemeral
+
+By default, Thermos processes are non-ephemeral. If `ephemeral` is set to
+True, the process' status is not used to determine if its containing task
+has completed. For example, consider a task with a non-ephemeral
+webserver process and an ephemeral logsaver process
+that periodically checkpoints its log files to a centralized data store.
+The task is considered finished once the webserver process has
+completed, regardless of the logsaver's current status.
+
+#### min_duration
+
+Processes may succeed or fail multiple times during a single task's
+duration. Each of these is called a *process run*. `min_duration` is
+the minimum number of seconds the scheduler waits before running the
+same process.
+
+#### final
+
+Processes can be grouped into two classes: ordinary processes and
+finalizing processes. By default, Thermos processes are ordinary. They
+run as long as the task is considered healthy (i.e., no failure
+limits have been reached.) But once all regular Thermos processes
+finish or the task reaches a certain failure threshold, it
+moves into a "finalization" stage and runs all finalizing
+processes. These are typically processes necessary for cleaning up the
+task, such as log checkpointers, or perhaps e-mail notifications that
+the task completed.
+
+Finalizing processes may not depend upon ordinary processes or
+vice-versa; however, finalizing processes may depend upon other
+finalizing processes and otherwise run as a typical process
+schedule.
+
+#### logger
+
+The default behavior of Thermos is to store stderr/stdout logs in files which grow unbounded.
+In the event that you have large log volume, you may want to configure Thermos to automatically
+rotate logs after they grow to a certain size, which can prevent your job from using more than its
+allocated disk space.
+
+Logger objects specify a `destination` for Process logs which is, by default, `file` - a pair of
+`stdout` and `stderr` files. It's also possible to specify `console` to send logs to
+the Process stdout and stderr streams, `none` to suppress any log output, or `both` to send logs to
+files and console streams.
+
+The default Logger `mode` is `standard` which lets the stdout and stderr streams grow without bound.
+
+  **Attribute Name**  | **Type**          | **Description**
+  ------------------- | :---------------: | ---------------------------------
+   **destination**    | LoggerDestination | Destination of logs. (Default: `file`)
+   **mode**           | LoggerMode        | Mode of the logger. (Default: `standard`)
+   **rotate**         | RotatePolicy      | An optional rotation policy. (Default: `Empty`)
+
+A RotatePolicy describes log rotation behavior when `mode` is set to `rotate`; it is ignored
+otherwise. If `rotate` is `Empty` or `RotatePolicy()` while `mode` is set to `rotate`, the
+defaults below are used.
+
+  **Attribute Name**  | **Type**     | **Description**
+  ------------------- | :----------: | ---------------------------------
+   **log_size**       | Integer      | Maximum size (in bytes) of an individual log file. (Default: 100 MiB)
+   **backups**        | Integer      | The maximum number of backups to retain. (Default: 5)
+
+An example process configuration is as follows:
+
+        process = Process(
+          name='process',
+          logger=Logger(
+            destination=LoggerDestination('both'),
+            mode=LoggerMode('rotate'),
+            rotate=RotatePolicy(log_size=5*MB, backups=5)
+          )
+        )
+
+Task Schema
+===========
+
+Tasks fundamentally consist of a `name` and a list of Process objects stored as the
+value of the `processes` attribute. Processes can be further constrained with
+`constraints`. By default, `name`'s value inherits from the first Process in the
+`processes` list, so for simple `Task` objects with one Process, `name`
+can be omitted. In Mesos, `resources` is also required.
+
+### Task Object
+
+   **param**               | **type**                         | **description**
+   ---------               | :---------:                      | ---------------
+   ```name```              | String                           | Process name (Required) (Default: ```processes[0].name```)
+   ```processes```         | List of ```Process``` objects    | List of ```Process``` objects bound to this task. (Required)
+   ```constraints```       | List of ```Constraint``` objects | List of ```Constraint``` objects constraining processes.
+   ```resources```         | ```Resource``` object            | Resource footprint. (Required)
+   ```max_failures```      | Integer                          | Maximum process failures before being considered failed (Default: 1)
+   ```max_concurrency```   | Integer                          | Maximum number of concurrent processes (Default: 0, unlimited concurrency.)
+   ```finalization_wait``` | Integer                          | Amount of time allocated for finalizing processes, in seconds. (Default: 30)
+
+#### name
+`name` is a string denoting the name of this task. It defaults to the name of the first Process in
+the list of Processes associated with the `processes` attribute.
+
+#### processes
+
+`processes` is an unordered list of `Process` objects. To constrain the order
+in which they run, use `constraints`.
+
+##### constraints
+
+A list of `Constraint` objects. Currently it supports only one type,
+the `order` constraint. `order` is a list of process names
+that should run in the order given. For example,
+
+        process = Process(cmdline = "echo hello {{name}}")
+        task = Task(name = "echoes",
+                    processes = [process(name = "jim"), process(name = "bob")],
+                    constraints = [Constraint(order = ["jim", "bob"])])
+
+Constraints can be supplied ad-hoc and in duplicate. Not all
+Processes need be constrained, however Tasks with cycles are
+rejected by the Thermos scheduler.
+
+Use the `order` function as shorthand to generate `Constraint` lists.
+The following:
+
+        order(process1, process2)
+
+is shorthand for
+
+        [Constraint(order = [process1.name(), process2.name()])]
+
+The `order` function accepts Process name strings `('foo', 'bar')` or the processes
+themselves, e.g. `foo=Process(name='foo', ...)`, `bar=Process(name='bar', ...)`,
+`constraints=order(foo, bar)`.
+
+#### resources
+
+Takes a `Resource` object, which specifies the amounts of CPU, memory, and disk space resources
+to allocate to the Task.
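+
+For example (the values are illustrative):
+
+        # 2 cores, 2 GiB of RAM, 4 GiB of disk
+        resources = Resources(cpu = 2.0, ram = 2*GB, disk = 4*GB)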
+
+#### max_failures
+
+`max_failures` is the number of failed processes needed for the `Task` to be
+marked as failed.
+
+For example, assume a Task has two Processes and a `max_failures` value of `2`:
+
+        template = Process(max_failures=10)
+        task = Task(
+          name = "fail",
+          processes = [
+             template(name = "failing", cmdline = "exit 1"),
+             template(name = "succeeding", cmdline = "exit 0")
+          ],
+          max_failures=2)
+
+The `failing` Process could fail 10 times before being marked as permanently
+failed, and the `succeeding` Process could succeed on the first run. However,
+the task would succeed despite only allowing for two failed processes. To be more
+specific, there would be 10 failed process runs yet 1 failed process. Both processes
+would have to fail for the Task to fail.
+
+#### max_concurrency
+
+For Tasks with a number of expensive but otherwise independent
+processes, you may want to limit the amount of concurrency
+the Thermos scheduler provides rather than artificially constraining
+it via `order` constraints. For example, a test framework may
+generate a task with 100 test run processes, but wants to run it on
+a machine with only 4 cores. You can limit the amount of parallelism to
+4 by setting `max_concurrency=4` in your task configuration.
+
+For example, the following task spawns 180 Processes ("mappers")
+to compute individual elements of a 180 degree sine table, all dependent
+upon one final Process ("reducer") to tabulate the results:
+
+    def make_mapper(id):
+      return Process(
+        name = "mapper%03d" % id,
+        cmdline = "echo 'scale=50;s(%d*4*a(1)/180)' | bc -l > "
+                  "temp.sine_table.%03d" % (id, id))
+
+    def make_reducer():
+      return Process(name = "reducer",
+                     cmdline = "cat temp.* | nl > sine_table.txt && rm -f temp.*")
+
+    processes = map(make_mapper, range(180))
+
+    task = Task(
+      name = "mapreduce",
+      processes = processes + [make_reducer()],
+      constraints = [Constraint(order = [mapper.name(), 'reducer']) for mapper
+                     in processes],
+      max_concurrency = 8)
+
+#### finalization_wait
+
+Process execution is organized into three active stages: `ACTIVE`,
+`CLEANING`, and `FINALIZING`. The `ACTIVE` stage is when ordinary processes run.
+This stage lasts as long as Processes are running and the Task is healthy.
+The moment either all Processes have finished successfully or the Task has reached a
+maximum Process failure limit, it goes into the `CLEANING` stage and sends
+SIGTERMs to all currently running Processes and their process trees.
+Once all Processes have terminated, the Task goes into the `FINALIZING` stage
+and invokes the schedule of all Processes with the "final" attribute set to True.
+
+This whole process from the end of `ACTIVE` stage to the end of `FINALIZING`
+must happen within `finalization_wait` seconds. If it does not
+finish during that time, all remaining Processes are sent SIGKILLs
+(or if they depend upon uncompleted Processes, are
+never invoked.)
+
+When running on Aurora, the `finalization_wait` is capped at 60 seconds.
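+
+As a sketch (names and values are illustrative), a finalizing log-archiving
+process combined with an explicit finalization window might look like:
+
+    archive_logs = Process(
+      name = 'archive_logs',
+      cmdline = 'tar czf logs.tar.gz *.log',  # stand-in cleanup command
+      final = True)
+
+    task = Task(
+      name = 'app',
+      processes = [main, archive_logs],  # 'main' stands in for the ordinary process
+      resources = Resources(cpu = 1.0, ram = 1*GB, disk = 1*GB),
+      finalization_wait = 45)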
+
+### Constraint Object
+
+Current constraint objects only support a single ordering constraint, `order`,
+which specifies its processes run sequentially in the order given. By
+default, all processes run in parallel when bound to a `Task` without
+ordering constraints.
+
+   param | type           | description
+   ----- | :----:         | -----------
+   order | List of String | List of processes by name (String) that should be run serially.
+
+### Resource Object
+
+Specifies the amount of CPU, RAM, and disk resources the task needs. See the
+[Resource Isolation document](../../features/resource-isolation/) for suggested values and to understand how
+resources are allocated.
+
+  param      | type    | description
+  -----      | :----:  | -----------
+  ```cpu```  | Float   | Fractional number of cores required by the task.
+  ```ram```  | Integer | Bytes of RAM required by the task.
+  ```disk``` | Integer | Bytes of disk required by the task.
+  ```gpu```  | Integer | Number of GPU cores required by the task.
+
+
+Job Schema
+==========
+
+### Job Objects
+
+*Note: Specifying a ```Container``` object as the value of the ```container``` property is
+  deprecated in favor of setting its value directly to the appropriate ```Docker``` or ```Mesos```
+  container type*
+
+*Note: Specifying preemption behavior of tasks through `production` flag is deprecated in favor of
+  electing appropriate task tier via `tier` attribute.*
+
+   name | type | description
+   ------ | :-------: | -------
+  ```task``` | Task | The Task object to bind to this job. Required.
+  ```name``` | String | Job name. (Default: inherited from the task attribute's name)
+  ```role``` | String | Job role account. Required.
+  ```cluster``` | String | Cluster in which this job is scheduled. Required.
+  ```environment``` | String | Job environment, default ```devel```. Must be one of ```prod```, ```devel```, ```test``` or ```staging<number>```.
+  ```contact``` | String | Best email address to reach the owner of the job. For production jobs, this is usually a team mailing list.
+  ```instances```| Integer | Number of instances (sometimes referred to as replicas or shards) of the task to create. (Default: 1)
+  ```cron_schedule``` | String | Cron schedule in cron format. May only be used with non-service jobs. See [Cron Jobs](../../features/cron-jobs/) for more information. Default: None (not a cron job.)
+  ```cron_collision_policy``` | String | Policy to use when a cron job is triggered while a previous run is still active. KILL_EXISTING: kill the previous run and schedule the new run. CANCEL_NEW: let the previous run continue and cancel the new run. (Default: KILL_EXISTING)
+  ```update_config``` | ```UpdateConfig``` object | Parameters for controlling the rate and policy of rolling updates.
+  ```constraints``` | dict | Scheduling constraints for the tasks. See the section on the [constraint specification language](#specifying-scheduling-constraints)
+  ```service``` | Boolean | If True, restart tasks regardless of success or failure. (Default: False)
+  ```max_task_failures``` | Integer | Maximum number of failures after which the task is considered to have failed (Default: 1) Set to -1 to allow for infinite failures
+  ```priority``` | Integer | Preemption priority to give the task (Default 0). Tasks with higher priorities may preempt tasks at lower priorities.
+  ```production``` | Boolean |  (Deprecated) Whether or not this is a production task that may [preempt](../../features/multitenancy/#preemption) other tasks (Default: False). Production job role must have the appropriate [quota](../../features/multitenancy/#preemption).
+  ```health_check_config``` | ```HealthCheckConfig``` object | Parameters for controlling a task's health checks. HTTP health check is only used if a  health port was assigned with a command line wildcard.
+  ```container``` | Choice of ```Container```, ```Docker``` or ```Mesos``` object | An optional container to run all processes inside of.
+  ```lifecycle``` | ```LifecycleConfig``` object | An optional task lifecycle configuration that dictates commands to be executed on startup/teardown.  HTTP lifecycle is enabled by default if the "health" port is requested.  See [LifecycleConfig Objects](#lifecycleconfig-objects) for more information.
+  ```tier``` | String | Task tier type. The default scheduler tier configuration allows for 3 tiers: `revocable`, `preemptible`, and `preferred`. If a tier is not elected, Aurora assigns the task to a tier based on its choice of `production` (that is `preferred` for production and `preemptible` for non-production jobs). See the section on [Configuration Tiers](../../features/multitenancy/#configuration-tiers) for more information.
+  ```announce``` | ```Announcer``` object | Optionally enable Zookeeper ServerSet announcements. See [Announcer Objects](#announcer-objects) for more information.
+  ```enable_hooks``` | Boolean | Whether to enable [Client Hooks](../client-hooks/) for this job. (Default: False)
+
+
+### UpdateConfig Objects
+
+Parameters for controlling the rate and policy of rolling updates.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+| ```watch_secs```             | Integer  | Minimum number of seconds a shard must remain in ```RUNNING``` state before considered a success (Default: 45)
+| ```max_per_shard_failures``` | Integer  | Maximum number of restarts per shard during update. Increments total failure count when this limit is exceeded. (Default: 0)
+| ```max_total_failures```     | Integer  | Maximum number of shard failures to be tolerated in total during an update. Cannot be greater than or equal to the total number of tasks in a job. (Default: 0)
+| ```rollback_on_failure```    | boolean  | When False, prevents auto rollback of a failed update (Default: True)
+| ```wait_for_batch_completion```| boolean | When True, all threads from a given batch will be blocked from picking up new instances until the entire batch is updated. This essentially simulates the legacy sequential updater algorithm. (Default: False)
+| ```pulse_interval_secs```    | Integer  |  Indicates a [coordinated update](../../features/job-updates/#coordinated-job-updates). If no pulses are received within the provided interval the update will be blocked. Beta-updater only. Will fail on submission when used with client updater. (Default: None)
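+
+For example, a fairly conservative update policy might look like this
+(values are illustrative):
+
+    update_config = UpdateConfig(
+      batch_size = 2,
+      watch_secs = 60,
+      max_per_shard_failures = 1,
+      max_total_failures = 0)
+
+    # then pass it to the Job, e.g. Job(..., update_config = update_config)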
+
+### HealthCheckConfig Objects
+
+Parameters for controlling a task's health checks via HTTP or a shell command.
+
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```health_checker```           | HealthCheckerConfig | Configure what kind of health check to use.
+| ```initial_interval_secs```    | Integer   | Initial grace period (during which health-check failures are ignored) while performing health checks. (Default: 15)
+| ```interval_secs```            | Integer   | Interval on which to check the task's health. (Default: 10)
+| ```max_consecutive_failures``` | Integer   | Maximum number of consecutive failures that will be tolerated before considering a task unhealthy (Default: 0)
+| ```min_consecutive_successes``` | Integer   | Minimum number of consecutive successful health checks required before considering a task healthy (Default: 1)
+| ```timeout_secs```             | Integer   | Health check timeout. (Default: 1)
+
+### HealthCheckerConfig Objects
+| param                          | type                | description
+| -------                        | :-------:           | --------
+| ```http```                     | HttpHealthChecker  | Configure health check to use HTTP. (Default)
+| ```shell```                    | ShellHealthChecker | Configure health check via a shell command.
+
+### HttpHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```endpoint```                 | String    | HTTP endpoint to check (Default: /health)
+| ```expected_response```        | String    | If not empty, fail the HTTP health check if the response differs. Case insensitive. (Default: ok)
+| ```expected_response_code```   | Integer   | If not zero, fail the HTTP health check if the response code differs. (Default: 0)
+
+### ShellHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```shell_command```            | String    | An alternative to HTTP health checking. Specifies a shell command that will be executed. Any non-zero exit status will be interpreted as a health check failure.
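+
+Putting these together, a sketch of a shell-based health check (the check
+script itself is a placeholder):
+
+    health_check_config = HealthCheckConfig(
+      health_checker = HealthCheckerConfig(
+        shell = ShellHealthChecker(shell_command = './check_health.sh')),  # placeholder script
+      initial_interval_secs = 30,
+      interval_secs = 15,
+      max_consecutive_failures = 3)
+
+    # then pass it to the Job, e.g. Job(..., health_check_config = health_check_config)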
+
+
+### Announcer Objects
+
+If the `announce` field in the Job configuration is set, each task will be
+registered in the ServerSet `/aurora/role/environment/jobname` in the
+zookeeper ensemble configured by the executor (which can optionally be overridden by specifying
+the `zk_path` parameter).  If no Announcer object is specified,
+no announcement will take place.  For more information about ServerSets, see the [Service Discovery](../../features/service-discovery/)
+documentation.
+
+By default, the hostname in the registered endpoints will be the `--hostname` parameter
+that is passed to the mesos agent. To override the hostname value, the executor can be started
+with `--announcer-hostname=<overridden_value>`. If you decide to use `--announcer-hostname` and
+the overridden value needs to change for every executor, the executor has to be started inside a wrapper; see [Executor Wrapper](../../operations/configuration/#thermos-executor-wrapper).
+
+For example, if you want the hostname in the endpoint to be an IP address instead of the hostname,
+the `--hostname` parameter to the mesos agent can be set to the machine IP or the executor can
+be started with `--announcer-hostname=<host_ip>` while wrapping the executor inside a script.
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```primary_port```             | String    | Which named port to register as the primary endpoint in the ServerSet (Default: `http`)
+| ```portmap```                  | dict      | A mapping of additional endpoints to be announced in the ServerSet (Default: `{ 'aurora': '{{primary_port}}' }`)
+| ```zk_path```                  | String    | Zookeeper serverset path override (executor must be started with the `--announcer-allow-custom-serverset-path` parameter)
+
+#### Port aliasing with the Announcer `portmap`
+
+The primary endpoint registered in the ServerSet is the one allocated to the port
+specified by the `primary_port` in the `Announcer` object, by default
+the `http` port.  This port can be referenced from anywhere within a configuration
+as `{{thermos.ports[http]}}`.
+
+Without the port map, each named port would be allocated a unique port number.
+The `portmap` allows two different named ports to be aliased together.  The default
+`portmap` aliases the `aurora` port (i.e. `{{thermos.ports[aurora]}}`) to
+the `http` port.  Even though the two ports can be referenced independently,
+only one port is allocated by Mesos.  Any port referenced in a `Process` object
+that is not in the portmap will be allocated dynamically by Mesos and announced as well.
+
+It is possible to use the portmap to alias names to static port numbers, e.g.
+`{'http': 80, 'https': 443, 'aurora': 'http'}`.  In this case, referencing
+`{{thermos.ports[aurora]}}` would look up `{{thermos.ports[http]}}` then
+find a static port 80.  No port would be requested of or allocated by Mesos.
+
+Static ports should be used cautiously as Aurora does nothing to prevent two
+tasks with the same static port allocations from being co-scheduled.
+External constraints such as agent attributes should be used to enforce such
+guarantees should they be needed.
+
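+As a sketch, the static-port aliasing described above could be expressed as follows (the port
+values are illustrative):
+
+```
+announce = Announcer(
+  primary_port = 'http',
+  # Alias 'http' and 'https' to static ports and 'aurora' to 'http'; no ports
+  # are requested from Mesos for these names.
+  portmap = {'http': 80, 'https': 443, 'aurora': 'http'}
+)
+```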
+
+### Container Objects
+
+Describes the container the job's processes will run inside. If not using Docker or the Mesos
+unified-container, the container can be omitted from your job config.
+
+  param          | type           | description
+  -----          | :----:         | -----------
+  ```mesos```    | Mesos          | A native Mesos container to use.
+  ```docker```   | Docker         | A Docker container to use (via Docker engine)
+
+### Mesos Object
+
+  param            | type                           | description
+  -----            | :----:                         | -----------
+  ```image```      | Choice(AppcImage, DockerImage) | An optional filesystem image to use within this container.
+  ```volumes```    | List(Volume)                   | An optional list of volume mounts for this container.
+
+### Volume Object
+
+  param                  | type     | description
+  -----                  | :----:   | -----------
+  ```container_path```   | String   | Mount point in the container.
+  ```volume_path```      | String   | Path on the host to mount.
+  ```mode```             | Enum     | Mode of the mount, can be 'RW' or 'RO'.
+
+### AppcImage
+
+Describes an AppC filesystem image.
+
+  param          | type   | description
+  -----          | :----: | -----------
+  ```name```     | String | The name of the appc image.
+  ```image_id``` | String | The [image id](https://github.com/appc/spec/blob/master/spec/aci.md#image-id) of the appc image.
+
+### DockerImage
+
+Describes a Docker filesystem image.
+
+  param      | type   | description
+  -----      | :----: | -----------
+  ```name``` | String | The name of the docker image.
+  ```tag```  | String | The tag that identifies the docker image.
+
+
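+A sketch of a job's `container` using the Mesos containerizer with a Docker filesystem image
+(the image name and tag are illustrative assumptions):
+
+```
+container = Container(
+  mesos = Mesos(
+    # An AppcImage(name = ..., image_id = ...) could be used here instead.
+    image = DockerImage(name = 'myservice', tag = '1.2.3')
+  )
+)
+```
+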
+### Docker Object
+
+*Note: In order to correctly execute processes inside a job, the Docker container must have Python 2.7 installed.*
+*Note: For a private docker registry, mesos requires the docker credential file to be named `.dockercfg`, even though docker may create a credential file with a different name on various platforms. Also, the `.dockercfg` file needs to be copied into the sandbox using the `-thermos_executor_resources` flag, specified when starting Aurora.*
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```image```      | String          | The name of the docker image to execute.  If the image does not exist locally it will be pulled with ```docker pull```.
+  ```parameters``` | List(Parameter) | Additional parameters to pass to the Docker engine.
+
+### Docker Parameter Object
+
+Docker CLI parameters. This needs to be enabled by the scheduler `-allow_docker_parameters` option.
+See [Docker Command Line Reference](https://docs.docker.com/reference/commandline/run/) for valid parameters.
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```name```       | String          | The name of the docker parameter. E.g. volume
+  ```value```      | String          | The value of the parameter. E.g. /usr/local/bin:/usr/bin:rw
+
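+As an illustrative sketch (the image and parameter values are examples only, and passing
+parameters requires `-allow_docker_parameters` on the scheduler):
+
+```
+container = Container(
+  docker = Docker(
+    image = 'python:2.7',
+    # Each Parameter corresponds to a flag of `docker run`.
+    parameters = [Parameter(name = 'volume', value = '/usr/local/bin:/usr/bin:rw')]
+  )
+)
+```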
+
+### LifecycleConfig Objects
+
+*Note: The only lifecycle configuration supported is the HTTP lifecycle via the HttpLifecycleConfig.*
+
+  param          | type                | description
+  -----          | :----:              | -----------
+  ```http```     | HttpLifecycleConfig | Configure the lifecycle manager to send lifecycle commands to the task via HTTP.
+
+### HttpLifecycleConfig Objects
+
+  param          | type            | description
+  -----          | :----:          | -----------
+  ```port```     | String          | The named port to send POST commands to (Default: health)
+  ```graceful_shutdown_endpoint``` | String | Endpoint to hit to indicate that a task should gracefully shutdown. (Default: /quitquitquit)
+  ```shutdown_endpoint``` | String | Endpoint to hit to give a task its final warning before being killed. (Default: /abortabortabort)
+
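+For example, a task exposing its shutdown hooks on the default `health` port could be configured
+with the sketch below (the endpoint values shown are the defaults, spelled out for clarity):
+
+```
+lifecycle = LifecycleConfig(
+  http = HttpLifecycleConfig(
+    port = 'health',
+    graceful_shutdown_endpoint = '/quitquitquit',
+    shutdown_endpoint = '/abortabortabort'
+  )
+)
+```
+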
+#### graceful_shutdown_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint to request that the task gracefully shut itself down.  This is a
+courtesy call before the `shutdown_endpoint` is invoked a fixed amount of
+time later.
+
+#### shutdown_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint as a final warning before the task is shut down.  If the task
+does not shut down on its own after this, it will be forcefully killed.
+
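+To make the behaviour concrete, the following minimal sketch (plain Python, not part of Aurora)
+shows a task-side handler that accepts POSTs on both default endpoints; a real service would begin
+draining its own work before exiting, and would bind to the named `health` port assigned by Aurora
+rather than a hard-coded one:
+
+```
+import threading
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+class LifecycleHandler(BaseHTTPRequestHandler):
+    def do_POST(self):
+        if self.path in ('/quitquitquit', '/abortabortabort'):
+            self.send_response(200)
+            self.end_headers()
+            # Stop serving from another thread; shutting down the server lets
+            # the process exit before the executor escalates to SIGTERM/SIGKILL.
+            threading.Thread(target=server.shutdown).start()
+        else:
+            self.send_response(404)
+            self.end_headers()
+
+server = HTTPServer(('0.0.0.0', 8080), LifecycleHandler)  # port is illustrative
+server.serve_forever()
+```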
+
+Specifying Scheduling Constraints
+=================================
+
+In the `Job` object there is a map `constraints` from String to String
+allowing the user to tailor the schedulability of tasks within the job.
+
+The constraint map's key is the name of the attribute on which we
+constrain Tasks within our Job. The value is how we constrain them.
+There are two types of constraints: *limit constraints* and *value
+constraints*.
+
+| constraint    | description
+| ------------- | --------------
+| Limit         | A string that specifies a limit for a constraint. Starts with <code>'limit:</code> followed by an Integer and closing single quote, such as ```'limit:1'```.
+| Value         | A string that specifies a value for a constraint. To include a list of values, separate the values using commas. To negate the values of a constraint, start with a ```!```.
+
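+For example, a job's `constraints` map might look like the following sketch (the attribute names
+must match attributes actually configured on your agents; the values here are illustrative):
+
+```
+constraints = {
+  'host': 'limit:1',       # limit constraint: at most one task per host
+  'rack': '!us-east-1a'    # value constraint: avoid agents whose 'rack' attribute matches
+}
+```
+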
+Further details can be found in the [Scheduling Constraints](../../features/constraints/) feature
+description.
+
+
+Template Namespaces
+===================
+
+Currently, a few Pystachio namespaces have special semantics. Using them
+in your configuration allows you to tailor application behavior
+through environment introspection or to interact in special ways with the
+Aurora client or Aurora-provided services.
+
+### mesos Namespace
+
+The `mesos` namespace contains variables which relate to the `mesos` agent
+which launched the task. The `instance` variable can be used
+to distinguish between Task replicas.
+
+| variable name     | type       | description
+| --------------- | :--------: | -------------
+| ```instance```    | Integer    | The instance number of the created task. A job with 5 replicas has instance numbers 0, 1, 2, 3, and 4.
+| ```hostname``` | String | The instance hostname that the task was launched on.
+
+Please note, there is no uniqueness guarantee for `instance` in the presence of
+network partitions. If that is required, it should be baked in at the application
+level using a distributed coordination service such as Zookeeper.
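+
+A sketch of using these variables inside a `Process` command line (the binary and flag names are
+illustrative):
+
+```
+collector = Process(
+  name = 'collector',
+  cmdline = './collect_stats --shard={{mesos.instance}} --source-host={{mesos.hostname}}'
+)
+```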
+
+### thermos Namespace
+
+The `thermos` namespace contains variables that work directly on the
+Thermos platform in addition to Aurora. This namespace is fully
+compatible with Tasks invoked via the `thermos` CLI.
+
+| variable      | type                     | description                        |
+| :----------:  | ---------                | ------------                       |
+| ```ports```   | map of string to Integer | A map of names to port numbers     |
+| ```task_id``` | string                   | The task ID assigned to this task. |
+
+The `thermos.ports` namespace is automatically populated by Aurora when
+invoking tasks on Mesos. When running the `thermos` command directly,
+these ports must be explicitly mapped with the `-P` option.
+
+For example, if `{{thermos.ports[http]}}` is specified in a `Process`
+configuration, it is automatically extracted and populated by
+Aurora, but must be specified with, for example, `thermos -P http:12345`
+to map `http` to port 12345 when running via the CLI.
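+
+A sketch of a `Process` that binds to the dynamically assigned `http` port (the server binary is
+an illustrative assumption):
+
+```
+server = Process(
+  name = 'server',
+  cmdline = './my_server --port={{thermos.ports[http]}}'
+)
+```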
diff --git a/source/documentation/0.18.1/reference/observer-configuration.md b/source/documentation/0.18.1/reference/observer-configuration.md
new file mode 100644
index 0000000..0126295
--- /dev/null
+++ b/source/documentation/0.18.1/reference/observer-configuration.md
@@ -0,0 +1,89 @@
+# Observer Configuration Reference
+
+The Aurora/Thermos observer can take a variety of configuration options through command-line arguments.
+A list of the available options can be seen by running `thermos_observer --long-help`.
+
+Please refer to the [Operator Configuration Guide](../../operations/configuration/) for details on how
+to properly set the most important options.
+
+```
+$ thermos_observer.pex --long-help
+Options:
+  -h, --help, --short-help
+                        show this help message and exit.
+  --long-help           show options from all registered modules, not just the
+                        __main__ module.
+  --mesos-root=MESOS_ROOT
+                        The mesos root directory to search for Thermos
+                        executor sandboxes [default: /var/lib/mesos]
+  --ip=IP               The IP address the observer will bind to. [default:
+                        0.0.0.0]
+  --port=PORT           The port on which the observer should listen.
+                        [default: 1338]
+  --polling_interval_secs=POLLING_INTERVAL_SECS
+                        The number of seconds between observer refresh
+                        attempts. [default: 5]
+  --task_process_collection_interval_secs=TASK_PROCESS_COLLECTION_INTERVAL_SECS
+                        The number of seconds between per task process
+                        resource collections. [default: 20]
+  --task_disk_collection_interval_secs=TASK_DISK_COLLECTION_INTERVAL_SECS
+                        The number of seconds between per task disk resource
+                        collections. [default: 60]
+
+  From module twitter.common.app:
+    --app_daemonize     Daemonize this application. [default: False]
+    --app_profile_output=FILENAME
+                        Dump the profiling output to a binary profiling
+                        format. [default: None]
+    --app_daemon_stderr=TWITTER_COMMON_APP_DAEMON_STDERR
+                        Direct this app's stderr to this file if daemonized.
+                        [default: /dev/null]
+    --app_debug         Print extra debugging information during application
+                        initialization. [default: False]
+    --app_rc_filename   Print the filename for the rc file and quit. [default:
+                        False]
+    --app_daemon_stdout=TWITTER_COMMON_APP_DAEMON_STDOUT
+                        Direct this app's stdout to this file if daemonized.
+                        [default: /dev/null]
+    --app_profiling     Run profiler on the code while it runs.  Note this can
+                        cause slowdowns. [default: False]
+    --app_ignore_rc_file
+                        Ignore default arguments from the rc file. [default:
+                        False]
+    --app_pidfile=TWITTER_COMMON_APP_PIDFILE
+                        The pidfile to use if --app_daemonize is specified.
+                        [default: None]
+
+  From module twitter.common.log.options:
+    --log_to_stdout=[scheme:]LEVEL
+                        OBSOLETE - legacy flag, use --log_to_stderr instead.
+                        [default: ERROR]
+    --log_to_stderr=[scheme:]LEVEL
+                        The level at which logging to stderr [default: ERROR].
+                        Takes either LEVEL or scheme:LEVEL, where LEVEL is one
+                        of ['INFO', 'NONE', 'WARN', 'ERROR', 'DEBUG', 'FATAL']
+                        and scheme is one of ['google', 'plain'].
+    --log_to_disk=[scheme:]LEVEL
+                        The level at which logging to disk [default: INFO].
+                        Takes either LEVEL or scheme:LEVEL, where LEVEL is one
+                        of ['INFO', 'NONE', 'WARN', 'ERROR', 'DEBUG', 'FATAL']
+                        and scheme is one of ['google', 'plain'].
+    --log_dir=DIR       The directory into which log files will be generated
+                        [default: /var/tmp].
+    --log_simple        Write a single log file rather than one log file per
+                        log level [default: False].
+    --log_to_scribe=[scheme:]LEVEL
+                        The level at which logging to scribe [default: NONE].
+                        Takes either LEVEL or scheme:LEVEL, where LEVEL is one
+                        of ['INFO', 'NONE', 'WARN', 'ERROR', 'DEBUG', 'FATAL']
+                        and scheme is one of ['google', 'plain'].
+    --scribe_category=CATEGORY
+                        The category used when logging to the scribe daemon.
+                        [default: python_default].
+    --scribe_buffer     Buffer messages when scribe is unavailable rather than
+                        dropping them. [default: False].
+    --scribe_host=HOST  The host running the scribe daemon. [default:
+                        localhost].
+    --scribe_port=PORT  The port used to connect to the scribe daemon.
+                        [default: 1463].
+```
diff --git a/source/documentation/0.18.1/reference/scheduler-configuration.md b/source/documentation/0.18.1/reference/scheduler-configuration.md
new file mode 100644
index 0000000..42033d6
--- /dev/null
+++ b/source/documentation/0.18.1/reference/scheduler-configuration.md
@@ -0,0 +1,268 @@
+# Scheduler Configuration Reference
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+A list of the available options can be seen by running `aurora-scheduler -help`.
+
+Please refer to the [Operator Configuration Guide](../../operations/configuration/) for details on how
+to properly set the most important options.
+
+```
+$ aurora-scheduler -help
+-------------------------------------------------------------------------
+-h or -help to print this help message
+
+Required flags:
+-backup_dir [not null]
+	Directory to store backups under. Will be created if it does not exist.
+-cluster_name [not null]
+	Name to identify the cluster being served.
+-db_max_active_connection_count [must be > 0]
+	Max number of connections to use with database via MyBatis
+-db_max_idle_connection_count [must be > 0]
+	Max number of idle connections to the database via MyBatis
+-framework_authentication_file
+	Properties file which contains framework credentials to authenticate with Mesosmaster. Must contain the properties 'aurora_authentication_principal' and 'aurora_authentication_secret'.
+-ip
+	The ip address to listen. If not set, the scheduler will listen on all interfaces.
+-mesos_master_address [not null]
+	Address for the mesos master, can be a socket address or zookeeper path.
+-mesos_role
+	The Mesos role this framework will register as. The default is to left this empty, and the framework will register without any role and only receive unreserved resources in offer.
+-serverset_path [not null, must be non-empty]
+	ZooKeeper ServerSet path to register at.
+-shiro_after_auth_filter
+	Fully qualified class name of the servlet filter to be applied after the shiro auth filters are applied.
+-thermos_executor_path
+	Path to the thermos executor entry point.
+-tier_config [file must be readable]
+	Configuration file defining supported task tiers, task traits and behaviors.
+-webhook_config [file must exist, file must be readable]
+	Path to webhook configuration file.
+-zk_endpoints [must have at least 1 item]
+	Endpoint specification for the ZooKeeper servers.
+
+Optional flags:
+-allow_container_volumes (default false)
+	Allow passing in volumes in the job. Enabling this could pose a privilege escalation threat.
+-allow_docker_parameters (default false)
+	Allow to pass docker container parameters in the job.
+-allow_gpu_resource (default false)
+	Allow jobs to request Mesos GPU resource.
+-allowed_container_types (default [MESOS])
+	Container types that are allowed to be used by jobs.
+-async_slot_stat_update_interval (default (1, mins))
+	Interval on which to try to update open slot stats.
+-async_task_stat_update_interval (default (1, hrs))
+	Interval on which to try to update resource consumption stats.
+-async_worker_threads (default 8)
+	The number of worker threads to process async task operations with.
+-backup_interval (default (1, hrs))
+	Minimum interval on which to write a storage backup.
+-cron_scheduler_num_threads (default 10)
+	Number of threads to use for the cron scheduler thread pool.
+-cron_scheduling_max_batch_size (default 10) [must be > 0]
+	The maximum number of triggered cron jobs that can be processed in a batch.
+-cron_start_initial_backoff (default (5, secs))
+	Initial backoff delay while waiting for a previous cron run to be killed.
+-cron_start_max_backoff (default (1, mins))
+	Max backoff delay while waiting for a previous cron run to be killed.
+-cron_timezone (default GMT)
+	TimeZone to use for cron predictions.
+-custom_executor_config [file must exist, file must be readable]
+	Path to custom executor settings configuration file.
+-db_lock_timeout (default (1, mins))
+	H2 table lock timeout
+-db_row_gc_interval (default (2, hrs))
+	Interval on which to scan the database for unused row references.
+-default_docker_parameters (default {})
+	Default docker parameters for any job that does not explicitly declare parameters.
+-dlog_max_entry_size (default (512, KB))
+	Specifies the maximum entry size to append to the log. Larger entries will be split across entry Frames.
+-dlog_shutdown_grace_period (default (2, secs))
+	Specifies the maximum time to wait for scheduled checkpoint and snapshot actions to complete before forcibly shutting down.
+-dlog_snapshot_interval (default (1, hrs))
+	Specifies the frequency at which snapshots of local storage are taken and written to the log.
+-enable_cors_for
+	List of domains for which CORS support should be enabled.
+-enable_db_metrics (default true)
+	Whether to use MyBatis interceptor to measure the timing of intercepted Statements.
+-enable_h2_console (default false)
+	Enable H2 DB management console.
+-enable_mesos_fetcher (default false)
+	Allow jobs to pass URIs to the Mesos Fetcher. Note that enabling this feature could pose a privilege escalation threat.
+-enable_preemptor (default true)
+	Enable the preemptor and preemption
+-enable_revocable_cpus (default true)
+	Treat CPUs as a revocable resource.
+-enable_revocable_ram (default false)
+	Treat RAM as a revocable resource.
+-executor_user (default root)
+	User to start the executor. Defaults to "root". Set this to an unprivileged user if the mesos master was started with "--no-root_submissions". If set to anything other than "root", the executor will ignore the "role" setting for jobs since it can't use setuid() anymore. This means that all your jobs will run under the specified user and the user has to exist on the Mesos agents.
+-first_schedule_delay (default (1, ms))
+	Initial amount of time to wait before first attempting to schedule a PENDING task.
+-flapping_task_threshold (default (5, mins))
+	A task that repeatedly runs for less than this time is considered to be flapping.
+-framework_announce_principal (default false)
+	When 'framework_authentication_file' flag is set, the FrameworkInfo registered with the mesos master will also contain the principal. This is necessary if you intend to use mesos authorization via mesos ACLs. The default will change in a future release. Changing this value is backwards incompatible. For details, see MESOS-703.
+-framework_failover_timeout (default (21, days))
+	Time after which a framework is considered deleted.  SHOULD BE VERY HIGH.
+-framework_name (default Aurora)
+	Name used to register the Aurora framework with Mesos.
+-global_container_mounts (default [])
+	A comma separated list of mount points (in host:container form) to mount into all (non-mesos) containers.
+-history_max_per_job_threshold (default 100)
+	Maximum number of terminated tasks to retain in a job history.
+-history_min_retention_threshold (default (1, hrs))
+	Minimum guaranteed time for task history retention before any pruning is attempted.
+-history_prune_threshold (default (2, days))
+	Time after which the scheduler will prune terminated task history.
+-hostname
+	The hostname to advertise in ZooKeeper instead of the locally-resolved hostname.
+-http_authentication_mechanism (default NONE)
+	HTTP Authentication mechanism to use.
+-http_port (default 0)
+	The port to start an HTTP server on.  Default value will choose a random port.
+-initial_flapping_task_delay (default (30, secs))
+	Initial amount of time to wait before attempting to schedule a flapping task.
+-initial_schedule_penalty (default (1, secs))
+	Initial amount of time to wait before attempting to schedule a task that has failed to schedule.
+-initial_task_kill_retry_interval (default (5, secs))
+	When killing a task, retry after this delay if mesos has not responded, backing off up to transient_task_state_timeout
+-job_update_history_per_job_threshold (default 10)
+	Maximum number of completed job updates to retain in a job update history.
+-job_update_history_pruning_interval (default (15, mins))
+	Job update history pruning interval.
+-job_update_history_pruning_threshold (default (30, days))
+	Time after which the scheduler will prune completed job update history.
+-kerberos_debug (default false)
+	Produce additional Kerberos debugging output.
+-kerberos_server_keytab
+	Path to the server keytab.
+-kerberos_server_principal
+	Kerberos server principal to use, usually of the form HTTP/aurora.example.com@EXAMPLE.COM
+-max_flapping_task_delay (default (5, mins))
+	Maximum delay between attempts to schedule a flapping task.
+-max_leading_duration (default (1, days))
+	After leading for this duration, the scheduler should commit suicide.
+-max_registration_delay (default (1, mins))
+	Max allowable delay to allow the driver to register before aborting
+-max_reschedule_task_delay_on_startup (default (30, secs))
+	Upper bound of random delay for pending task rescheduling on scheduler startup.
+-max_saved_backups (default 48)
+	Maximum number of backups to retain before deleting the oldest backups.
+-max_schedule_attempts_per_sec (default 40.0)
+	Maximum number of scheduling attempts to make per second.
+-max_schedule_penalty (default (1, mins))
+	Maximum delay between attempts to schedule a PENDING tasks.
+-max_status_update_batch_size (default 1000) [must be > 0]
+	The maximum number of status updates that can be processed in a batch.
+-max_task_event_batch_size (default 300) [must be > 0]
+	The maximum number of task state change events that can be processed in a batch.
+-max_tasks_per_job (default 4000) [must be > 0]
+	Maximum number of allowed tasks in a single job.
+-max_tasks_per_schedule_attempt (default 5) [must be > 0]
+	The maximum number of tasks to pick in a single scheduling attempt.
+-max_update_instance_failures (default 20000) [must be > 0]
+	Upper limit on the number of failures allowed during a job update. This helps cap potentially unbounded entries into storage.
+-min_offer_hold_time (default (5, mins))
+	Minimum amount of time to hold a resource offer before declining.
+-native_log_election_retries (default 20)
+	The maximum number of attempts to obtain a new log writer.
+-native_log_election_timeout (default (15, secs))
+	The timeout for a single attempt to obtain a new log writer.
+-native_log_file_path
+	Path to a file to store the native log data in.  If the parent directory doesnot exist it will be created.
+-native_log_quorum_size (default 1)
+	The size of the quorum required for all log mutations.
+-native_log_read_timeout (default (5, secs))
+	The timeout for doing log reads.
+-native_log_write_timeout (default (3, secs))
+	The timeout for doing log appends and truncations.
+-native_log_zk_group_path
+	A zookeeper node for use by the native log to track the master coordinator.
+-offer_filter_duration (default (5, secs))
+	Duration after which we expect Mesos to re-offer unused resources. A short duration improves scheduling performance in smaller clusters, but might lead to resource starvation for other frameworks if you run many frameworks in your cluster.
+-offer_hold_jitter_window (default (1, mins))
+	Maximum amount of random jitter to add to the offer hold time window.
+-offer_reservation_duration (default (3, mins))
+	Time to reserve a agent's offers while trying to satisfy a task preempting another.
+-populate_discovery_info (default false)
+	If true, Aurora populates DiscoveryInfo field of Mesos TaskInfo.
+-preemption_delay (default (3, mins))
+	Time interval after which a pending task becomes eligible to preempt other tasks
+-preemption_slot_finder_modules (default [class org.apache.aurora.scheduler.preemptor.PendingTaskProcessorModule, class org.apache.aurora.scheduler.preemptor.PreemptionVictimFilterModule])
+  Guice modules for replacing preemption logic.
+-preemption_slot_hold_time (default (5, mins))
+	Time to hold a preemption slot found before it is discarded.
+-preemption_slot_search_interval (default (1, mins))
+	Time interval between pending task preemption slot searches.
+-receive_revocable_resources (default false)
+	Allows receiving revocable resource offers from Mesos.
+-reconciliation_explicit_batch_interval (default (5, secs))
+	Interval between explicit batch reconciliation requests.
+-reconciliation_explicit_batch_size (default 1000) [must be > 0]
+	Number of tasks in a single batch request sent to Mesos for explicit reconciliation.
+-reconciliation_explicit_interval (default (60, mins))
+	Interval on which scheduler will ask Mesos for status updates of all non-terminal tasks known to scheduler.
+-reconciliation_implicit_interval (default (60, mins))
+	Interval on which scheduler will ask Mesos for status updates of all non-terminal tasks known to Mesos.
+-reconciliation_initial_delay (default (1, mins))
+	Initial amount of time to delay task reconciliation after scheduler start up.
+-reconciliation_schedule_spread (default (30, mins))
+	Difference between explicit and implicit reconciliation intervals intended to create a non-overlapping task reconciliation schedule.
+-require_docker_use_executor (default true)
+	If false, Docker tasks may run without an executor (EXPERIMENTAL)
+-scheduling_max_batch_size (default 3) [must be > 0]
+	The maximum number of scheduling attempts that can be processed in a batch.
+-serverset_endpoint_name (default http)
+	Name of the scheduler endpoint published in ZooKeeper.
+-shiro_ini_path
+	Path to shiro.ini for authentication and authorization configuration.
+-shiro_realm_modules (default [class org.apache.aurora.scheduler.http.api.security.IniShiroRealmModule])
+	Guice modules for configuring Shiro Realms.
+-sla_non_prod_metrics (default [])
+	Metric categories collected for non production tasks.
+-sla_prod_metrics (default [JOB_UPTIMES, PLATFORM_UPTIME, MEDIANS])
+	Metric categories collected for production tasks.
+-sla_stat_refresh_interval (default (1, mins))
+	The SLA stat refresh interval.
+-slow_query_log_threshold (default (25, ms))
+	Log all queries that take at least this long to execute.
+-slow_query_log_threshold (default (25, ms))
+	Log all queries that take at least this long to execute.
+-snapshot_hydrate_stores (default [locks, hosts, quota, job_updates])
+	Which H2-backed stores to fully hydrate on the Snapshot.
+-stat_retention_period (default (1, hrs))
+	Time for a stat to be retained in memory before expiring.
+-stat_sampling_interval (default (1, secs))
+	Statistic value sampling interval.
+-task_assigner_modules (default [class org.apache.aurora.scheduler.state.FirstFitTaskAssignerModule])
+  Guice modules for replacing task assignment logic.
+-thermos_executor_cpu (default 0.25)
+	The number of CPU cores to allocate for each instance of the executor.
+-thermos_executor_flags
+	Extra arguments to be passed to the thermos executor
+-thermos_executor_ram (default (128, MB))
+	The amount of RAM to allocate for each instance of the executor.
+-thermos_executor_resources (default [])
+	A comma separated list of additional resources to copy into the sandbox.Note: if thermos_executor_path is not the thermos_executor.pex file itself, this must include it.
+-thermos_home_in_sandbox (default false)
+	If true, changes HOME to the sandbox before running the executor. This primarily has the effect of causing the executor and runner to extract themselves into the sandbox.
+-transient_task_state_timeout (default (5, mins))
+	The amount of time after which to treat a task stuck in a transient state as LOST.
+-use_beta_db_task_store (default false)
+	Whether to use the experimental database-backed task store.
+-viz_job_url_prefix (default )
+	URL prefix for job container stats.
+-zk_chroot_path
+	chroot path to use for the ZooKeeper connections
+-zk_digest_credentials
+	user:password to use when authenticating with ZooKeeper.
+-zk_in_proc (default false)
+	Launches an embedded zookeeper server for local testing causing -zk_endpoints to be ignored if specified.
+-zk_session_timeout (default (4, secs))
+	The ZooKeeper session timeout.
+-zk_use_curator (default true)
+	DEPRECATED: Uses Apache Curator as the zookeeper client; otherwise a copy of Twitter commons/zookeeper (the legacy library) is used.
+-------------------------------------------------------------------------
+```
diff --git a/source/documentation/0.18.1/reference/scheduler-endpoints.md b/source/documentation/0.18.1/reference/scheduler-endpoints.md
new file mode 100644
index 0000000..ddae76b
--- /dev/null
+++ b/source/documentation/0.18.1/reference/scheduler-endpoints.md
@@ -0,0 +1,19 @@
+# HTTP endpoints
+
+There are a number of HTTP endpoints that the Aurora scheduler exposes. These allow various
+operational tasks to be performed on the scheduler. Below is an (incomplete) list of such endpoints
+and a brief explanation of what they do.
+
+## Leader health
+The `/leaderhealth` endpoint enables health checks on the scheduler instances in order
+to forward requests to the leading scheduler. This is typically used by a load balancer such as
+HAProxy or AWS ELB.
+
+When an HTTP GET request is issued on this endpoint, it responds as follows:
+
+- If the instance that received the GET request is the leading scheduler, an HTTP status code of
+  `200 OK` is returned.
+- If the instance that received the GET request is not the leading scheduler but a leader does
+  exist, an HTTP status code of `503 SERVICE_UNAVAILABLE` is returned.
+- If no leader currently exists or the leader is unknown, an HTTP status code of `502 BAD_GATEWAY`
+  is returned.
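+
+As an illustration (not part of Aurora), a simple client-side probe might interpret these status
+codes as follows; the scheduler hostnames and port are assumptions:
+
+```
+import urllib.error
+import urllib.request
+
+SCHEDULERS = ['http://scheduler1:8081', 'http://scheduler2:8081', 'http://scheduler3:8081']
+
+def find_leader():
+    for base in SCHEDULERS:
+        try:
+            with urllib.request.urlopen(base + '/leaderhealth', timeout=5) as resp:
+                if resp.getcode() == 200:   # 200 OK: this instance is the leader
+                    return base
+        except urllib.error.HTTPError:
+            # 503: not the leader (but a leader exists); 502: leader unknown
+            continue
+        except urllib.error.URLError:
+            continue
+    return None
+```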
diff --git a/source/documentation/0.18.1/reference/task-lifecycle.md b/source/documentation/0.18.1/reference/task-lifecycle.md
new file mode 100644
index 0000000..cf1b679
--- /dev/null
+++ b/source/documentation/0.18.1/reference/task-lifecycle.md
@@ -0,0 +1,148 @@
+# Task Lifecycle
+
+When Aurora reads a configuration file and finds a `Job` definition, it:
+
+1.  Evaluates the `Job` definition.
+2.  Splits the `Job` into its constituent `Task`s.
+3.  Sends those `Task`s to the scheduler.
+4.  The scheduler puts the `Task`s into `PENDING` state, starting each
+    `Task`'s life cycle.
+
+
+![Life of a task](../images/lifeofatask.png)
+
+Please note, a couple of task states described below are missing from
+this state diagram.
+
+
+## PENDING to RUNNING states
+
+When a `Task` is in the `PENDING` state, the scheduler constantly
+searches for machines satisfying that `Task`'s resource request
+requirements (RAM, disk space, CPU time) while maintaining configuration
+constraints such as "a `Task` must run on machines  dedicated  to a
+particular role" or attribute limit constraints such as "at most 2
+`Task`s from the same `Job` may run on each rack". When the scheduler
+finds a suitable match, it assigns the `Task` to a machine and puts the
+`Task` into the `ASSIGNED` state.
+
+From the `ASSIGNED` state, the scheduler sends an RPC to the agent
+machine containing `Task` configuration, which the agent uses to spawn
+an executor responsible for the `Task`'s lifecycle. When the scheduler
+receives an acknowledgment that the machine has accepted the `Task`,
+the `Task` goes into `STARTING` state.
+
+`STARTING` state initializes a `Task` sandbox. When the sandbox is fully
+initialized, Thermos begins to invoke `Process`es. The agent
+machine then sends an update to the scheduler that the `Task` is
+in `RUNNING` state, but only after the task satisfies its liveness requirements.
+See [Health Checking](../features/services#health-checking) for details
+on how to configure health checks.
+
+
+
+## RUNNING to terminal states
+
+There are various ways that an active `Task` can transition into a terminal
+state. By definition, it can never leave this state. However, depending on
+the nature of the termination and the originating `Job` definition
+(e.g. `service`, `max_task_failures`), a replacement `Task` might be
+scheduled.
+
+### Natural Termination: FINISHED, FAILED
+
+A `RUNNING` `Task` can terminate without direct user interaction. For
+example, it may be a finite computation that finishes, even something as
+simple as `echo hello world.`, or it could be an exceptional condition in
+a long-lived service. If the `Task` is successful (its underlying
+processes have succeeded with exit status `0` or finished without
+reaching failure limits) it moves into `FINISHED` state. If it finished
+after reaching a set of failure limits, it goes into `FAILED` state.
+
+A terminated `Task` that is subject to rescheduling will be temporarily
+`THROTTLED` if it is considered to be flapping. A task is flapping if its
+previous invocation was terminated after less than 5 minutes (scheduler
+default). The time penalty a task has to remain in the `THROTTLED` state,
+before it is eligible for rescheduling, increases with each consecutive
+failure.
+
+### Forceful Termination: KILLING, RESTARTING
+
+You can terminate a `Task` by issuing an `aurora job kill` command, which
+moves it into `KILLING` state. The scheduler then sends the agent a
+request to terminate the `Task`. If the scheduler receives a successful
+response, it moves the Task into `KILLED` state and never restarts it.
+
+If a `Task` is forced into the `RESTARTING` state via the `aurora job restart`
+command, the scheduler kills the underlying task but in parallel schedules
+an identical replacement for it.
+
+In any case, the responsible executor on the agent follows an escalation
+sequence when killing a running task:
+
+  1. If a `HttpLifecycleConfig` is not present, skip to (4).
+  2. Send a POST to the `graceful_shutdown_endpoint` and wait 5 seconds.
+  3. Send a POST to the `shutdown_endpoint` and wait 5 seconds.
+  4. Send SIGTERM (`kill`) and wait at most `finalization_wait` seconds.
+  5. Send SIGKILL (`kill -9`).
+
+If the executor notices that all `Process`es in a `Task` have aborted
+during this sequence, it will not proceed with subsequent steps.
+Note that graceful shutdown is best-effort, and due to the many
+inevitable realities of distributed systems, it may not be performed.
+
+### Unexpected Termination: LOST
+
+If a `Task` stays in a transient task state for too long (such as `ASSIGNED`
+or `STARTING`), the scheduler forces it into `LOST` state, creating a new
+`Task` in its place that's sent into `PENDING` state.
+
+In addition, if the Mesos core tells the scheduler that an agent has
+become unhealthy (or outright disappeared), the `Task`s assigned to that
+agent go into `LOST` state and new `Task`s are created in their place.
+From `PENDING` state, there is no guarantee a `Task` will be reassigned
+to the same machine unless job constraints explicitly force it there.
+
+### Giving Priority to Production Tasks: PREEMPTING
+
+Sometimes a Task needs to be interrupted, such as when a non-production
+Task's resources are needed by a higher priority production Task. This
+type of interruption is called a *pre-emption*. When this happens in
+Aurora, the non-production Task is killed and moved into
+the `PREEMPTING` state when both of the following are true:
+
+- The task being killed is a non-production task.
+- The other task is a `PENDING` production task that hasn't been
+  scheduled due to a lack of resources.
+
+The scheduler UI shows that the non-production task was preempted in favor of
+the production task. At some point, tasks in `PREEMPTING` move to `KILLED`.
+
+Note that non-production tasks consuming many resources are likely to be
+preempted in favor of production tasks.
+
+### Making Room for Maintenance: DRAINING
+
+Cluster operators can set an agent into maintenance mode. This will transition
+all `Task`s running on this agent into `DRAINING` and eventually to `KILLED`.
+Drained `Task`s will be restarted on other agents for which no maintenance
+has been announced yet.
+
+
+
+## State Reconciliation
+
+Due to the many inevitable realities of distributed systems, there might
+be a mismatch of perceived and actual cluster state (e.g. a machine returns
+from a `netsplit` but the scheduler has already marked all its `Task`s as
+`LOST` and rescheduled them).
+
+Aurora regularly runs a state reconciliation process in order to detect
+and correct such issues (e.g. by killing the errant `RUNNING` tasks).
+By default, the proper detection of all failure scenarios and inconsistencies
+may take up to an hour.
+
+To emphasize this point: there is no uniqueness guarantee for a single
+instance of a job in the presence of network partitions. If the `Task`
+requires that, it should be baked in at the application level using a
+distributed coordination service such as Zookeeper.
diff --git a/source/documentation/0.19.0/additional-resources/presentations.md b/source/documentation/0.19.0/additional-resources/presentations.md
new file mode 100644
index 0000000..bebef5a
--- /dev/null
+++ b/source/documentation/0.19.0/additional-resources/presentations.md
@@ -0,0 +1,80 @@
+# Apache Aurora Presentations
+Video and slides from presentations and panel discussions about Apache Aurora.
+
+_(Listed in date descending order)_
+
+<table>
+
+	<tr>
+		<td><img src="/documentation/0.19.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png" alt="Mesos and Aurora on a Small Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=q5iIqhaCJ_o">Mesos &amp; Aurora on a Small Scale (Video)</a></strong>
+		<p>Presented by Florian Pfeiffer</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.19.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png" alt="SLA Aware Maintenance for Operators Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=tZ0-SISvCis">SLA Aware Maintenance for Operators (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.19.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png" alt="Shipping Code with Aurora Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=y1hi7K1lPkk">Shipping Code with Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.19.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png" alt="Twitter Production Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=nNrh-gdu9m4">Twitter’s Production Scale: Mesos and Aurora Operations (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.19.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png" alt="From Monolith to Microservices with Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=yXkOgnyK4Hw">From Monolith to Microservices w/ Aurora (Video)</a></strong>
+		<p>Presented by Thanos Baskous, Tony Dong, Dobromir Montauk</p>
+		<p>April 30, 2015 at <a href="http://www.meetup.com/Bay-Area-Apache-Aurora-Users-Group/events/221219480/">Bay Area Apache Aurora Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.19.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png" alt="Aurora + Mesos in Practice at Twitter Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=1XYJGX_qZVU">Aurora + Mesos in Practice at Twitter (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 07, 2015 at <a href="http://www.bigeng.io/aurora-mesos-in-practice-at-twitter">Bigcommerce TechTalk</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.19.0/images/presentations/02_28_2015_apache_aurora_thumb.png" alt="Apache Auroraの始めかた Slideshow Thumbnail" /></td>
+		<td><strong><a href="http://www.slideshare.net/zembutsu/apache-aurora-introduction-and-tutorial-osc15tk">Apache Auroraの始めかた (Slides)</a></strong>
+		<p>Presented by Masahito Zembutsu</p>
+		<p>February 28, 2015 at <a href="http://www.ospn.jp/osc2015-spring/">Open Source Conference 2015 Tokyo Spring</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.19.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png" alt="Apache Aurora Adopters Panel Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=2Jsj0zFdRlg">Apache Aurora Adopters Panel (Video)</a></strong>
+		<p>Panelists Ben Staffin, Josh Adams, Bill Farner, Berk Demir</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.19.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png" alt="Operating Apache Aurora and Mesos at Twitter Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=E4lxX6epM_U">Operating Apache Aurora and Mesos at Twitter (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.19.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png" alt="Apache Aurora and Mesos at TellApart" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=ZZXtXLvTXAE">Apache Aurora and Mesos at TellApart (Video)</a></strong>
+		<p>Presented by Steve Niemitz</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.19.0/images/presentations/08_21_2014_past_present_future_thumb.png" alt="Past, Present, and Future of the Aurora Scheduler Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=Dsc5CPhKs4o">Past, Present, and Future of the Aurora Scheduler (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 21, 2014 at <a href="http://events.linuxfoundation.org/events/archive/2014/mesoscon">#MesosCon 2014</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.19.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png" alt="Introduction to Apache Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=asd_h6VzaJc">Introduction to Apache Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 25, 2014 at <a href="https://www.eventbrite.com/e/aurora-and-mesosframeworksmeetup-tickets-10850994617">Aurora and Mesos Frameworks Meetup</a></p></td>
+	</tr>
+</table>
diff --git a/source/documentation/0.19.0/additional-resources/tools.md b/source/documentation/0.19.0/additional-resources/tools.md
new file mode 100644
index 0000000..7f3613d
--- /dev/null
+++ b/source/documentation/0.19.0/additional-resources/tools.md
@@ -0,0 +1,24 @@
+# Tools
+
+Various tools integrate with Aurora. Is there a tool missing? Let us know, or submit a patch to add it!
+
+* Load-balancing technology used to direct traffic to services running on Aurora:
+  - [synapse](https://github.com/airbnb/synapse) based on HAProxy
+  - [aurproxy](https://github.com/tellapart/aurproxy) based on nginx
+  - [jobhopper](https://github.com/benley/aurora-jobhopper) performs HTTP redirects for easy developer and administrator access
+
+* RPC libraries that integrate with Aurora's [service discovery mechanism](../../features/service-discovery/):
+  - [linkerd](https://linkerd.io/) RPC proxy
+  - [finagle](https://twitter.github.io/finagle) (Scala)
+  - [scales](https://github.com/steveniemitz/scales) (Python)
+
+* Monitoring:
+  - [collectd-aurora](https://github.com/zircote/collectd-aurora) for cluster monitoring using collectd
+  - [Prometheus Aurora exporter](https://github.com/tommyulfsparre/aurora_exporter) for cluster monitoring using Prometheus
+  - [Prometheus service discovery integration](http://prometheus.io/docs/operating/configuration/#zookeeper-serverset-sd-configurations-serverset_sd_config) for discovering and monitoring services running on Aurora
+
+* Packaging and deployment:
+  - [aurora-packaging](https://github.com/apache/aurora-packaging), the source of the official Aurora packages
+
+* Thrift Clients:
+  - [gorealis](https://github.com/rdelval/gorealis) for communicating with the scheduler using Go
diff --git a/source/documentation/0.19.0/contributing.md b/source/documentation/0.19.0/contributing.md
new file mode 100644
index 0000000..d3d1a1f
--- /dev/null
+++ b/source/documentation/0.19.0/contributing.md
@@ -0,0 +1,93 @@
+## Get the Source Code
+
+First things first, you'll need the source! The Aurora source is available from Apache git:
+
+    git clone https://gitbox.apache.org/repos/asf/aurora
+
+Read the Style Guides
+---------------------
+Aurora's codebase is primarily Java and Python and conforms to the Twitter Commons styleguides for
+both languages.
+
+- [Java Style Guide](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/styleguide.md)
+- [Python Style Guide](https://github.com/twitter/commons/blob/master/src/python/twitter/common/styleguide.md)
+
+## Find Something to Do
+
+There are issues in [Jira](https://issues.apache.org/jira/browse/AURORA) with the
+["newbie" label](https://issues.apache.org/jira/issues/?jql=project%20%3D%20AURORA%20AND%20labels%20%3D%20newbie%20and%20resolution%3Dunresolved)
+that are good starting places for new Aurora contributors; pick one of these and dive in! To assign
+a task to yourself, first ask for your JIRA id to be whitelisted, either in IRC/Slack or by
+emailing dev@aurora.apache.org. Once your JIRA account has been whitelisted you can assign tickets
+to yourself. The next step is to prepare your patch and finally post it for review.
+
+## Getting your ReviewBoard Account
+
+Go to https://reviews.apache.org and create an account.
+
+## Setting up your ReviewBoard Environment
+
+Run `./rbt status`. The first time this runs it will bootstrap and you will be asked to log in.
+Subsequent runs will cache your login credentials.
+
+## Submitting a Patch for Review
+
+Post a review with `rbt`, fill out the fields in your browser and hit Publish.
+
+    ./rbt post -o
+
+If you're unsure about who to add as a reviewer, you can default to adding Zameer Manji (zmanji) and
+Joshua Cohen (jcohen). They will take care of finding an appropriate reviewer for the patch.
+
+Once you've done this, you probably want to mark the associated Jira issue as Reviewable.
+
+## Updating an Existing Review
+
+Incorporate review feedback, make some more commits, update your existing review, fill out the
+fields in your browser and hit Publish.
+
+    ./rbt post -o -r <RB_ID>
+
+## Getting Your Review Merged
+
+If you're not an Aurora committer, one of the committers will merge your change in as described
+below. Generally, the last reviewer to give the review a 'Ship It!' will be responsible.
+
+### Merging Your Own Review (Committers)
+
+Once you have shipits from the right committers, merge your changes in a single commit and mark
+the review as submitted. The typical workflow is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>  # Verify the automatically-generated commit message looks sane,
+                            # editing if necessary.
+    git show master         # Verify everything looks sane
+    git push origin master
+    ./rbt close <RB_ID>
+
+Note that even if you're developing using feature branches you will not use `git merge` - each
+commit will be an atomic change accompanied by a ReviewBoard entry.
+
+### Merging Someone Else's Review
+
+Sometimes you'll need to merge someone else's RB. The typical workflow for this is
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>
+    git show master  # Verify everything looks sane, author is correct
+    git push origin master
+
+Note for committers: while we generally use the commit message generated by `./rbt patch`, some
+changes are often required:
+
+1. Ensure that the commit message does not exceed 100 characters per line.
+2. Remove the "Testing Done" section. It's generally redundant (can be seen by checking the linked
+  review) or entirely irrelevant to the commit itself.
+
+## Cleaning Up
+
+Your patch has landed, congratulations! The last thing you'll want to do before moving on to your
+next fix is to clean up your Jira and Reviewboard. The former should be marked as
+"Resolved" and the latter as "Submitted".
diff --git a/source/documentation/0.19.0/development/client.md b/source/documentation/0.19.0/development/client.md
new file mode 100644
index 0000000..c6bec5b
--- /dev/null
+++ b/source/documentation/0.19.0/development/client.md
@@ -0,0 +1,148 @@
+Developing the Aurora Client
+============================
+
+The client is written in Python, and uses the
+[Pants](http://pantsbuild.github.io/python-readme.html) build tool.
+
+
+Building and Testing
+--------------------
+
+Building and testing the client code are both done using Pants. The relevant targets to know about
+are:
+
+   * Build a client executable: `./pants binary src/main/python/apache/aurora/client:aurora`
+   * Test client code: `./pants test src/test/python/apache/aurora/client/cli:cli`
+
+If you want to build a source distribution of the client, you need to run `./build-support/release/make-python-sdists`.
+
+
+Creating Custom Builds
+----------------------
+
+There are situations where you may want to plug in custom logic to the Client that may not be
+applicable to the open source codebase. Rather than create a whole CLI from scratch, you can
+easily create your own custom, drop-in replacement aurora.pex using the pants build tool.
+
+First, create an AuroraCommandLine implementation as an entry-point for registering customizations:
+
+    from apache.aurora.client.cli.client import AuroraCommandLine
+
+    class CustomAuroraCommandLine(AuroraCommandLine):
+      """Custom AuroraCommandLine for your needs"""
+
+      @property
+      def name(self):
+        return "your-company-aurora"
+
+      @classmethod
+      def get_description(cls):
+        return 'Your Company internal Aurora client command line'
+
+      def __init__(self):
+        super(CustomAuroraCommandLine, self).__init__()
+        # Add custom plugins.
+        self.register_plugin(YourCustomPlugin())
+
+      def register_nouns(self):
+        super(CustomAuroraCommandLine, self).register_nouns()
+        # You can even add new commands / sub-commands!
+        self.register_noun(YourStartUpdateProxy())
+        self.register_noun(YourDeployWorkflowCommand())
+
+Secondly, create a main entry point:
+
+    import sys
+
+    def proxy_main():
+      client = CustomAuroraCommandLine()
+      if len(sys.argv) == 1:
+        sys.argv.append("-h")
+      sys.exit(client.execute(sys.argv[1:]))
+
+Finally, you can wire everything up with a pants BUILD file in your project directory:
+
+    python_binary(
+      name='aurora',
+      entry_point='your_company.aurora.client:proxy_main',
+      dependencies=[
+        ':client_lib'
+      ]
+    )
+
+    python_library(
+      name='client_lib',
+      sources = [
+        'client.py',
+        'custom_plugin.py',
+        'custom_command.py',
+      ],
+      dependencies = [
+        # The Apache Aurora client
+        # Any other dependencies for your custom code
+      ],
+    )
+
+Using the same commands to build the client as above (but obviously pointing to this BUILD file
+instead), you will have a drop-in replacement aurora.pex file with your customizations.
+
+Running/Debugging
+------------------
+
+For manually testing client changes against a cluster, we use [Vagrant](https://www.vagrantup.com/).
+To start a virtual cluster, you need to install Vagrant, and then run `vagrant up` from the root of
+the aurora workspace. This will create a vagrant host named "devcluster", with a Mesos master, a set
+of Mesos agents, and an Aurora scheduler.
+
+If you have a change you would like to test in your local cluster, you'll rebuild the client:
+
+    vagrant ssh -c 'aurorabuild client'
+
+Once this completes, the `aurora` command will reflect your changes.
+
+
+Running/Debugging in PyCharm
+-----------------------------
+
+It's possible to use PyCharm to run and debug both the client and client tests in an IDE. In order
+to do this, first run:
+
+    build-support/python/make-pycharm-virtualenv
+
+This script will configure a virtualenv with all of our Python requirements. Once the script
+completes it will emit instructions for configuring PyCharm:
+
+    Your PyCharm environment is now set up.  You can open the project root
+    directory with PyCharm.
+
+    Once the project is loaded:
+      - open project settings
+      - click 'Project Interpreter'
+      - click the cog in the upper-right corner
+      - click 'Add Local'
+      - select 'build-support/python/pycharm.venv/bin/python'
+      - click 'OK'
+
+### Running/Debugging Tests
+After following these instructions, you should now be able to run/debug tests directly from the IDE
+by right-clicking on a test (or test class) and choosing to run or debug:
+
+[![Debug Client Test](../images/debug-client-test.png)](../images/debug-client-test.png)
+
+If you've set a breakpoint, you can see the run will now stop and let you debug:
+
+[![Debugging Client Test](../images/debugging-client-test.png)](../images/debugging-client-test.png)
+
+### Running/Debugging the Client
+Actually running and debugging the client is unfortunately a bit more complex. You'll need to create
+a Run configuration:
+
+* Go to Run → Edit Configurations
+* Click the + icon to add a new configuration.
+* Choose python and name the configuration 'client'.
+* Set the script path to `/your/path/to/aurora/src/main/python/apache/aurora/client/cli/client.py`
+* Set the script parameters to the command you want to run (e.g. `job status <job key>`)
+* Expand the Environment section and click the ellipsis to add a new environment variable
+* Click the + at the bottom to add a new variable named AURORA_CONFIG_ROOT whose value is the
+  path where your cluster configuration can be found. For example, to talk to the scheduler
+  running in the vagrant image, it would be set to `/your/path/to/aurora/examples/vagrant` (this
+  is the directory where our example clusters.json is found).
+* You should now be able to run and debug this configuration!
diff --git a/source/documentation/0.19.0/development/committers-guide.md b/source/documentation/0.19.0/development/committers-guide.md
new file mode 100644
index 0000000..397d63c
--- /dev/null
+++ b/source/documentation/0.19.0/development/committers-guide.md
@@ -0,0 +1,105 @@
+Committer's Guide
+=================
+
+Information for official Apache Aurora committers.
+
+Setting up your email account
+-----------------------------
+Once your Apache ID has been set up, you can configure your account, add ssh keys, and set up an
+email forwarding address at
+
+    http://id.apache.org
+
+Additional instructions for setting up your new committer email can be found at
+
+    http://www.apache.org/dev/user-email.html
+
+The recommended setup is to configure all services (mailing lists, JIRA, ReviewBoard) to send
+emails to your @apache.org email address.
+
+
+Creating a gpg key for releases
+-------------------------------
+In order to create a release candidate you will need a gpg key published to an external key server
+and that key will need to be added to our KEYS file as well.
+
+1. Create a key:
+
+               gpg --gen-key
+
+2. Add your gpg key to the Apache Aurora KEYS file:
+
+               git clone https://gitbox.apache.org/repos/asf/aurora.git
+               (gpg --list-sigs <KEY ID> && gpg --armor --export <KEY ID>) >> KEYS
+               git add KEYS && git commit -m "Adding gpg key for <APACHE ID>"
+               ./rbt post -o -g
+
+3. Publish the key to an external key server:
+
+               gpg --keyserver pgp.mit.edu --send-keys <KEY ID>
+
+4. Propagate the changes to the KEYS file to the Apache Aurora svn dist locations listed below:
+
+               https://dist.apache.org/repos/dist/dev/aurora/KEYS
+               https://dist.apache.org/repos/dist/release/aurora/KEYS
+
+5. Add your key to git config for use with the release scripts:
+
+               git config --global user.signingkey <KEY ID>
+
+
+Creating a release
+------------------
+The following will guide you through the steps to create a release candidate, vote, and finally an
+official Apache Aurora release. Before starting, your gpg key should be in the KEYS file and you
+must have access to commit to the dist.a.o repositories.
+
+1. Ensure that all issues resolved for this release candidate are tagged with the correct Fix
+Version in JIRA; the changelog script will use this to generate the CHANGELOG in step #3.
+To assign the fix version:
+
+    * Look up the [previous release date](https://issues.apache.org/jira/browse/aurora/?selectedTab=com.atlassian.jira.jira-projects-plugin:versions-panel).
+    * Query all issues resolved after that release date: `project = AURORA AND status in (resolved, Closed) and fixVersion is empty and resolutiondate >= "YYYY/MM/DD"`
+    * In the upper right corner of the query result, select Tools > Bulk Edit.
+    * Select all issues > edit issue > set 'Change Fix Version/s' to the release version.
+    * Make sure to uncheck 'Send mail for this update' at the bottom.
+
+2. Prepare RELEASE-NOTES.md for the release. This just boils down to removing the "(Not yet
+released)" suffix from the impending release.
+
+3. Create a release candidate. This will automatically update the CHANGELOG and commit it, create a
+branch and update the current version within the trunk. To create a minor version update and publish
+it, run
+
+               ./build-support/release/release-candidate -l m -p
+
+4. Update, if necessary, the draft email created from the `release-candidate` script in step #3 and
+send the [VOTE] email to the dev@ mailing list. You can verify the release signature and checksums
+by running
+
+               ./build-support/release/verify-release-candidate
+
+5. Wait for the vote to complete. If the vote fails, close the vote by replying to the initial [VOTE]
+email sent in step #4, editing the subject to [RESULT][VOTE] ... and noting the failure reason
+(example [here](http://markmail.org/message/d4d6xtvj7vgwi76f)). You'll also need to manually revert
+the commits generated by the release candidate script that incremented the snapshot version and
+updated the changelog. Once that is done, address any issues and go back to step #1 and run
+again, this time using the -r flag to increment the release candidate version. This will
+automatically clean up the release candidate rc0 branch and source distribution.
+
+               ./build-support/release/release-candidate -l m -r 1 -p
+
+6. Once the vote has successfully passed, create the release
+
+**IMPORTANT: make sure to use the correct release candidate at this final step (e.g.: `-r 1` if the rc1
+candidate has been voted for). Once the release tag is pushed it will be very hard to undo due to the
+remote git pre-receive hook explicitly forbidding release tag manipulations.**
+
+               ./build-support/release/release
+
+7. Update the draft email created from the `release` script in step #6 to include the Apache IDs for
+all binding votes and send the [RESULT][VOTE] email to the dev@ mailing list.
+
+8. Update the [Aurora Website](http://aurora.apache.org/) by following the
+[instructions](https://svn.apache.org/repos/asf/aurora/site/README.md) on the ASF Aurora SVN repo.
+Remember to add a blog post under source/blog and regenerate the site before committing.
diff --git a/source/documentation/0.19.0/development/db-migration.md b/source/documentation/0.19.0/development/db-migration.md
new file mode 100644
index 0000000..c74d3f5
--- /dev/null
+++ b/source/documentation/0.19.0/development/db-migration.md
@@ -0,0 +1,34 @@
+DB Migrations
+=============
+
+Changes to the DB schema should be made in the form of migrations. This ensures that all changes
+are applied correctly after a DB dump from a previous version is restored.
+
+DB migrations are managed through a system built on top of
+[MyBatis Migrations](http://www.mybatis.org/migrations/). The migrations are run automatically when
+a snapshot is restored; no manual interaction is required by cluster operators.
+
+Upgrades
+--------
+When adding or altering tables or changing data, in addition to making the change in
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql), a new
+migration class should be created under the org.apache.aurora.scheduler.storage.db.migration
+package. The class should implement the [MigrationScript](https://github.com/mybatis/migrations/blob/master/src/main/java/org/apache/ibatis/migration/MigrationScript.java)
+interface (see [V001_TestMigration](https://github.com/apache/aurora/blob/rel/0.19.0/src/test/java/org/apache/aurora/scheduler/storage/db/testmigration/V001_TestMigration.java)
+as an example). The upgrade and downgrade scripts are defined in this class. When restoring a
+snapshot the list of migrations on the classpath is compared to the list of applied changes in the
+DB. Any changes that have not yet been applied are executed and their downgrade script is stored
+alongside the changelog entry in the database to facilitate downgrades in the event of a rollback.
+
+Downgrades
+----------
+If, while running migrations, a rollback is detected, i.e. a change exists in the DB changelog that
+does not exist on the classpath, the downgrade script associated with each affected change is
+applied.
+
+Baselines
+---------
+After enough time has passed (at least 1 official release), it should be safe to baseline migrations
+if desired. This can be accomplished by ensuring the changes from migrations have been applied to
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql) and then
+removing the corresponding migration classes and adding a migration to remove the changelog entries.
\ No newline at end of file
diff --git a/source/documentation/0.19.0/development/design-documents.md b/source/documentation/0.19.0/development/design-documents.md
new file mode 100644
index 0000000..ee92a60
--- /dev/null
+++ b/source/documentation/0.19.0/development/design-documents.md
@@ -0,0 +1,23 @@
+Design Documents
+================
+
+Since its inception as an Apache project, larger feature additions to the
+Aurora code base are discussed in the form of design documents. Design documents
+are living documents until a consensus has been reached to implement a feature
+in the proposed form.
+
+Current and past documents:
+
+* [Command Hooks for the Aurora Client](../design/command-hooks/)
+* [Dynamic Reservations](https://docs.google.com/document/d/19gV8Po6DIHO14tOC7Qouk8RnboY8UCfRTninwn_5-7c/edit)
+* [GPU Resources in Aurora](https://docs.google.com/document/d/1J9SIswRMpVKQpnlvJAMAJtKfPP7ZARFknuyXl-2aZ-M/edit)
+* [Health Checks for Updates](https://docs.google.com/document/d/1KOO0LC046k75TqQqJ4c0FQcVGbxvrn71E10wAjMorVY/edit)
+* [JobUpdateDiff thrift API](https://docs.google.com/document/d/1Fc_YhhV7fc4D9Xv6gJzpfooxbK4YWZcvzw6Bd3qVTL8/edit)
+* [REST API RFC](https://docs.google.com/document/d/11_lAsYIRlD5ETRzF2eSd3oa8LXAHYFD8rSetspYXaf4/edit)
+* [Revocable Mesos offers in Aurora](https://docs.google.com/document/d/1r1WCHgmPJp5wbrqSZLsgtxPNj3sULfHrSFmxp2GyPTo/edit)
+* [Supporting the Mesos Universal Containerizer](https://docs.google.com/document/d/111T09NBF2zjjl7HE95xglsDpRdKoZqhCRM5hHmOfTLA/edit?usp=sharing)
+* [Tier Management In Apache Aurora](https://docs.google.com/document/d/1erszT-HsWf1zCIfhbqHlsotHxWUvDyI2xUwNQQQxLgs/edit?usp=sharing)
+* [Ubiquitous Jobs](https://docs.google.com/document/d/12hr6GnUZU3mc7xsWRzMi3nQILGB-3vyUxvbG-6YmvdE/edit)
+* [Pluggable Scheduling](https://docs.google.com/document/d/1fVHLt9AF-YbOCVCDMQmi5DATVusn-tqY8DldKbjVEm0/edit)
+
+Design documents can be found in the Aurora issue tracker via the query [`project = AURORA AND text ~ "docs.google.com" ORDER BY created`](https://issues.apache.org/jira/browse/AURORA-1528?jql=project%20%3D%20AURORA%20AND%20text%20~%20%22docs.google.com%22%20ORDER%20BY%20created).
diff --git a/source/documentation/0.19.0/development/design/command-hooks.md b/source/documentation/0.19.0/development/design/command-hooks.md
new file mode 100644
index 0000000..3f3f70f
--- /dev/null
+++ b/source/documentation/0.19.0/development/design/command-hooks.md
@@ -0,0 +1,102 @@
+# Command Hooks for the Aurora Client
+
+## Introduction/Motivation
+
+We've got hooks in the client that surround API calls. These are
+pretty awkward, because they don't correlate with user actions. For
+example, suppose we wanted a policy that said users weren't allowed to
+kill all instances of a production job at once.
+
+Right now, all that we could hook would be the "killJob" api call. But
+kill (at least in newer versions of the client) normally runs in
+batches. If a user called killall, what we would see on the API level
+is a series of "killJob" calls, each of which specified a batch of
+instances. We wouldn't be able to distinguish between really killing
+all instances of a job (which is forbidden under this policy), and
+carefully killing in batches (which is permitted). In each case, the
+hook would just see a series of API calls, and couldn't find out what
+the actual command being executed was!
+
+For most policy enforcement, what we really want to be able to do is
+look at and vet the commands that a user is performing, not the API
+calls that the client uses to implement those commands.
+
+So I propose that we add a new kind of hook, which surrounds noun/verb
+commands. A hook will register itself to handle a collection of (noun,
+verb) pairs. Whenever any of those noun/verb commands is invoked, the
+hook's methods will be called around the execution of the verb. A
+pre-hook will have the ability to reject a command, preventing the
+verb from being executed.
+
+## Registering Hooks
+
+These hooks will be registered via configuration plugins. A configuration plugin
+can register hooks using an API. Hooks registered this way are, effectively,
+hardwired into the client executable.
+
+The order of execution of hooks is unspecified: they may be called in
+any order. There is no way to guarantee that one hook will execute
+before some other hook.
+
+
+### Global Hooks
+
+Commands registered by the python call are called _global_ hooks,
+because they will run for all configurations, whether or not they
+specify any hooks in the configuration file.
+
+In the implementation, hooks are registered in the module
+`apache.aurora.client.cli.command_hooks`, using the class
+`GlobalCommandHookRegistry`. A global hook can be registered by calling
+`GlobalCommandHookRegistry.register_command_hook` in a configuration plugin.
+
+### The API
+
+    class CommandHook(object):
+      @property
+      def name(self):
+        """Returns a name for the hook."""
+
+      def get_nouns(self):
+        """Return the nouns that have verbs that should invoke this hook."""
+
+      def get_verbs(self, noun):
+        """Return the verbs for a particular noun that should invoke this hook."""
+
+      @abstractmethod
+      def pre_command(self, noun, verb, context, commandline):
+        """Execute a hook before invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        Returns: True if the command should be allowed to proceed; False if the command
+        should be rejected.
+        """
+
+      def post_command(self, noun, verb, context, commandline, result):
+        """Execute a hook after invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        * result: the result code returned by the verb.
+        Returns: nothing
+        """
+
+    class GlobalCommandHookRegistry(object):
+      @classmethod
+      def register_command_hook(cls, hook):
+        pass
+
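+To make this concrete, a hook implementing the motivating policy above could look roughly like the
+following sketch against the proposed API. The hook name, the targeted noun/verb, and the `/prod/`
+path convention are illustrative, not part of the proposal.
+
+    class ProdKillallGuard(CommandHook):
+      """Illustrative hook that vetoes `job killall` against production job keys."""
+
+      @property
+      def name(self):
+        return "prod_killall_guard"
+
+      def get_nouns(self):
+        return ["job"]
+
+      def get_verbs(self, noun):
+        return ["killall"] if noun == "job" else []
+
+      def pre_command(self, noun, verb, context, commandline):
+        # Allow the command only if no argument targets a prod environment.
+        return not any("/prod/" in arg for arg in commandline)
+
+      def post_command(self, noun, verb, context, commandline, result):
+        pass
+
+    # Registered from a configuration plugin, making it a global hook.
+    GlobalCommandHookRegistry.register_command_hook(ProdKillallGuard())
+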
+### Skipping Hooks
+
+To skip a hook, a user uses a command-line option, `--skip-hooks`. The option can either
+specify specific hooks to skip, or "all":
+
+* `aurora --skip-hooks=all job create east/bozo/devel/myjob` will create a job
+  without running any hooks.
+* `aurora --skip-hooks=test,iq job create east/bozo/devel/myjob` will create a job,
+  and will skip only the hooks named "test" and "iq".
diff --git a/source/documentation/0.19.0/development/scheduler.md b/source/documentation/0.19.0/development/scheduler.md
new file mode 100644
index 0000000..fc0836a
--- /dev/null
+++ b/source/documentation/0.19.0/development/scheduler.md
@@ -0,0 +1,118 @@
+Developing the Aurora Scheduler
+===============================
+
+The Aurora scheduler is written in Java and built with [Gradle](http://gradle.org).
+
+
+Prerequisite
+============
+
+When using Apache Aurora checked out from the source repository or the binary
+distribution, the Gradle wrapper and JavaScript dependencies are provided.
+However, you need to manually install them when using the source release
+downloads:
+
+1. Install Gradle following the instructions on the [Gradle web site](http://gradle.org)
+2. From the root directory of the Apache Aurora project generate the Gradle
+wrapper by running:
+
+    gradle wrapper
+
+
+Getting Started
+===============
+
+You will need Java 8 installed and on your `PATH` or unzipped somewhere with `JAVA_HOME` set. Then
+
+    ./gradlew tasks
+
+will bootstrap the build system and show available tasks. This can take a while the first time you
+run it but subsequent runs will be much faster due to cached artifacts.
+
+Running the Tests
+-----------------
+Aurora has a comprehensive unit test suite. To run the tests use
+
+    ./gradlew build
+
+Gradle will only re-run tests when their dependencies have changed. To force a re-run of all
+tests use
+
+    ./gradlew clean build
+
+Running the build with code quality checks
+------------------------------------------
+To speed up development iteration, the plain gradle commands will not run static analysis tools.
+However, you should run these before posting a review diff, and **always** run this before pushing a
+commit to origin/master.
+
+    ./gradlew build -Pq
+
+Running integration tests
+-------------------------
+To run the same tests that are run in the Apache Aurora continuous integration
+environment:
+
+    ./build-support/jenkins/build.sh
+
+In addition, there is an end-to-end test that runs a suite of aurora commands
+using a virtual cluster:
+
+    ./src/test/sh/org/apache/aurora/e2e/test_end_to_end.sh
+
+Creating a bundle for deployment
+--------------------------------
+Gradle can create a zip file containing Aurora, all of its dependencies, and a launch script with
+
+    ./gradlew distZip
+
+or a tar file containing the same files with
+
+    ./gradlew distTar
+
+The output file will be written to `dist/distributions/aurora-scheduler.zip` or
+`dist/distributions/aurora-scheduler.tar`.
+
+
+
+Developing Aurora Java code
+===========================
+
+Setting up an IDE
+-----------------
+Gradle can generate project files for your IDE. To generate an IntelliJ IDEA project run
+
+    ./gradlew idea
+
+and import the generated `aurora.ipr` file.
+
+Adding or Upgrading a Dependency
+--------------------------------
+New dependencies can be added from Maven central by adding a `compile` dependency to `build.gradle`.
+For example, to add a dependency on `com.example`'s `example-lib` 1.0 add this block:
+
+    compile 'com.example:example-lib:1.0'
+
+NOTE: Anyone thinking about adding a new dependency should first familiarize themselves with the
+Apache Foundation's third-party licensing
+[policy](http://www.apache.org/legal/resolved.html#category-x).
+
+
+
+Developing the Aurora Build System
+==================================
+
+Bootstrapping Gradle
+--------------------
+The following files were autogenerated by `gradle wrapper` using gradle's
+[Wrapper](http://www.gradle.org/docs/current/dsl/org.gradle.api.tasks.wrapper.Wrapper.html) plugin and
+should not be modified directly:
+
+    ./gradlew
+    ./gradlew.bat
+    ./gradle/wrapper/gradle-wrapper.jar
+    ./gradle/wrapper/gradle-wrapper.properties
+
+To upgrade Gradle unpack the new version somewhere, run `/path/to/new/gradle wrapper` in the
+repository root and commit the changed files.
+
diff --git a/source/documentation/0.19.0/development/thermos.md b/source/documentation/0.19.0/development/thermos.md
new file mode 100644
index 0000000..0efe130
--- /dev/null
+++ b/source/documentation/0.19.0/development/thermos.md
@@ -0,0 +1,126 @@
+The Python components of Aurora are built using [Pants](https://pantsbuild.github.io).
+
+
+Python Build Conventions
+========================
+The Python code is laid out according to the following conventions:
+
+1. 1 `BUILD` per 3rd level directory. For a list of current top-level packages run:
+
+        % find src/main/python -maxdepth 3 -mindepth 3 -type d |\
+        while read dname; do echo $dname |\
+            sed 's@src/main/python/\(.*\)/\(.*\)/\(.*\).*@\1.\2.\3@'; done
+
+2.  Each `BUILD` file exports 1
+    [`python_library`](https://pantsbuild.github.io/build_dictionary.html#bdict_python_library)
+    that provides a
+    [`setup_py`](https://pantsbuild.github.io/build_dictionary.html#setup_py)
+    containing each
+    [`python_binary`](https://pantsbuild.github.io/build_dictionary.html#python_binary)
+    in the `BUILD` file, named the same as the directory it's in so that it can be referenced
+    without a ':' character. The `sources` field in the `python_library` will almost always be
+    `rglobs('*.py')`.
+
+3.  Other BUILD files may only depend on this single public `python_library`
+    target. Any other target is considered a private implementation detail and
+    should be prefixed with an `_`.
+
+4.  `python_binary` targets are always named the same as the exported console script.
+
+5.  `python_binary` targets must have identical `dependencies` to the `python_library` exported
+    by the package and must use `entry_point`.
+
+    This means a PEX file generated by pants will contain exactly the same files that will be
+    available on the `PYTHONPATH` in the case of `pip install` of the corresponding library
+    target. This will help our migration off of Pants in the future.
+
+Annotated example - apache.thermos.runner
+-----------------------------------------
+
+    % find src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner/__init__.py
+    src/main/python/apache/thermos/runner/thermos_runner.py
+    src/main/python/apache/thermos/runner/BUILD
+    % cat src/main/python/apache/thermos/runner/BUILD
+    # License boilerplate omitted
+    import os
+
+
+    # Private target so that a setup_py can exist without a circular dependency. Only targets within
+    # this file should depend on this.
+    python_library(
+      name = '_runner',
+      # The target covers every python file under this directory and subdirectories.
+      sources = rglobs('*.py'),
+      dependencies = [
+        '3rdparty/python:twitter.common.app',
+        '3rdparty/python:twitter.common.log',
+        # Source dependencies are always referenced without a ':'.
+        'src/main/python/apache/thermos/common',
+        'src/main/python/apache/thermos/config',
+        'src/main/python/apache/thermos/core',
+      ],
+    )
+
+    # Binary target for thermos_runner.pex. Nothing should depend on this - it's only used as an
+    # argument to ./pants binary.
+    python_binary(
+      name = 'thermos_runner',
+      # Use entry_point, not source so the files used here are the same ones tests see.
+      entry_point = 'apache.thermos.bin.thermos_runner',
+      dependencies = [
+        # Notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+    )
+
+    # The public library that everyone importing the runner symbols uses.
+    # The test targets and any other dependent source code should depend on this.
+    python_library(
+      name = 'runner',
+      dependencies = [
+        # Again, notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+      # We always provide a setup_py. This will cause any dependee libraries to automatically
+      # reference this library in their requirements.txt rather than copy the source files into their
+      # sdist.
+      provides = setup_py(
+        # Conventionally named and versioned.
+        name = 'apache.thermos.runner',
+        version = open(os.path.join(get_buildroot(), '.auroraversion')).read().strip().upper(),
+      ).with_binaries({
+        # Every binary in this file should also be repeated here.
+        # Always use the dict-form of .with_binaries so that commands with dashes in their names are
+        # supported.
+        # The console script name is always the same as the PEX with .pex stripped.
+        'thermos_runner': ':thermos_runner',
+      }),
+    )
+
+
+
+Thermos Test resources
+======================
+
+The Aurora source repository and distributions contain several
+[binary files](../../src/test/resources/org/apache/thermos/root/checkpoints) to
+qualify the backwards-compatibility of thermos with checkpoint data. Since
+thermos persists state to disk (to be read by the thermos observer), it is important that we have
+tests that prevent regressions affecting the ability to parse previously-written data.
+
+The files included represent persisted checkpoints that exercise different
+features of thermos. The existing files should not be modified unless
+we are accepting backwards incompatibility, such as with a major release.
+
+It is not practical to write source code to generate these files on the fly,
+as source would be vulnerable to drift (e.g. due to refactoring) in ways
+that would undermine the goal of ensuring backwards compatibility.
+
+The most common reason to add a new checkpoint file would be to provide
+coverage for new thermos features that alter the data format. This is
+accomplished by writing and running a
+[job configuration](../../reference/configuration/) that exercises the feature, and
+copying the checkpoint file from the sandbox directory; by default this is
+`/var/run/thermos/checkpoints/<aurora task id>`.
diff --git a/source/documentation/0.19.0/development/thrift.md b/source/documentation/0.19.0/development/thrift.md
new file mode 100644
index 0000000..cc1948f
--- /dev/null
+++ b/source/documentation/0.19.0/development/thrift.md
@@ -0,0 +1,54 @@
+Thrift
+======
+
+Aurora uses [Apache Thrift](https://thrift.apache.org/) for representing structured data in
+client/server RPC protocol as well as for internal data storage. While Thrift is capable of
+correctly handling additions and renames of the existing members, field removals must be done
+carefully to ensure backwards compatibility and provide a predictable deprecation cycle. This
+document describes general guidelines for making Thrift schema changes to the existing fields in
+[api.thrift](https://github.com/apache/aurora/blob/rel/0.19.0/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+It is highly recommended to go through the
+[Thrift: The Missing Guide](http://diwakergupta.github.io/thrift-missing-guide/) first to refresh on
+basic Thrift schema concepts.
+
+Checklist
+---------
+Every existing Thrift schema modification is unique in its requirements and must be analyzed
+carefully to identify its scope and expected consequences. The following checklist may help in that
+analysis:
+* Is this a new field/struct? If yes, go ahead
+* Is this a pure field/struct rename without any type/structure change? If yes, go ahead and rename
+* Anything else, read further to make sure your change is properly planned
+
+Deprecation cycle
+-----------------
+Any time a breaking change (e.g.: field replacement or removal) is required, the following cycle
+must be followed:
+
+### vCurrent
+The change is applied in a way that does not break the ability of a scheduler/client running this
+version to communicate with a scheduler/client from vCurrent-1.
+* Do not remove or rename the old field
+* Add a new field as an eventual replacement of the old one and implement a dual read/write
+anywhere the old field is used. If a thrift struct is mapped in the DB store, make sure both columns
+are marked as `NOT NULL`.
+* Check [storage.thrift](https://github.com/apache/aurora/blob/rel/0.19.0/api/src/main/thrift/org/apache/aurora/gen/storage.thrift) to see if
+the affected struct is stored in Aurora scheduler storage. If so, it's almost certainly also
+necessary to perform a [DB migration](../db-migration/).
+* Add a deprecation jira ticket into the vCurrent+1 release candidate
+* Add a TODO for the deprecated field mentioning the jira ticket
+
+### vCurrent+1
+Finalize the change by removing the deprecated fields from the Thrift schema.
+* Drop any dual read/write routines added in the previous version
+* Remove thrift backfilling in scheduler
+* Remove the deprecated Thrift field
+
+Testing
+-------
+It's always advisable to test your changes in the local vagrant environment to build more
+confidence that your change is backwards compatible. It's easy to simulate different
+client/scheduler versions by playing with the `aurorabuild` command. See [this document](../../getting-started/vagrant/)
+for more.
+
diff --git a/source/documentation/0.19.0/development/ui.md b/source/documentation/0.19.0/development/ui.md
new file mode 100644
index 0000000..f41614e
--- /dev/null
+++ b/source/documentation/0.19.0/development/ui.md
@@ -0,0 +1,46 @@
+Developing the Aurora Scheduler UI
+==================================
+
+Installing bower (optional)
+----------------------------
+Third party JS libraries used in Aurora (located at 3rdparty/javascript/bower_components) are
+managed by bower, a JS dependency manager. Bower is only required if you plan to add, remove or
+update JS libraries. Bower can be installed using the following command:
+
+    npm install -g bower
+
+Bower depends on node.js and npm. The easiest way to install node on a Mac is via Homebrew (`brew`):
+
+    brew install node
+
+For more node.js installation options refer to https://github.com/joyent/node/wiki/Installation.
+
+More info on installing and using bower can be found at: http://bower.io/. Once installed, you can
+use the following commands to view and modify the bower repo at
+3rdparty/javascript/bower_components
+
+    bower list
+    bower install <library name>
+    bower remove <library name>
+    bower update <library name>
+    bower help
+
+
+Faster Iteration in Vagrant
+---------------------------
+The scheduler serves UI assets from the classpath. For production deployments this means the assets
+are served from within a jar. However, for faster development iteration, the vagrant image is
+configured to add the `scheduler` subtree of `/vagrant/dist/resources/main` to the head of
+`CLASSPATH`. This path is configured as a shared filesystem to the path on the host system where
+your Aurora repository lives. This means that any updates under `dist/resources/main/scheduler` in
+your checkout will be reflected immediately in the UI served from within the vagrant image.
+
+The one caveat to this is that this path is under `dist` not `src`. This is because the assets must
+be processed by gradle before they can be served. So, unfortunately, you cannot just save your local
+changes and see them reflected in the UI; you must first run `./gradlew processResources`. This is
+less than ideal, but better than having to restart the scheduler after every change. Additionally,
+gradle makes this process somewhat easier with the `--continuous` flag. If you run
+`./gradlew processResources --continuous`, gradle will monitor the filesystem for changes and run the
+task automatically as necessary. This doesn't quite provide hot-reload capabilities, but it does
+allow for <5s from save to changes being visible in the UI with no further action required on the
+part of the developer.
diff --git a/source/documentation/0.19.0/features/constraints.md b/source/documentation/0.19.0/features/constraints.md
new file mode 100644
index 0000000..4ade848
--- /dev/null
+++ b/source/documentation/0.19.0/features/constraints.md
@@ -0,0 +1,126 @@
+Scheduling Constraints
+======================
+
+By default, Aurora will pick any random agent with sufficient resources
+in order to schedule a task. This scheduling choice can be further
+restricted with the help of constraints.
+
+
+Mesos Attributes
+----------------
+
+Data centers are often organized with hierarchical failure domains.  Common failure domains
+include hosts, racks, rows, and PDUs.  If you have this information available, it is wise to tag
+the Mesos agent with them as
+[attributes](https://mesos.apache.org/documentation/attributes-resources/).
+
+The Mesos agent `--attributes` command line argument can be used to mark agents with
+static key/value pairs, so-called attributes (not to be confused with `--resources`, which are
+dynamic and accounted).
+
+For example, consider the host `cluster1-aaa-03-sr2` and its following attributes (given in
+key:value format): `host:cluster1-aaa-03-sr2` and `rack:aaa`.
+
+Aurora makes these attributes available for matching with scheduling constraints.
+
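+For instance, using the attributes above, a job could require rack `aaa` via a value constraint
+(constraint types are described in the sections below):
+
+    constraints = {
+      'rack': 'aaa',
+    }
+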
+
+Limit Constraints
+-----------------
+
+Limit constraints allow you to control machine diversity. The constraint below
+ensures that no more than two instances of your job may run on a single host.
+Think of this as a "group by" limit.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'host': 'limit:2',
+      }
+      ...
+    )
+
+
+Likewise, you can use constraints to control rack diversity, e.g. at
+most one task per rack:
+
+    constraints = {
+      'rack': 'limit:1',
+    }
+
+Use these constraints sparingly as they can dramatically reduce Tasks' schedulability.
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+
+Value Constraints
+-----------------
+
+Value constraints can be used to express that a certain attribute with a certain value
+should be present on a Mesos agent. For example, the following job would only be
+scheduled on nodes that claim to have an `SSD` as their disk.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'disk': 'SSD',
+      }
+      ...
+    )
+
+
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+Running stateful services
+-------------------------
+
+Aurora is best suited to run stateless applications, but it also accommodates stateful services
+like databases, or services that otherwise need to always run on the same machines.
+
+### Dedicated attribute
+
+Most Mesos attributes are arbitrary and available for custom use.  There is one exception,
+though: the `dedicated` attribute.  Aurora treats this attribute specially: machines marked with it
+will only run jobs whose dedicated constraint matches, and such jobs will only be scheduled on
+those machines.
+
+
+#### Syntax
+The dedicated attribute has semantic meaning. The format is `$role(/.*)?`. When a job is created,
+the scheduler requires that the `$role` component matches the `role` field in the job
+configuration, and will reject the job creation otherwise.  The remainder of the attribute is
+free-form. We've developed the idiom of formatting this attribute as `$role/$job`, but do not
+enforce this. For example: a job `devcluster/www-data/prod/hello` with a dedicated constraint set as
+`www-data/web.multi` will have its tasks scheduled only on Mesos agents configured with:
+`--attributes=dedicated:www-data/web.multi`.
+
+A wildcard (`*`) may be used for the role portion of the dedicated attribute, which will allow any
+owner to elect for a job to run on the host(s). For example: tasks from both
+`devcluster/www-data/prod/hello` and `devcluster/vagrant/test/hello` with a dedicated constraint
+formatted as `*/web.multi` will be scheduled only on Mesos agents configured with
+`--attributes=dedicated:*/web.multi`. This may be useful when assembling a virtual cluster of
+machines sharing the same set of traits or requirements.
+
+##### Example
+Consider the following agent command line:
+
+    mesos-slave --attributes="dedicated:db_team/redis" ...
+
+And this job configuration:
+
+    Service(
+      name = 'redis',
+      role = 'db_team',
+      constraints = {
+        'dedicated': 'db_team/redis'
+      }
+      ...
+    )
+
+The job configuration is indicating that it should only be scheduled on agents with the attribute
+`dedicated:db_team/redis`.  Additionally, Aurora will prevent any tasks that do _not_ have that
+constraint from running on those agents.
+
diff --git a/source/documentation/0.19.0/features/containers.md b/source/documentation/0.19.0/features/containers.md
new file mode 100644
index 0000000..5bf1601
--- /dev/null
+++ b/source/documentation/0.19.0/features/containers.md
@@ -0,0 +1,130 @@
+Containers
+==========
+
+Aurora supports several containerizers, notably the Mesos containerizer and the Docker
+containerizer. The Mesos containerizer uses native OS features directly to provide isolation between
+containers, while the Docker containerizer delegates container management to the Docker engine.
+
+The support for launching container images via both containerizers has to be
+[enabled by a cluster operator](../../operations/configuration/#containers).
+
+Mesos Containerizer
+-------------------
+
+The Mesos containerizer is the native Mesos containerizer solution. It allows tasks to be
+run with an array of [pluggable isolators](../resource-isolation/) and can launch tasks using
+[Docker](https://github.com/docker/docker/blob/master/image/spec/v1.md) images,
+[AppC](https://github.com/appc/spec/blob/master/SPEC.md) images, or directly on the agent host
+filesystem.
+
+The following example (available in our [Vagrant environment](../../getting-started/vagrant/))
+launches a hello world example within a `debian/jessie` Docker image:
+
+    $ cat /vagrant/examples/jobs/hello_docker_image.aurora
+    hello_loop = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = Task(
+      processes = [hello_loop],
+      resources = Resources(cpu=1, ram=1*MB, disk=8*MB)
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker_image',
+        task = task,
+        container = Mesos(image=DockerImage(name='debian', tag='jessie'))
+      )
+    ]
+
+Docker and Appc images are designated using an appropriate `image` property of the `Mesos`
+configuration object. If either `container` or `image` is left unspecified, the host filesystem
+will be used. Further details of how to specify images can be found in the
+[Reference Documentation](../../reference/configuration/#mesos-object).
+
+By default, Aurora launches processes as the Linux user named after the role in use (e.g. `www-data`
+in the example above). This user has to exist on the host filesystem. If it does not exist within
+the container image, it will be created automatically. Otherwise, this user and its primary group
+have to exist in the image with matching uid/gid.
+
+For more information on the Mesos containerizer filesystem, namespace, and isolator features, visit
+[Mesos Containerizer](http://mesos.apache.org/documentation/latest/mesos-containerizer/) and
+[Mesos Container Images](http://mesos.apache.org/documentation/latest/container-image/).
+
+
+Docker Containerizer
+--------------------
+
+The Docker containerizer launches container images using the Docker engine. It may often provide
+more advanced features than the native Mesos containerizer, but has to be installed on each agent
+host in addition to Mesos.
+
+Starting with the 0.17.0 release, `image` can be specified with a `{{docker.image[name][tag]}}` binder so that
+the tag can be resolved to a concrete image digest. This ensures that the job always uses the same image
+across restarts, even if the version identified by the tag has been updated, guaranteeing that only job
+updates can mutate configuration.
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/hello_docker_engine.aurora
+    hello_loop = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = Task(
+      processes = [hello_loop],
+      resources = Resources(cpu=1, ram=1*MB, disk=8*MB)
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker',
+        task = task,
+        container = Docker(image = 'python:2.7')
+      ), Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker_engine_binding',
+        task = task,
+        container = Docker(image = '{{docker.image[library/python][2.7]}}')
+      )
+    ]
+
+Note that this feature requires a v2 Docker registry. If using a private Docker registry, its URL
+must be specified in the `clusters.json` configuration file under the key `docker_registry`.
+If not specified, `docker_registry` defaults to `https://registry-1.docker.io` (Docker Hub).
+
+Example:
+
+    # clusters.json
+    [{
+      "name": "devcluster",
+      ...
+      "docker_registry": "https://registry.example.com"
+    }]
+
+Details of how to use Docker via the Docker engine can be found in the
+[Reference Documentation](../../reference/configuration/#docker-object). Please note that in order to
+correctly execute processes inside a job, the Docker container must have Python 2.7 and potentially
+further Mesos dependencies installed. This limitation does not hold for Docker containers used via
+the Mesos containerizer.
+
+For more information on launching Docker containers through the Docker containerizer, visit
+[Docker Containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/)
diff --git a/source/documentation/0.19.0/features/cron-jobs.md b/source/documentation/0.19.0/features/cron-jobs.md
new file mode 100644
index 0000000..bcc94dc
--- /dev/null
+++ b/source/documentation/0.19.0/features/cron-jobs.md
@@ -0,0 +1,124 @@
+# Cron Jobs
+
+Aurora supports execution of scheduled jobs on a Mesos cluster using cron-style syntax.
+
+- [Overview](#overview)
+- [Collision Policies](#collision-policies)
+- [Failure recovery](#failure-recovery)
+- [Interacting with cron jobs via the Aurora CLI](#interacting-with-cron-jobs-via-the-aurora-cli)
+	- [cron schedule](#cron-schedule)
+	- [cron deschedule](#cron-deschedule)
+	- [cron start](#cron-start)
+	- [job killall, job restart, job kill](#job-killall-job-restart-job-kill)
+- [Technical Note About Syntax](#technical-note-about-syntax)
+- [Caveats](#caveats)
+	- [Failovers](#failovers)
+	- [Collision policy is best-effort](#collision-policy-is-best-effort)
+	- [Timezone Configuration](#timezone-configuration)
+
+## Overview
+
+A job is identified as a cron job by the presence of a
+`cron_schedule` attribute containing a cron-style schedule in the
+[`Job`](../../reference/configuration/#job-objects) object. Examples of cron schedules
+include "every 5 minutes" (`*/5 * * * *`), "Fridays at 17:00" (`* 17 * * FRI`), and
+"the 1st and 15th day of the month at 03:00" (`0 3 1,15 *`).
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/cron_hello_world.aurora
+    # A cron job that runs every 5 minutes.
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]
+
+## Collision Policies
+
+The `cron_collision_policy` field specifies the scheduler's behavior when a new cron job is
+triggered while an older run hasn't finished. The scheduler has two policies available:
+
+* `KILL_EXISTING`: The default policy - on a collision the old instances are killed and instances with the current
+configuration are started.
+* `CANCEL_NEW`: On a collision the new run is cancelled.
+
+Note that the use of `CANCEL_NEW` is likely a code smell - interrupted cron jobs should be able
+to recover their progress on a subsequent invocation, otherwise they risk having their work queue
+grow faster than they can process it.
+
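+For illustration, the policy is set directly on the `Job` object; a sketch based on the example
+above (all other fields unchanged):
+
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        cron_collision_policy = 'CANCEL_NEW',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]
+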
+## Failure recovery
+
+Unlike services, which Aurora will always re-execute regardless of exit status, instances of
+cron jobs retry according to the `max_task_failures` attribute of the
+[Task](../../reference/configuration/#task-object) object. To get "run-until-success" semantics,
+set `max_task_failures` to `-1`.
+
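+As a sketch (process and resource values are illustrative), run-until-success semantics could be
+expressed on the task like this:
+
+    task = Task(
+      name = 'cron_hello_world',
+      processes = [Process(
+        name = 'hello',
+        cmdline = 'echo "Hello world from cron, the time is now $(date --rfc-822)"')],
+      resources = Resources(cpu = 0.1, ram = 16*MB, disk = 16*MB),
+      max_task_failures = -1
+    )
+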
+## Interacting with cron jobs via the Aurora CLI
+
+Most interaction with cron jobs takes place using the `cron` subcommand. See `aurora cron -h`
+for up-to-date usage instructions.
+
+### cron schedule
+Schedules a new cron job on the Aurora cluster for later runs or replaces the existing cron template
+with a new one. Only future runs will be affected; any existing active tasks are left intact.
+
+    $ aurora cron schedule devcluster/www-data/test/cron_hello_world /vagrant/examples/jobs/cron_hello_world.aurora
+
+### cron deschedule
+Deschedules a cron job, preventing future runs but allowing current runs to complete.
+
+    $ aurora cron deschedule devcluster/www-data/test/cron_hello_world
+
+### cron start
+Start a cron job immediately, outside of its normal cron schedule.
+
+    $ aurora cron start devcluster/www-data/test/cron_hello_world
+
+### job killall, job restart, job kill
+Cron jobs create instances running on the cluster that you can interact with like normal Aurora
+tasks with `job kill` and `job restart`.
+
+
+## Technical Note About Syntax
+
+`cron_schedule` uses a restricted subset of BSD crontab syntax. While the
+execution engine currently uses Quartz, the schedule parsing is custom, a subset of FreeBSD
+[crontab(5)](http://www.freebsd.org/cgi/man.cgi?crontab(5)) syntax. See
+[the source](https://github.com/apache/aurora/blob/master/src/main/java/org/apache/aurora/scheduler/cron/CrontabEntry.java#L106-L124)
+for details.
+
+
+## Caveats
+
+### Failovers
+No failover recovery. Aurora does not record the latest minute it fired
+triggers for across failovers. Therefore it's possible to miss triggers
+on failover. Note that this behavior may change in the future.
+
+It's necessary to sync time between schedulers with something like `ntpd`.
+Clock skew could cause double or missed triggers in the case of a failover.
+
+### Collision policy is best-effort
+Aurora aims to always have *at least one copy* of a given instance running at a time - it's
+an AP system, meaning it chooses Availability and Partition Tolerance at the expense of
+Consistency.
+
+If your collision policy was `CANCEL_NEW` and a task has terminated but
+Aurora has not noticed this, Aurora will go ahead and create your new
+task.
+
+If your collision policy was `KILL_EXISTING` and a task was marked `LOST`
+but not yet GCed, Aurora will go ahead and create your new task without
+attempting to kill the old one (outside the GC interval).
+
+### Timezone Configuration
+Cron timezone is configured independently of JVM timezone with the `-cron_timezone` flag and
+defaults to UTC.
diff --git a/source/documentation/0.19.0/features/custom-executors.md b/source/documentation/0.19.0/features/custom-executors.md
new file mode 100644
index 0000000..1357c1e
--- /dev/null
+++ b/source/documentation/0.19.0/features/custom-executors.md
@@ -0,0 +1,153 @@
+Custom Executors
+================
+
+If the need arises to use a Mesos executor other than the Thermos executor, the scheduler can be
+configured to utilize a custom executor by specifying the `-custom_executor_config` flag.
+The flag must be set to the path of a valid executor configuration file.
+
+The configuration file must be a valid **JSON array** and contain, at minimum,
+one executor configuration including the name, command, and resources fields. It
+must be pointed to by the `-custom_executor_config` flag when the scheduler is
+started.
+
+### Array Entry
+
+Property                 | Description
+-----------------------  | ---------------------------------
+executor (required)      | Description of executor.
+task_prefix (required)   | Prefix given to tasks launched with this executor's configuration.
+volume_mounts (optional) | Volumes to be mounted in container running executor.
+
+#### executor
+
+Property                 | Description
+-----------------------  | ---------------------------------
+name (required)          | Name of the executor.
+command (required)       | How to run the executor.
+resources (required)     | Overhead to use for each executor instance.
+
+#### command
+
+Property                 | Description
+-----------------------  | ---------------------------------
+value (required)         | The command to execute.
+arguments (optional)     | A list of arguments to pass to the command.
+uris (optional)          | List of resources to download into the task sandbox.
+shell (optional)         | Run executor via shell.
+
+A note on the command property (from [mesos.proto](https://github.com/apache/mesos/blob/master/include/mesos/mesos.proto)):
+
+```
+1) If 'shell == true', the command will be launched via shell
+   (i.e., /bin/sh -c 'value'). The 'value' specified will be
+   treated as the shell command. The 'arguments' will be ignored.
+2) If 'shell == false', the command will be launched by passing
+   arguments to an executable. The 'value' specified will be
+   treated as the filename of the executable. The 'arguments'
+   will be treated as the arguments to the executable. This is
+   similar to how POSIX exec families launch processes (i.e.,
+   execlp(value, arguments(0), arguments(1), ...)).
+```
+
+##### uris (list)
+* Follows the [Mesos Fetcher schema](http://mesos.apache.org/documentation/latest/fetcher/)
+
+Property                 | Description
+-----------------------  | ---------------------------------
+value (required)         | Path to the resource needed in the sandbox.
+executable (optional)    | Change resource to be executable via chmod.
+extract (optional)       | Extract files from packed or compressed archives into the sandbox.
+cache (optional)         | Use caching mechanism provided by Mesos for resources.
+
+#### resources (list)
+
+Property             | Description
+-------------------  | ---------------------------------
+name (required)      | Name of the resource: cpus or mem.
+type (required)      | Type of resource. Should always be SCALAR.
+scalar (required)    | Value in float for cpus or int for mem (in MBs)
+
+### volume_mounts (list)
+
+Property                     | Description
+---------------------------  | ---------------------------------
+host_path (required)         | Host path to mount inside the container.
+container_path (required)    | Path inside the container where `host_path` will be mounted.
+mode (required)              | Mode in which to mount the volume, Read-Write (RW) or Read-Only (RO).
+
+A sample configuration is as follows:
+
+```json
+[
+    {
+      "executor": {
+        "name": "myExecutor",
+        "command": {
+          "value": "myExecutor.a",
+          "shell": "false",
+          "arguments": [
+            "localhost:2181",
+            "-verbose",
+            "-config myConfiguration.config"
+          ],
+          "uris": [
+            {
+              "value": "/dist/myExecutor.a",
+              "executable": true,
+              "extract": false,
+              "cache": true
+            },
+            {
+              "value": "/home/user/myConfiguration.config",
+              "executable": false,
+              "extract": false,
+              "cache": false
+            }
+          ]
+        },
+        "resources": [
+          {
+            "name": "cpus",
+            "type": "SCALAR",
+            "scalar": {
+              "value": 1.00
+            }
+          },
+          {
+            "name": "mem",
+            "type": "SCALAR",
+            "scalar": {
+              "value": 512
+            }
+          }
+        ]
+      },
+      "volume_mounts": [
+        {
+          "mode": "RO",
+          "container_path": "/path/on/container",
+          "host_path": "/path/to/host/directory"
+        },
+        {
+          "mode": "RW",
+          "container_path": "/container",
+          "host_path": "/host"
+        }
+      ],
+      "task_prefix": "my-executor-"
+    }
+]
+```
+
+It should be noted that if you do not use Thermos or a Thermos-based executor, links in the scheduler's
+Web UI for tasks will not work (at least for the time being).
+Some information about launched tasks can still be accessed via the Mesos Web UI or via the Aurora Client.
+
+### Using a custom executor
+
+At this time, it is not possible to create a job that runs on a custom executor using the default
+Aurora client. To allow the scheduler to pick the correct executor, the `JobConfiguration.TaskConfig.ExecutorConfig.name`
+field must be set to match the name used in the custom executor configuration blob. (e.g. to run a job using myExecutor,
+`JobConfiguration.TaskConfig.ExecutorConfig.name` must be set to `myExecutor`). Until support for modifying
+this field via Pystachio is in place, the easiest way to launch jobs with custom executors is to use
+an existing custom client such as [gorealis](https://github.com/rdelval/gorealis).
diff --git a/source/documentation/0.19.0/features/job-updates.md b/source/documentation/0.19.0/features/job-updates.md
new file mode 100644
index 0000000..781252e
--- /dev/null
+++ b/source/documentation/0.19.0/features/job-updates.md
@@ -0,0 +1,110 @@
+Aurora Job Updates
+==================
+
+`Job` configurations can be updated at any point in their lifecycle.
+Usually updates are done incrementally using a process called a *rolling
+upgrade*, in which Tasks are upgraded in small groups, one group at a
+time.  Updates are done using various Aurora Client commands.
+
+
+Rolling Job Updates
+-------------------
+
+There are several sub-commands to manage job updates:
+
+    aurora update start <job key> <configuration file>
+    aurora update info <job key>
+    aurora update pause <job key>
+    aurora update resume <job key>
+    aurora update abort <job key>
+    aurora update list <cluster>
+
+When you `start` a job update, the command will return once it has sent the
+instructions to the scheduler.  At that point, you may view detailed
+progress for the update with the `info` subcommand, in addition to viewing
+graphical progress in the web browser.  You may also get a full listing of
+in-progress updates in a cluster with `list`.
+
+Once an update has been started, you can `pause` to keep the update but halt
+progress.  This can be useful for doing things like debugging a partially-updated
+job to determine whether you would like to proceed.  You can `resume` to
+proceed.
+
+You may `abort` a job update regardless of the state it is in. This will
+instruct the scheduler to completely abandon the job update and leave the job
+in the current (possibly partially-updated) state.
+
+For a configuration update, the Aurora Scheduler calculates required changes
+by examining the current job config state and the new desired job config.
+It then starts a *rolling batched update process* by going through every batch
+and performing these operations, in order:
+
+- If an instance is not present in the scheduler but is present in
+  the new config, then the instance is created.
+- If an instance is present in both the scheduler and the new config, then
+  the scheduler diffs both task configs. If it detects any changes, it
+  performs an instance update by killing the old config instance and adds
+  the new config instance.
+- If an instance is present in the scheduler but isn't in the new config,
+  then that instance is killed.
+
+The Aurora Scheduler continues through the instance list until all tasks are
+updated and in `RUNNING`. If the scheduler determines the update is not going
+well (based on the criteria specified in the UpdateConfig), it cancels the update.
+
+Update cancellation runs a procedure similar to the update sequence described
+above, but in reverse order. New instance configs are swapped
+with old instance configs and batch updates proceed backwards
+from the point where the update failed. E.g., (0,1,2) (3,4,5) (6,7,
+8-FAIL) results in a rollback in order (8,7,6) (5,4,3) (2,1,0).
+
+For details on how to control a job update, please see the
+[UpdateConfig](../../reference/configuration/#updateconfig-objects) configuration object.
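+
+As an illustrative sketch (values are examples only), the batching and failure criteria mentioned
+above are supplied through an `UpdateConfig` passed to the Job's `update_config` parameter:
+
+    update_config = UpdateConfig(
+      batch_size = 3,               # number of instances updated per batch
+      watch_secs = 45,              # how long an instance must stay RUNNING to count as healthy
+      max_per_shard_failures = 2,   # per-instance failures tolerated before counting a total failure
+      max_total_failures = 5,       # total failures tolerated before the update is aborted
+      rollback_on_failure = True    # automatically roll back a failed update
+    )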
+
+
+Coordinated Job Updates
+------------------------
+
+Some Aurora services may benefit from having more control over updates by explicitly
+acknowledging ("heartbeating") job update progress. This may be helpful for mission-critical
+service updates where explicit job health monitoring is vital during the entire job update
+lifecycle. Such job updates would rely on an external service (or a custom client) periodically
+pulsing an active coordinated job update via a
+[pulseJobUpdate RPC](https://github.com/apache/aurora/blob/rel/0.19.0/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+A coordinated update is defined by setting a positive
+[pulse_interval_secs](../../reference/configuration/#updateconfig-objects) value in the job configuration
+file. If no pulses are received within the specified interval, the update will be blocked. A blocked
+update is unable to continue rolling forward (or rolling back) but retains its active status.
+It may only be unblocked by a fresh `pulseJobUpdate` call.
+
+NOTE: A coordinated update starts in `ROLL_FORWARD_AWAITING_PULSE` state and will not make any
+progress until the first pulse arrives. However, a paused update (`ROLL_FORWARD_PAUSED` or
+`ROLL_BACK_PAUSED`) is still considered active and upon resuming will immediately make progress
+provided the pulse interval has not expired.
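+
+A minimal sketch of such a coordinator, assuming Thrift-generated Python bindings and an already
+constructed, authenticated scheduler client (both are assumptions; a client library such as gorealis
+can play the same role):
+
+    import time
+
+    def keep_update_alive(scheduler_client, update_key, period_secs=30):
+      """Pulse an active coordinated update so it never blocks.
+
+      `scheduler_client` is assumed to expose the scheduler Thrift interface
+      (including pulseJobUpdate) and `update_key` to identify the active update.
+      """
+      while True:
+        scheduler_client.pulseJobUpdate(update_key)  # keeps the update rolling forward (or back)
+        time.sleep(period_secs)                      # keep this well below pulse_interval_secs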
+
+
+Canary Deployments
+------------------
+
+Canary deployments are a pattern for rolling out updates to a subset of job instances,
+in order to test different code versions alongside the actual production job.
+It is a risk-mitigation strategy for job owners and commonly used in a form where
+job instance 0 runs with a different configuration than the instances 1-N.
+
+For example, consider a job with 4 instances that each
+request 1 core of cpu, 1 GB of RAM, and 1 GB of disk space as specified
+in the configuration file `hello_world.aurora`. If you want to
+update it so it requests 2 GB of RAM instead of 1, you can create a new
+configuration file called `new_hello_world.aurora` and
+issue
+
+    aurora update start <job_key_value>/0-1 new_hello_world.aurora
+
+This results in instances 0 and 1 having 1 cpu, 2 GB of RAM, and 1 GB of disk space,
+while instances 2 and 3 have 1 cpu, 1 GB of RAM, and 1 GB of disk space. If instance 3
+dies and restarts, it restarts with 1 cpu, 1 GB RAM, and 1 GB disk space.
+
+This means that two task configurations can exist for the same job
+at the same time, each valid for a different range of instances. While this isn't a recommended
+pattern, it is valid and supported by the Aurora scheduler.
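+
+Concretely, assuming `hello_world.aurora` follows the tutorial's layout, the hypothetical
+`new_hello_world.aurora` would differ only in its resource stanza, e.g.:
+
+    # new_hello_world.aurora -- identical to hello_world.aurora except for the resources
+    resources = Resources(cpu = 1, ram = 2*GB, disk = 1*GB)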
diff --git a/source/documentation/0.19.0/features/mesos-fetcher.md b/source/documentation/0.19.0/features/mesos-fetcher.md
new file mode 100644
index 0000000..967a905
--- /dev/null
+++ b/source/documentation/0.19.0/features/mesos-fetcher.md
@@ -0,0 +1,46 @@
+Mesos Fetcher
+=============
+
+Mesos has support for downloading resources into the sandbox through the
+use of the [Mesos Fetcher](http://mesos.apache.org/documentation/latest/fetcher/).
+
+Aurora supports passing URIs to the Mesos Fetcher dynamically by including
+a list of URIs in job submissions.
+
+How to use
+----------
+The scheduler flag `-enable_mesos_fetcher` must be set to true.
+
+Currently only the scheduler side of this feature has been implemented,
+so a modification to the existing client or a custom Thrift client is required
+to make use of this feature.
+
+If using a custom Thrift client, the list of URIs must be included in TaskConfig
+as the `mesosFetcherUris` field.
+
+Each Mesos Fetcher URI has the following data members:
+
+|Property | Description|
+|---------|------|
+|value (required)  |Path to the resource needed in the sandbox.|
+|extract (optional)|Extract files from packed or compressed archives into the sandbox.|
+|cache (optional) | Use caching mechanism provided by Mesos for resources.|
+
+Note that this structure is very similar to the one provided for downloading
+resources needed for a [custom executor](../../operations/configuration/).
+
+This is because both features use the Mesos fetcher to retrieve resources into
+the sandbox. However, the custom executor feature uses a static set of URIs
+configured on the server side, whereas the Mesos Fetcher feature uses a dynamic set
+of URIs provided at the time of job submission.
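+
+For illustration, with Thrift-generated Python bindings (the module path and the surrounding client
+code are assumptions) the field could be populated like this:
+
+    # Sketch: assumes Aurora's Thrift-generated Python bindings are importable.
+    from gen.apache.aurora.api.ttypes import MesosFetcherURI, TaskConfig
+
+    task = TaskConfig(
+      # ... other task fields omitted ...
+      mesosFetcherUris = {
+        MesosFetcherURI(value = 'https://example.com/archive.tar.gz',
+                        extract = True,   # unpack the archive into the sandbox
+                        cache = False)    # skip the Mesos fetcher cache
+      }
+    )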
+
+Security Implications
+---------------------
+There are security implications that must be taken into account when enabling this feature.
+**Enabling this feature may potentially enable any job submitting user to perform a privilege escalation.**
+
+Until a more thorough solution is created, one step that has been taken to mitigate this issue
+is to statically mark every user-submitted URI as non-executable. This is in contrast to the set of URIs
+set in the custom executor feature which may mark any URI as executable.
+
+If the need arises to mark a downloaded URI as executable, please consider using the custom executor feature.
\ No newline at end of file
diff --git a/source/documentation/0.19.0/features/multitenancy.md b/source/documentation/0.19.0/features/multitenancy.md
new file mode 100644
index 0000000..08aedd5
--- /dev/null
+++ b/source/documentation/0.19.0/features/multitenancy.md
@@ -0,0 +1,82 @@
+Multitenancy
+============
+
+Aurora is a multi-tenant system that can run jobs of multiple clients/tenants.
+Going beyond the [resource isolation on an individual host](../resource-isolation/), it is
+crucial to prevent those jobs from stepping on each other's toes.
+
+
+Job Namespaces
+--------------
+
+The namespace for jobs in Aurora follows a hierarchical structure. This is meant to make it easier
+to differentiate between different jobs. A job key consists of four parts. The four parts are
+`<cluster>/<role>/<environment>/<jobname>` in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts.
+* Environment names are namespaces.
+* Jobname is the custom name of your job.
+
+Role names correspond to user accounts. They are used for
+[authentication](../../operations/security/), as the linux user used to run jobs, and for the
+assignment of [quota](#preemption). If you don't know what accounts are available, contact your
+sysadmin.
+
+The environment component in the job key serves as a namespace. The values for
+environment are validated in the scheduler. By default, any of `devel`, `test`,
+`production`, and any value matching the regular expression `staging[0-9]*` is allowed. This validation can be
+changed to allow any arbitrary regular expression by setting the scheduler option `allowed_job_environments`.
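+
+For example, an operator who also wants an `integration` environment might start the scheduler with
+a value along the lines of `-allowed_job_environments='^(devel|test|production|integration|staging[0-9]*)$'`
+(the exact value here is only illustrative).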
+
+None of the values imply any difference in the scheduling behavior. Conventionally, the
+"environment" is set so as to indicate a certain level of stability in the behavior of the job
+by ensuring that an appropriate level of testing has been performed on the application code. e.g.
+in the case of a typical Job, releases may progress through the following phases in order of
+increasing level of stability: `devel`, `test`, `staging`, `production`.
+
+
+Configuration Tiers
+-------------------
+
+Tier is a predefined bundle of task configuration options. Aurora schedules tasks and assigns them
+resources based on their tier assignment. The default scheduler tier configuration allows for
+3 tiers:
+
+ - `revocable`: The `revocable` tier requires the task to run with [revocable](../resource-isolation/#oversubscription)
+ resources.
+ - `preemptible`: Setting the task’s tier to `preemptible` allows for the possibility of that task
+ being [preempted](#preemption) by other tasks when cluster is running low on resources.
+ - `preferred`: The `preferred` tier prevents the task from using [revocable](../resource-isolation/#oversubscription)
+ resources and from being [preempted](#preemption).
+
+Since it is possible that a cluster is configured with a custom tier configuration, users should
+consult their cluster administrator to be informed of the tiers supported by the cluster. Attempts
+to schedule jobs with an unsupported tier will be rejected by the scheduler.
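+
+For example, assuming a `task` defined elsewhere in the configuration file, a job is pinned to a
+tier through the `tier` parameter of the Job object (a sketch, with illustrative values):
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'prod',
+        name = 'hello',
+        tier = 'preferred',  # must be one of the tiers configured in this cluster
+        task = task
+      )
+    ]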
+
+
+Preemption
+----------
+
+In order to guarantee that important production jobs are always running, Aurora supports
+preemption.
+
+Consider a pending job that is a candidate for scheduling but cannot be scheduled because of a
+resource shortage. Active tasks can become the victim of preemption if:
+
+ - both candidate and victim are owned by the same role and the
+   [priority](../../reference/configuration/#job-objects) of a victim is lower than the
+   [priority](../../reference/configuration/#job-objects) of the candidate.
+ - OR a victim is a `preemptible` or `revocable` [tier](#configuration-tiers) task and the candidate
+   is a `preferred` [tier](#configuration-tiers) task.
+
+In other words, tasks from `preferred` [tier](../../reference/configuration/#job-objects) jobs may
+preempt tasks from any `preemptible` or `revocable` job. However, a `preferred` task may only be
+preempted by tasks from `preferred` jobs in the same role with higher [priority](../../reference/configuration/#job-objects).
+
+Aurora requires resource quotas for [production non-dedicated jobs](../../reference/configuration/#job-objects).
+Quota is enforced at the job role level and when set, defines a non-preemptible pool of compute resources within
+that role. All job types (service, adhoc or cron) require role resource quota unless a job has a
+[dedicated constraint](../constraints/#dedicated-attribute) set.
+
+To grant quota to a particular role in production, an operator can use the command
+`aurora_admin set_quota`.
diff --git a/source/documentation/0.19.0/features/resource-isolation.md b/source/documentation/0.19.0/features/resource-isolation.md
new file mode 100644
index 0000000..a5294f6
--- /dev/null
+++ b/source/documentation/0.19.0/features/resource-isolation.md
@@ -0,0 +1,181 @@
+Resources Isolation and Sizing
+==============================
+
+This document assumes Aurora and Mesos have been configured
+using our [recommended resource isolation settings](../../operations/configuration/#resource-isolation).
+
+- [Isolation](#isolation)
+- [Sizing](#sizing)
+- [Oversubscription](#oversubscription)
+
+
+Isolation
+---------
+
+Aurora is a multi-tenant system; a single software instance runs on a
+server, serving multiple clients/tenants. To share resources among
+tenants, it leverages Mesos for isolation of:
+
+* CPU
+* GPU
+* memory
+* disk space
+* ports
+
+CPU is a soft limit, and handled differently from memory and disk space.
+Too low a CPU value results in throttling your application and
+slowing it down. Memory and disk space are both hard limits; when your
+application goes over these values, it's killed.
+
+### CPU Isolation
+
+Mesos can be configured to use a quota based CPU scheduler (the *Completely*
+*Fair Scheduler*) to provide consistent and predictable performance.
+This is effectively a guarantee of resources -- you receive at least what
+you requested, but also no more than you've requested.
+
+The scheduler gives applications a CPU quota for every 100 ms interval.
+When an application uses its quota for an interval, it is throttled for
+the rest of the 100 ms. Usage resets for each interval and unused
+quota does not carry over.
+
+For example, an application specifying 4.0 CPU has access to 400 ms of
+CPU time every 100 ms. This CPU quota can be used in different ways,
+depending on the application and available resources. Consider the
+scenarios shown in this diagram.
+
+![CPU Availability](../images/CPUavailability.png)
+
+* *Scenario A*: the application can use up to 4 cores continuously for
+every 100 ms interval. It is never throttled and starts processing
+new requests immediately.
+
+* *Scenario B* : the application uses up to 8 cores (depending on
+availability) but is throttled after 50 ms. The CPU quota resets at the
+start of each new 100 ms interval.
+
+* *Scenario C* : is like Scenario A, but there is a garbage collection
+event in the second interval that consumes all CPU quota. The
+application throttles for the remaining 75 ms of that interval and
+cannot service requests until the next interval. In this example, the
+garbage collection finished in one interval but, depending on how much
+garbage needs collecting, it may take more than one interval and further
+delay service of requests.
+
+*Technical Note*: Mesos considers logical cores, also known as
+hyperthreading or SMT cores, as the unit of CPU.
+
+### Memory Isolation
+
+Mesos uses dedicated memory allocation. Your application always has
+access to the amount of memory specified in your configuration. The
+application's memory use is defined as the sum of the resident set size
+(RSS) of all processes in a shard. Each shard is considered
+independently.
+
+In other words, say you specified a memory size of 10GB. Each shard
+would receive 10GB of memory. If an individual shard's memory demands
+exceed 10GB, that shard is killed, but the other shards continue
+working.
+
+*Technical note*: Total memory size is not enforced at allocation time,
+so your application can request more than its allocation without getting
+an ENOMEM. However, it will be killed shortly after.
+
+### Disk Space
+
+Disk space used by your application is defined as the sum of the files'
+disk space in your application's directory, including the `stdout` and
+`stderr` logged from your application. Each shard is considered
+independently. You should use off-node storage for your application's
+data whenever possible.
+
+In other words, say you specified disk space size of 100MB. Each shard
+would receive 100MB of disk space. If an individual shard's disk space
+demands exceed 100MB, that shard is killed, but the other shards
+continue working.
+
+After your application finishes running, its allocated disk space is
+reclaimed. Thus, your job's final action should move any disk content
+that you want to keep, such as logs, to your home file system or other
+less transitory storage. Disk reclamation takes place an undefined
+period after the application finish time; until then, the disk contents
+are still available but you shouldn't count on them being so.
+
+*Technical note* : Disk space is not enforced at write so your
+application can write above its quota without getting an ENOSPC, but it
+will be killed shortly after. This is subject to change.
+
+### GPU Isolation
+
+GPU isolation will be supported for Nvidia devices starting from Mesos 1.0.
+Access to the allocated units will be exclusive with no sharing between tasks
+allowed (e.g. no fractional GPU allocation). For more details, see the
+[Mesos design document](https://docs.google.com/document/d/10GJ1A80x4nIEo8kfdeo9B11PIbS1xJrrB4Z373Ifkpo/edit#heading=h.w84lz7p4eexl)
+and the [Mesos agent configuration](http://mesos.apache.org/documentation/latest/configuration/).
+
+### Other Resources
+
+Other resources, such as network bandwidth, do not have any performance
+guarantees. For some resources, such as memory bandwidth, there are no
+practical sharing methods so some application combinations collocated on
+the same host may cause contention.
+
+
+Sizing
+-------
+
+### CPU Sizing
+
+To correctly size Aurora-run Mesos tasks, specify a per-shard CPU value
+that lets the task run at its desired performance when at peak load
+distributed across all shards. Include reserve capacity of at least 50%,
+possibly more, depending on how critical your service is (or how
+confident you are about your original estimate :-)), ideally by
+increasing the number of shards to also improve resiliency. When running
+your application, observe its CPU stats over time. If consistently at or
+near your quota during peak load, you should consider increasing either
+per-shard CPU or the number of shards.
+
+### Memory Sizing
+
+Size for your application's peak requirement. Observe the per-instance
+memory statistics over time, as memory requirements can vary over
+different periods. Remember that if your application exceeds its memory
+value, it will be killed, so you should also add a safety margin of
+around 10-20%. If you have the ability to do so, you may also want to
+put alerts on the per-instance memory.
+
+### Disk Space Sizing
+
+Size for your application's peak requirement. Rotate and discard log
+files as needed to stay within your quota. When running a Java process,
+add the maximum size of the Java heap to your disk space requirement, in
+order to account for an out of memory error dumping the heap
+into the application's sandbox space.
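+
+As an illustrative sketch, a shard that rotates roughly 1 GB of logs and runs a JVM with a 4 GB
+maximum heap might be sized as follows:
+
+    resources = Resources(
+      cpu  = 2,
+      ram  = 6*GB,        # 4 GB heap plus headroom for off-heap and native memory
+      disk = 1*GB + 4*GB  # rotated logs plus room for a heap dump on an out of memory error
+    )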
+
+### GPU Sizing
+
+GPU is highly dependent on your application requirements and is only limited
+by the number of physical GPU units available on a target box.
+
+
+Oversubscription
+----------------
+
+Mesos supports [oversubscription of machine resources](http://mesos.apache.org/documentation/latest/oversubscription/)
+via the concept of revocable tasks. In contrast to non-revocable tasks, revocable tasks are best-effort.
+Mesos reserves the right to throttle or even kill them if they might affect existing high-priority
+user-facing services.
+
+As of today, the only revocable resources supported by Aurora are CPU and RAM. A job can
+opt-in to use those by specifying the `revocable` [Configuration Tier](../../features/multitenancy/#configuration-tiers).
+A revocable job will only be scheduled using revocable resources, even if there are plenty of
+non-revocable resources available.
+
+The Aurora scheduler must be [configured to receive revocable offers](../../operations/configuration/#resource-isolation)
+from Mesos and accept revocable jobs. If not configured properly, revocable tasks will never get
+assigned to hosts and will stay in `PENDING`.
+
+For details on how to mark a job as being revocable, see the
+[Configuration Reference](../../reference/configuration/).
diff --git a/source/documentation/0.19.0/features/service-discovery.md b/source/documentation/0.19.0/features/service-discovery.md
new file mode 100644
index 0000000..5a97eaf
--- /dev/null
+++ b/source/documentation/0.19.0/features/service-discovery.md
@@ -0,0 +1,42 @@
+Service Discovery
+=================
+
+It is possible for the Aurora executor to announce tasks into ServerSets for
+the purpose of service discovery.  ServerSets use the Zookeeper [group membership pattern](http://zookeeper.apache.org/doc/trunk/recipes.html#sc_outOfTheBox)
+of which there are several reference implementations:
+
+  - [C++](https://github.com/apache/mesos/blob/master/src/zookeeper/group.cpp)
+  - [Java](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/zookeeper/ServerSetImpl.java#L221)
+  - [Python](https://github.com/twitter/commons/blob/master/src/python/twitter/common/zookeeper/serverset/serverset.py#L51)
+
+These can also be used natively in Finagle using the [ZookeeperServerSetCluster](https://github.com/twitter/finagle/blob/master/finagle-serversets/src/main/scala/com/twitter/finagle/zookeeper/ZookeeperServerSetCluster.scala).
+
+For more information about how to configure announcing, see the [Configuration Reference](../../reference/configuration/).
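+
+As a brief sketch (port names are examples only), announcing is requested through the `announce`
+parameter of the Job object:
+
+    jobs = [
+      Service(
+        # ... cluster, role, environment, name and task as usual ...
+        announce = Announcer(
+          primary_port = 'http',        # port registered in the ServerSet as the primary endpoint
+          portmap = {'aurora': 'http'}  # alias the legacy 'aurora' port name to the http port
+        )
+      )
+    ]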
+
+Using Mesos DiscoveryInfo
+-------------------------
+Experimental support for populating DiscoveryInfo in Mesos is introduced in Aurora. This can be used to build
+a custom service discovery system that does not rely on ZooKeeper. Please see the `Service Discovery` section in the
+[Mesos Framework Development guide](http://mesos.apache.org/documentation/latest/app-framework-development-guide/) for an
+explanation of the protobuf message in Mesos.
+
+To use this feature, please enable the `--populate_discovery_info` flag on the scheduler. All jobs started by the scheduler
+afterwards will have their portmap populated to Mesos and discoverable via the `/state` endpoint on the Mesos master and agent.
+
+### Using Mesos DNS
+An example is using [Mesos-DNS](https://github.com/mesosphere/mesos-dns), which is able to generate multiple DNS
+records. With the current implementation, the example job with key `devcluster/vagrant/test/http-example` generates at
+least the following:
+
+1. An A record for `http_example.test.vagrant.aurora.mesos` (which only includes IP address);
+2. A [SRV record](https://en.wikipedia.org/wiki/SRV_record) for
+ `_http_example.test.vagrant._tcp.aurora.mesos`, which includes IP address and every port. This should only
+  be used if the service has one port.
+3. A SRV record `_{port-name}._http_example.test.vagrant._tcp.aurora.mesos` for each port name
+  defined. This should be used when the service has multiple ports.
+
+Things to note:
+
+1. The domain part (".mesos" in the above example) can be configured in [Mesos DNS](http://mesosphere.github.io/mesos-dns/docs/configuration-parameters.html);
+2. Right now, portmap and port aliases in the announcer object are not reflected in DiscoveryInfo, and are therefore not visible in
+   Mesos DNS records either. This is because they are only resolved in the Thermos executor.
diff --git a/source/documentation/0.19.0/features/services.md b/source/documentation/0.19.0/features/services.md
new file mode 100644
index 0000000..91889d2
--- /dev/null
+++ b/source/documentation/0.19.0/features/services.md
@@ -0,0 +1,116 @@
+Long-running Services
+=====================
+
+Jobs that are always restarted on completion, whether successful or unsuccessful,
+are called services. This is useful for long-running processes
+such as webservices that should always be running, unless stopped explicitly.
+
+
+Service Specification
+---------------------
+
+A job is identified as a service by the presence of the flag
+`service=True` in the [`Job`](../../reference/configuration/#job-objects) object.
+The `Service` alias can be used as shorthand for `Job` with `service=True`.
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/hello_world.aurora
+    hello = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = SequentialTask(
+      processes = [hello],
+      resources = Resources(cpu = 1.0, ram = 128*MB, disk = 128*MB)
+    )
+
+    jobs = [
+      Service(
+        task = task,
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'prod',
+        name = 'hello'
+      )
+    ]
+
+
+Jobs without the service bit set only restart up to `max_task_failures` times and only if they
+terminated unsuccessfully either due to human error or machine failure (see the
+[`Job`](../../reference/configuration/#job-objects) object for details).
+
+
+Ports
+-----
+
+In order to be useful, most services have to bind to one or more ports. Aurora enables this
+use case via the [`thermos.ports` namespace](../../reference/configuration/#thermos-namespace) that
+allows you to request arbitrarily named ports:
+
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[http]}}'
+    )
+
+
+When this process is included in a job, the job will be allocated a port, and the command line
+will be replaced with something like:
+
+    ./run_nginx.sh -port 42816
+
+Where 42816 happens to be the allocated port.
+
+For details on how to enable clients to discover this dynamically assigned port, see our
+[Service Discovery](../service-discovery/) documentation.
+
+
+Health Checking
+---------------
+
+Typically, the Thermos executor monitors processes within a task only by liveness of the forked
+process. In addition to that, Aurora has support for rudimentary health checking: either via HTTP
+or via custom shell scripts.
+
+For example, simply by requesting a `health` port, a process can request to be health checked
+via repeated calls to the `/health` endpoint:
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[health]}}'
+    )
+
+Please see the
+[configuration reference](../../reference/configuration/#healthcheckconfig-objects)
+for configuration options for this feature.
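+
+A short sketch (values are examples only) of a job that requests a `health` port and tunes its
+health checking via `HealthCheckConfig`:
+
+    jobs = [
+      Service(
+        # ... cluster, role, environment, name and a task that binds {{thermos.ports[health]}} ...
+        health_check_config = HealthCheckConfig(
+          initial_interval_secs = 15,    # grace period before failures start counting
+          interval_secs = 10,            # seconds between successive /health requests
+          timeout_secs = 1,
+          max_consecutive_failures = 3   # kill the task after three consecutive failures
+        )
+      )
+    ]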
+
+Starting with the 0.17.0 release, job updates rely only on task health-checks by introducing
+a `min_consecutive_successes` parameter on the HealthCheckConfig object. This parameter represents
+the number of successful health checks needed before a task is moved into the `RUNNING` state. Tasks
+that do not have enough successful health checks within the first `n` attempts, are moved to the
+`FAILED` state, where `n = ceil(initial_interval_secs/interval_secs) + max_consecutive_failures +
+min_consecutive_successes`. In order to accommodate variability during task warm up, `initial_interval_secs`
+will act as a grace period. Any health-check failures during the first `m` attempts are ignored and
+do not count towards `max_consecutive_failures`, where `m = ceil(initial_interval_secs/interval_secs)`.
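+
+For example, with `initial_interval_secs=15`, `interval_secs=5`, `max_consecutive_failures=1` and
+`min_consecutive_successes=2`, failures during the first `m = ceil(15/5) = 3` attempts are ignored,
+and a task is moved to `FAILED` unless it accumulates two successful health checks within its first
+`n = 3 + 1 + 2 = 6` attempts.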
+
+As [job updates](../job-updates/) are based only on health-checks, it is not necessary to set
+`watch_secs` to the worst-case update time; it can instead be set to 0. The scheduler considers a
+task that is in the `RUNNING` state to be healthy and proceeds to updating the next batch of instances.
+For details on how to control health checks, please see the
+[HealthCheckConfig](../../reference/configuration/#healthcheckconfig-objects) configuration object.
+Existing jobs that do not configure a health-check can fall back to using `watch_secs` to
+monitor a task before considering it healthy.
+
+You can pause health checking by touching a file inside of your sandbox, named `.healthchecksnooze`.
+As long as that file is present, health checks will be disabled, enabling users to gather core
+dumps or other performance measurements without worrying about Aurora's health check killing
+their process.
+
+WARNING: Remember to remove this when you are done, otherwise your instance will have permanently
+disabled health checks.
diff --git a/source/documentation/0.19.0/features/sla-metrics.md b/source/documentation/0.19.0/features/sla-metrics.md
new file mode 100644
index 0000000..eadd35f
--- /dev/null
+++ b/source/documentation/0.19.0/features/sla-metrics.md
@@ -0,0 +1,215 @@
+Aurora SLA Measurement
+======================
+
+- [Overview](#overview)
+- [Metric Details](#metric-details)
+  - [Platform Uptime](#platform-uptime)
+  - [Job Uptime](#job-uptime)
+  - [Median Time To Assigned (MTTA)](#median-time-to-assigned-\(mtta\))
+  - [Median Time To Starting (MTTS)](#median-time-to-starting-\(mtts\))
+  - [Median Time To Running (MTTR)](#median-time-to-running-\(mttr\))
+- [Limitations](#limitations)
+
+## Overview
+
+The primary goal of the feature is collection and monitoring of Aurora job SLA (Service Level
+Agreement) metrics that define a contractual relationship between the Aurora/Mesos platform
+and hosted services.
+
+The Aurora SLA feature is by default only enabled for service (non-cron)
+production jobs (`"production=True"` in your `.aurora` config). It can be enabled for
+non-production services by an operator via the scheduler command line flag `-sla_non_prod_metrics`.
+
+Counters that track SLA measurements are computed periodically within the scheduler.
+The individual instance metrics are refreshed every minute (configurable via
+`sla_stat_refresh_interval`). The instance counters are subsequently aggregated by
+relevant grouping types before exporting to the scheduler `/vars` endpoint (when using `vagrant`
+that would be `http://192.168.33.7:8081/vars`).
+
+
+## Metric Details
+
+### Platform Uptime
+
+*Aggregate amount of time a job spends in a non-runnable state due to platform unavailability
+or scheduling delays. This metric tracks Aurora/Mesos uptime performance and reflects on any
+system-caused downtime events (tasks LOST or DRAINED). Any user-initiated task kills/restarts
+will not degrade this metric.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_platform_uptime_percent`
+* Per cluster - `sla_cluster_platform_uptime_percent`
+
+**Units:** percent
+
+A fault in the task environment may cause Aurora/Mesos to have different views of the task state
+or lose track of the task's existence. In such cases, the service task is marked as LOST and
+rescheduled by Aurora. For example, this may happen when the task stays in ASSIGNED or STARTING
+for too long or the Mesos agent becomes unhealthy (or disappears completely). The time between
+task entering LOST and its replacement reaching RUNNING state is counted towards platform downtime.
+
+Another example of a platform downtime event is the administrator-requested task rescheduling. This
+happens during planned Mesos agent maintenance when all agent tasks are marked as DRAINED and
+rescheduled elsewhere.
+
+To accurately calculate Platform Uptime, we must separate platform incurred downtime from user
+actions that put a service instance in a non-operational state. It is simpler to isolate
+user-incurred downtime and treat all other downtime as platform incurred.
+
+Currently, a user can cause a healthy service (task) downtime in only two ways: via `killTasks`
+or `restartShards` RPCs. For both, their affected tasks leave an audit state transition trail
+relevant to uptime calculations. By applying a special "SLA meaning" to exposed task state
+transition records, we can build a deterministic downtime trace for every given service instance.
+
+A task going through a state transition carries one of three possible SLA meanings
+(see [SlaAlgorithm.java](https://github.com/apache/aurora/blob/rel/0.19.0/src/main/java/org/apache/aurora/scheduler/sla/SlaAlgorithm.java) for
+sla-to-task-state mapping):
+
+* Task is UP: starts a period where the task is considered to be up and running from the Aurora
+  platform standpoint.
+
+* Task is DOWN: starts a period where the task cannot reach the UP state for some
+  non-user-related reason. Counts towards instance downtime.
+
+* Task is REMOVED from SLA: starts a period where the task is not expected to be UP due to
+  user initiated action or failure. We ignore this period for the uptime calculation purposes.
+
+This metric is recalculated over the last sampling period (last minute) to account for
+any UP/DOWN/REMOVED events. It ignores any UP/DOWN events not immediately adjacent to the
+sampling interval as well as adjacent REMOVED events.
+
+### Job Uptime
+
+*Percentage of the job instances considered to be in RUNNING state for the specified duration
+relative to request time. This is a purely application side metric that is considering aggregate
+uptime of all RUNNING instances. Any user- or platform initiated restarts directly affect
+this metric.*
+
+**Collection scope:** We currently expose job uptime values at 5 pre-defined
+percentiles (50th,75th,90th,95th and 99th):
+
+* `sla_<job_key>_job_uptime_50_00_sec`
+* `sla_<job_key>_job_uptime_75_00_sec`
+* `sla_<job_key>_job_uptime_90_00_sec`
+* `sla_<job_key>_job_uptime_95_00_sec`
+* `sla_<job_key>_job_uptime_99_00_sec`
+
+**Units:** seconds
+
+You can also get customized real-time stats from the Aurora client. See `aurora sla -h` for
+more details.
+
+### Median Time To Assigned (MTTA)
+
+*Median time a job spends waiting for its tasks to be assigned to a host. This is a combined
+metric that helps track the dependency of scheduling performance on the requested resources
+(user scope) as well as the internal scheduler bin-packing algorithm efficiency (platform scope).*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtta_ms`
+* Per cluster - `sla_cluster_mtta_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.19.0/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mtta_ms`
+    * `sla_cpu_medium_mtta_ms`
+    * `sla_cpu_large_mtta_ms`
+    * `sla_cpu_xlarge_mtta_ms`
+    * `sla_cpu_xxlarge_mtta_ms`
+  * By RAM:
+    * `sla_ram_small_mtta_ms`
+    * `sla_ram_medium_mtta_ms`
+    * `sla_ram_large_mtta_ms`
+    * `sla_ram_xlarge_mtta_ms`
+    * `sla_ram_xxlarge_mtta_ms`
+  * By DISK:
+    * `sla_disk_small_mtta_ms`
+    * `sla_disk_medium_mtta_ms`
+    * `sla_disk_large_mtta_ms`
+    * `sla_disk_xlarge_mtta_ms`
+    * `sla_disk_xxlarge_mtta_ms`
+
+**Units:** milliseconds
+
+MTTA only considers instances that have already reached ASSIGNED state and ignores those
+that are still PENDING. This ensures straggler instances (e.g. with unreasonable resource
+constraints) do not affect metric curves.
+
+### Median Time To Starting (MTTS)
+
+*Median time a job waits for its tasks to reach STARTING state. This is a comprehensive metric
+reflecting on the overall time it takes for Aurora/Mesos to start initializing the sandbox
+for a task.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtts_ms`
+* Per cluster - `sla_cluster_mtts_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.19.0/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mtts_ms`
+    * `sla_cpu_medium_mtts_ms`
+    * `sla_cpu_large_mtts_ms`
+    * `sla_cpu_xlarge_mtts_ms`
+    * `sla_cpu_xxlarge_mtts_ms`
+  * By RAM:
+    * `sla_ram_small_mtts_ms`
+    * `sla_ram_medium_mtts_ms`
+    * `sla_ram_large_mtts_ms`
+    * `sla_ram_xlarge_mtts_ms`
+    * `sla_ram_xxlarge_mtts_ms`
+  * By DISK:
+    * `sla_disk_small_mtts_ms`
+    * `sla_disk_medium_mtts_ms`
+    * `sla_disk_large_mtts_ms`
+    * `sla_disk_xlarge_mtts_ms`
+    * `sla_disk_xxlarge_mtts_ms`
+
+**Units:** milliseconds
+
+MTTS only considers instances in STARTING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+### Median Time To Running (MTTR)
+
+*Median time a job waits for its tasks to reach RUNNING state. This is a comprehensive metric
+reflecting on the overall time it takes for Aurora/Mesos to start executing user content.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mttr_ms`
+* Per cluster - `sla_cluster_mttr_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.19.0/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mttr_ms`
+    * `sla_cpu_medium_mttr_ms`
+    * `sla_cpu_large_mttr_ms`
+    * `sla_cpu_xlarge_mttr_ms`
+    * `sla_cpu_xxlarge_mttr_ms`
+  * By RAM:
+    * `sla_ram_small_mttr_ms`
+    * `sla_ram_medium_mttr_ms`
+    * `sla_ram_large_mttr_ms`
+    * `sla_ram_xlarge_mttr_ms`
+    * `sla_ram_xxlarge_mttr_ms`
+  * By DISK:
+    * `sla_disk_small_mttr_ms`
+    * `sla_disk_medium_mttr_ms`
+    * `sla_disk_large_mttr_ms`
+    * `sla_disk_xlarge_mttr_ms`
+    * `sla_disk_xxlarge_mttr_ms`
+
+**Units:** milliseconds
+
+MTTR only considers instances in RUNNING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+## Limitations
+
+* The availability of Aurora SLA metrics is bound by the scheduler availability.
+
+* All metrics are calculated at a pre-defined interval (currently set at 1 minute).
+  Scheduler restarts may result in missed collections.
diff --git a/source/documentation/0.19.0/features/webhooks.md b/source/documentation/0.19.0/features/webhooks.md
new file mode 100644
index 0000000..286746e
--- /dev/null
+++ b/source/documentation/0.19.0/features/webhooks.md
@@ -0,0 +1,112 @@
+Webhooks
+========
+
+Aurora has an optional feature which allows an operator to specify a file to configure an HTTP webhook
+to receive task state change events. It can be enabled with a scheduler flag, e.g.
+`-webhook_config=/path/to/webhook.json`. At this point, webhooks are still considered *experimental*.
+
+Below is a sample configuration:
+
+```json
+{
+  "headers": {
+    "Content-Type": "application/vnd.kafka.json.v1+json",
+    "Producer-Type": "reliable"
+  },
+  "targetURL": "http://localhost:5000/",
+  "timeoutMsec": 5
+}
+```
+
+And an example of a response that you will get back:
+
+```json
+{
+    "task":
+    {
+        "cachedHashCode":0,
+        "assignedTask": {
+            "cachedHashCode":0,
+            "taskId":"vagrant-test-http_example-8-a6cf7ec5-d793-49c7-b10f-0e14ab80bfff",
+            "task": {
+                "cachedHashCode":-1819348376,
+                "job": {
+                    "cachedHashCode":803049425,
+                    "role":"vagrant",
+                    "environment":"test",
+                    "name":"http_example"
+                    },
+                "owner": {
+                    "cachedHashCode":226895216,
+                    "user":"vagrant"
+                    },
+                "isService":true,
+                "numCpus":0.1,
+                "ramMb":16,
+                "diskMb":8,
+                "priority":0,
+                "maxTaskFailures":1,
+                "production":false,
+                "resources":[
+                    {"cachedHashCode":729800451,"setField":"NUM_CPUS","value":0.1},
+                    {"cachedHashCode":552899914,"setField":"RAM_MB","value":16},
+                    {"cachedHashCode":-1547868317,"setField":"DISK_MB","value":8},
+                    {"cachedHashCode":1957328227,"setField":"NAMED_PORT","value":"http"},
+                    {"cachedHashCode":1954229436,"setField":"NAMED_PORT","value":"tcp"}
+                    ],
+                "constraints":[],
+                "requestedPorts":["http","tcp"],
+                "taskLinks":{"http":"http://%host%:%port:http%"},
+                "contactEmail":"vagrant@localhost",
+                "executorConfig": {
+                    "cachedHashCode":-1194797325,
+                    "name":"AuroraExecutor",
+                    "data": "{\"environment\": \"test\", \"health_check_config\": {\"initial_interval_secs\": 5.0, \"health_checker\": { \"http\": {\"expected_response_code\": 0, \"endpoint\": \"/health\", \"expected_response\": \"ok\"}}, \"max_consecutive_failures\": 0, \"timeout_secs\": 1.0, \"interval_secs\": 1.0}, \"name\": \"http_example\", \"service\": true, \"max_task_failures\": 1, \"cron_collision_policy\": \"KILL_EXISTING\", \"enable_hooks\": false, \"cluster\": \"devcluster\", \"task\": {\"processes\": [{\"daemon\": false, \"name\": \"echo_ports\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"echo \\\"tcp port: {{thermos.ports[tcp]}}; http port: {{thermos.ports[http]}}; alias: {{thermos.ports[alias]}}\\\"\", \"final\": false}, {\"daemon\": false, \"name\": \"stage_server\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"cp /vagrant/src/test/sh/org/apache/aurora/e2e/http_example.py .\", \"final\": false}, {\"daemon\": false, \"name\": \"run_server\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"python http_example.py {{thermos.ports[http]}}\", \"final\": false}], \"name\": \"http_example\", \"finalization_wait\": 30, \"max_failures\": 1, \"max_concurrency\": 0, \"resources\": {\"disk\": 8388608, \"ram\": 16777216, \"cpu\": 0.1}, \"constraints\": [{\"order\": [\"echo_ports\", \"stage_server\", \"run_server\"]}]}, \"production\": false, \"role\": \"vagrant\", \"contact\": \"vagrant@localhost\", \"announce\": {\"primary_port\": \"http\", \"portmap\": {\"alias\": \"http\"}}, \"lifecycle\": {\"http\": {\"graceful_shutdown_endpoint\": \"/quitquitquit\", \"port\": \"health\", \"shutdown_endpoint\": \"/abortabortabort\"}}, \"priority\": 0}"},
+                    "metadata":[],
+                    "container":{
+                        "cachedHashCode":-1955376216,
+                        "setField":"MESOS",
+                        "value":{"cachedHashCode":31}}
+                    },
+                    "assignedPorts":{},
+                    "instanceId":8
+        },
+        "status":"PENDING",
+        "failureCount":0,
+        "taskEvents":[
+            {"cachedHashCode":0,"timestamp":1464992060258,"status":"PENDING","scheduler":"aurora"}]
+        },
+        "oldState":{}}
+```
+
+By default, the webhook watches all TaskStateChanges and sends events to the configured endpoint. If you
+are only interested in certain types of TaskStateChange (e.g. transitions to the `LOST` or `FAILED` status),
+you can specify a whitelist of the desired task statuses in webhook.json. The webhook will then only send
+the corresponding events for the whitelisted statuses to the configured endpoint.
+
+```json
+{
+  "headers": {
+    "Content-Type": "application/vnd.kafka.json.v1+json",
+    "Producer-Type": "reliable"
+  },
+  "targetURL": "http://localhost:5000/",
+  "timeoutMsec": 50,
+  "statuses": ["LOST", "FAILED"]
+}
+```
+
+If you want to whitelist all TaskStateChanges, you can add a wildcard character `*` to your whitelist
+like below, or simply leave out the `statuses` field in webhook.json.
+
+```json
+{
+  "headers": {
+    "Content-Type": "application/vnd.kafka.json.v1+json",
+    "Producer-Type": "reliable"
+  },
+  "targetURL": "http://localhost:5000/",
+  "timeoutMsec": 50,
+  "statuses": ["*"]
+}
+```
diff --git a/source/documentation/0.19.0/getting-started/overview.md b/source/documentation/0.19.0/getting-started/overview.md
new file mode 100644
index 0000000..84c5916
--- /dev/null
+++ b/source/documentation/0.19.0/getting-started/overview.md
@@ -0,0 +1,112 @@
+Aurora System Overview
+======================
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+
+Components
+----------
+
+It is important to have an understanding of the components that make up
+a functioning Aurora cluster.
+
+![Aurora Components](../images/components.png)
+
+* **Aurora scheduler**
+  The scheduler is your primary interface to the work you run in your cluster.  You will
+  instruct it to run jobs, and it will manage them in Mesos for you.  You will also frequently use
+  the scheduler's read-only web interface as a heads-up display for what's running in your cluster.
+
+* **Aurora client**
+  The client (`aurora` command) is a command line tool that exposes primitives that you can use to
+  interact with the scheduler. The client operates on jobs, which are identified by their job keys.
+
+  Aurora also provides an admin client (`aurora_admin` command) that contains commands built for
+  cluster administrators.  You can use this tool to do things like manage user quotas and manage
+  graceful maintenance on machines in the cluster.
+
+* **Aurora executor**
+  The executor (a.k.a. Thermos executor) is responsible for carrying out the workloads described in
+  the Aurora DSL (`.aurora` files).  The executor is what actually executes user processes.  It will
+  also perform health checking of tasks and register tasks in ZooKeeper for the purposes of dynamic
+  service discovery.
+
+* **Aurora observer**
+  The observer provides browser-based access to the status of individual tasks executing on worker
+  machines.  It gives insight into the processes executing, and facilitates browsing of task sandbox
+  directories.
+
+* **ZooKeeper**
+  [ZooKeeper](http://zookeeper.apache.org) is a distributed consensus system.  In an Aurora cluster
+  it is used for reliable election of the leading Aurora scheduler and Mesos master.  It is also
+  used as a vehicle for service discovery, see [Service Discovery](../../features/service-discovery/)
+
+* **Mesos master**
+  The master is responsible for tracking worker machines and performing accounting of their
+  resources.  The scheduler interfaces with the master to control the cluster.
+
+* **Mesos agent**
+  The agent receives work assigned by the scheduler and executes it.  It interfaces with Linux
+  isolation systems like cgroups, namespaces and Docker to manage the resource consumption of tasks.
+  When a user task is launched, the agent will launch the executor (in the context of a Linux cgroup
+  or Docker container depending upon the environment), which will in turn fork user processes.
+
+  In earlier versions of Mesos and Aurora, the Mesos agent was known as the Mesos slave.
+
+
+Jobs, Tasks and Processes
+--------------------------
+
+Aurora is a Mesos framework used to schedule *jobs* onto Mesos. Mesos
+cares about individual *tasks*, but typical jobs consist of dozens or
+hundreds of task replicas. Aurora provides a layer on top of Mesos with
+its `Job` abstraction. An Aurora `Job` consists of a task template and
+instructions for creating near-identical replicas of that task (modulo
+things like "instance id" or specific port numbers which may differ from
+machine to machine).
+
+How many tasks make up a Job is complicated. On a basic level, a Job consists of
+one task template and instructions for creating near-identical replicas of that task
+(otherwise referred to as "instances" or "shards").
+
+A task can merely be a single *process* corresponding to a single
+command line, such as `python2.7 my_script.py`. However, a task can also
+consist of many separate processes, which all run within a single
+sandbox. For example, running multiple cooperating agents together,
+such as `logrotate`, `installer`, master, or agent processes. This is
+where Thermos comes in. While Aurora provides a `Job` abstraction on
+top of Mesos `Tasks`, Thermos provides a `Process` abstraction
+underneath Mesos `Task`s and serves as part of the Aurora framework's
+executor.
+
+You define `Job`s, `Task`s, and `Process`es in a configuration file.
+Configuration files are written in Python, and make use of the
+[Pystachio](https://github.com/wickman/pystachio) templating language,
+along with specific Aurora, Mesos, and Thermos commands and methods.
+The configuration files typically end with a `.aurora` extension.
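+
+The following minimal sketch (names and values are illustrative only) shows how the three layers
+typically appear in such a file:
+
+    hello = Process(name = 'hello', cmdline = 'echo hello world && sleep 60')
+
+    hello_task = Task(
+      name = 'hello_task',
+      processes = [hello],
+      resources = Resources(cpu = 0.1, ram = 16*MB, disk = 8*MB)
+    )
+
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'devel',
+        name = 'hello',
+        task = hello_task
+      )
+    ]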
+
+Summary:
+
+* Aurora manages jobs made of tasks.
+* Mesos manages tasks made of processes.
+* Thermos manages processes.
+* All of this is defined in `.aurora` configuration files.
+
+![Aurora hierarchy](../images/aurora_hierarchy.png)
+
+Each `Task` has a *sandbox* created when the `Task` starts and garbage
+collected when it finishes. All of a `Task`'s processes run in its
+sandbox, so processes can share state by using a shared current working
+directory.
+
+The sandbox garbage collection policy considers many factors, most
+importantly age and size. It makes a best-effort attempt to keep
+sandboxes around as long as possible post-task in order for service
+owners to inspect data and logs, should the `Task` have completed
+abnormally. But you can't design your applications assuming sandboxes
+will be around forever; instead, build log saving or other
+checkpointing mechanisms directly into your application or into your
+`Job` description.
+
diff --git a/source/documentation/0.19.0/getting-started/tutorial.md b/source/documentation/0.19.0/getting-started/tutorial.md
new file mode 100644
index 0000000..1f037fc
--- /dev/null
+++ b/source/documentation/0.19.0/getting-started/tutorial.md
@@ -0,0 +1,258 @@
+# Aurora Tutorial
+
+This tutorial shows how to use the Aurora scheduler to run (and "`printf-debug`")
+a hello world program on Mesos. This is the recommended document for new Aurora users
+to start getting up to speed on the system.
+
+- [Prerequisite](#setup-install-aurora)
+- [The Script](#the-script)
+- [Aurora Configuration](#aurora-configuration)
+- [Creating the Job](#creating-the-job)
+- [Watching the Job Run](#watching-the-job-run)
+- [Cleanup](#cleanup)
+- [Next Steps](#next-steps)
+
+
+## Prerequisite
+
+This tutorial assumes you are running [Aurora locally using Vagrant](../vagrant/).
+However, in general the instructions are also applicable to any other
+[Aurora installation](../../operations/installation/).
+
+Unless otherwise stated, all commands are to be run from the root of the aurora
+repository clone.
+
+
+## The Script
+
+Our "hello world" application is a simple Python script that loops
+forever, displaying the time every few seconds. Copy the code below and
+put it in a file named `hello_world.py` in the root of your Aurora repository clone
+(Note: this directory is the same as `/vagrant` inside the Vagrant VMs).
+
+The script has an intentional bug, which we will explain later on.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+import time
+
+def main():
+  SLEEP_DELAY = 10
+  # Python experts - ignore this blatant bug.
+  for i in xrang(100):
+    print("Hello world! The time is now: %s. Sleeping for %d secs" % (
+      time.asctime(), SLEEP_DELAY))
+    time.sleep(SLEEP_DELAY)
+
+if __name__ == "__main__":
+  main()
+```
+
+## Aurora Configuration
+
+Once we have our script/program, we need to create a *configuration
+file* that tells Aurora how to manage and launch our Job. Save the below
+code in the file `hello_world.aurora`.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+pkg_path = '/vagrant/hello_world.py'
+
+# we use a trick here to make the configuration change with
+# the contents of the file, for simplicity.  in a normal setting, packages would be
+# versioned, and the version number would be changed in the configuration.
+import hashlib
+with open(pkg_path, 'rb') as f:
+  pkg_checksum = hashlib.md5(f.read()).hexdigest()
+
+# copy hello_world.py into the local sandbox
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s . && echo %s && chmod +x hello_world.py' % (pkg_path, pkg_checksum))
+
+# run the script
+hello_world = Process(
+  name = 'hello_world',
+  cmdline = 'python -u hello_world.py')
+
+# describe the task
+hello_world_task = SequentialTask(
+  processes = [install, hello_world],
+  resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB))
+
+jobs = [
+  Service(cluster = 'devcluster',
+          environment = 'devel',
+          role = 'www-data',
+          name = 'hello_world',
+          task = hello_world_task)
+]
+```
+
+There is a lot going on in that configuration file:
+
+1. From a "big picture" viewpoint, it first defines two
+Processes. Then it defines a Task that runs the two Processes in the
+order specified in the Task definition, as well as specifying what
+computational and memory resources are available for them.  Finally,
+it defines a Job that will schedule the Task on available and suitable
+machines. This Job is the sole member of a list of Jobs; you can
+specify more than one Job in a config file.
+
+2. At the Process level, it specifies how to get your code into the
+local sandbox in which it will run. It then specifies how the code is
+actually run once the second Process starts.
+
+For more about Aurora configuration files, see the [Configuration
+Tutorial](../../reference/configuration-tutorial/) and the [Configuration
+Reference](../../reference/configuration/) (preferably after finishing this
+tutorial).
+
+
+## Creating the Job
+
+We're ready to launch our job! To do so, we use the Aurora Client to
+issue a Job creation request to the Aurora scheduler.
+
+Many Aurora Client commands take a *job key* argument, which uniquely
+identifies a Job. A job key consists of four parts, each separated by a
+"/". The four parts are  `<cluster>/<role>/<environment>/<jobname>`
+in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts existing on the agent machines. If you
+don't know what accounts are available, contact your sysadmin.
+* Environment names are namespaces; you can count on `test`, `devel`,
+`staging` and `prod` existing.
+* Jobname is the custom name of your job.
+
+When comparing two job keys, if any of the four parts is different from
+its counterpart in the other key, then the two job keys identify two separate
+jobs. If all four values are identical, the job keys identify the same job.
+
+The `clusters.json` [client configuration](../../reference/client-cluster-configuration/)
+for the Aurora scheduler defines the available cluster names.
+For Vagrant, from the top-level of your Aurora repository clone, do:
+
+    $ vagrant ssh
+
+Followed by:
+
+    vagrant@aurora:~$ cat /etc/aurora/clusters.json
+
+You'll see something like the following. The `name` value shown here corresponds to a job key's cluster value.
+
+```javascript
+[{
+  "name": "devcluster",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler",
+  "auth_mechanism": "UNAUTHENTICATED",
+  "slave_run_directory": "latest",
+  "slave_root": "/var/lib/mesos"
+}]
+```
+
+The Aurora Client command that actually runs our Job is `aurora job create`. It creates a Job as
+specified by its job key and configuration file arguments and runs it.
+
+    aurora job create <cluster>/<role>/<environment>/<jobname> <config_file>
+
+Or for our example:
+
+    aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+After entering our virtual machine using `vagrant ssh`, this returns:
+
+    vagrant@aurora:~$ aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Creating job hello_world
+     INFO] Checking status of devcluster/www-data/devel/hello_world
+    Job create succeeded: job url=http://aurora.local:8081/scheduler/www-data/devel/hello_world
+
+
+## Watching the Job Run
+
+Now that our job is running, let's see what it's doing. Access the
+scheduler web interface at `http://$scheduler_hostname:$scheduler_port/scheduler`,
+or when using `vagrant`, at `http://192.168.33.7:8081/scheduler`.
+First we see what Jobs are scheduled:
+
+![Scheduled Jobs](../images/ScheduledJobs.png)
+
+Click on your user name, which in this case was `www-data`, and we see the Jobs associated
+with that role:
+
+![Role Jobs](../images/RoleJobs.png)
+
+If you click on your `hello_world` Job, you'll see:
+
+![hello_world Job](../images/HelloWorldJob.png)
+
+Oops, looks like our first job didn't quite work! The task is temporarily throttled for
+having failed on every attempt of the Aurora scheduler to run it. We have to figure out
+what is going wrong.
+
+On the Completed tasks tab, we see all past attempts of the Aurora scheduler to run our job.
+
+![Completed tasks tab](../images/CompletedTasks.png)
+
+We can navigate to the Task page of a failed run by clicking on the host link.
+
+![Task page](../images/TaskBreakdown.png)
+
+Once there, we see that the `hello_world` process failed. The Task page
+captures the standard error and standard output streams and makes them available.
+Clicking through to `stderr` on the failed `hello_world` process, we see what happened.
+
+![stderr page](../images/stderr.png)
+
+It looks like we made a typo in our Python script. We wanted `xrange`,
+not `xrang`. Edit the `hello_world.py` script to use the correct function
+and save it as `hello_world_v2.py`. Then update the `hello_world.aurora`
+configuration to the newest version.
+
+In order to try again, we can now instruct the scheduler to update our job:
+
+    vagrant@aurora:~$ aurora update start devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Starting update for: hello_world
+    Job update has started. View your update progress at http://aurora.local:8081/scheduler/www-data/devel/hello_world/update/8ef38017-e60f-400d-a2f2-b5a8b724e95b
+
+This time, the task comes up.
+
+![Running Job](../images/RunningJob.png)
+
+By again clicking on the host, we inspect the Task page, and see that the
+`hello_world` process is running.
+
+![Running Task page](../images/runningtask.png)
+
+We then inspect the output by clicking on `stdout` and see our process'
+output:
+
+![stdout page](../images/stdout.png)
+
+## Cleanup
+
+Now that we're done, we kill the job using the Aurora client:
+
+    vagrant@aurora:~$ aurora job killall devcluster/www-data/devel/hello_world
+     INFO] Killing tasks for job: devcluster/www-data/devel/hello_world
+     INFO] Instances to be killed: [0]
+    Successfully killed instances [0]
+    Job killall succeeded
+
+The job page now shows the `hello_world` tasks as completed.
+
+![Killed Task page](../images/killedtask.png)
+
+## Next Steps
+
+Now that you've finished this Tutorial, you should read or do the following:
+
+- [The Aurora Configuration Tutorial](../../reference/configuration-tutorial/), which provides more examples
+  and best practices for writing Aurora configurations. You should also look at
+  the [Aurora Configuration Reference](../../reference/configuration/).
+- Explore the Aurora Client - use `aurora -h`, and read the
+  [Aurora Client Commands](../../reference/client-commands/) document.
diff --git a/source/documentation/0.19.0/getting-started/vagrant.md b/source/documentation/0.19.0/getting-started/vagrant.md
new file mode 100644
index 0000000..26b9a80
--- /dev/null
+++ b/source/documentation/0.19.0/getting-started/vagrant.md
@@ -0,0 +1,154 @@
+A local Cluster with Vagrant
+============================
+
+This document shows you how to configure a complete cluster using a virtual machine. This setup
+replicates a real cluster in your development machine as closely as possible. After you complete
+the steps outlined here, you will be ready to create and run your first Aurora job.
+
+The following sections describe these steps in detail:
+
+1. [Overview](#overview)
+1. [Install VirtualBox and Vagrant](#install-virtualbox-and-vagrant)
+1. [Clone the Aurora repository](#clone-the-aurora-repository)
+1. [Start the local cluster](#start-the-local-cluster)
+1. [Log onto the VM](#log-onto-the-vm)
+1. [Run your first job](#run-your-first-job)
+1. [Rebuild components](#rebuild-components)
+1. [Shut down or delete your local cluster](#shut-down-or-delete-your-local-cluster)
+1. [Troubleshooting](#troubleshooting)
+
+
+Overview
+--------
+
+The Aurora distribution includes a set of scripts that enable you to create a local cluster in
+your development machine. These scripts use [Vagrant](https://www.vagrantup.com/) and
+[VirtualBox](https://www.virtualbox.org/) to run and configure a virtual machine. Once the
+virtual machine is running, the scripts install and initialize Aurora and any required components
+to create the local cluster.
+
+
+Install VirtualBox and Vagrant
+------------------------------
+
+First, download and install [VirtualBox](https://www.virtualbox.org/) on your development machine.
+
+Then download and install [Vagrant](https://www.vagrantup.com/). To verify that the installation
+was successful, open a terminal window and type the `vagrant` command. You should see a list of
+common commands for this tool.
+
+
+Clone the Aurora repository
+---------------------------
+
+To obtain the Aurora source distribution, clone its Git repository using the following command:
+
+     git clone git://git.apache.org/aurora.git
+
+
+Start the local cluster
+-----------------------
+
+Now change into the `aurora/` directory, which contains the Aurora source code and
+other scripts and tools:
+
+     cd aurora/
+
+To start the local cluster, type the following command:
+
+     vagrant up
+
+This command uses the configuration scripts in the Aurora distribution to:
+
+* Download a Linux system image.
+* Start a virtual machine (VM) and configure it.
+* Install the required build tools on the VM.
+* Install Aurora's requirements (like [Mesos](http://mesos.apache.org/) and
+[Zookeeper](http://zookeeper.apache.org/)) on the VM.
+* Build and install Aurora from source on the VM.
+* Start Aurora's services on the VM.
+
+This process takes several minutes to complete.
+
+You may notice a warning that guest additions in the VM don't match your version of VirtualBox.
+This should generally be harmless, but you may wish to install a vagrant plugin to take care of
+mismatches like this for you:
+
+     vagrant plugin install vagrant-vbguest
+
+With this plugin installed, whenever you `vagrant up`, the plugin will upgrade the guest additions
+for you when a version mismatch is detected. You can read more about the plugin
+[here](https://github.com/dotless-de/vagrant-vbguest).
+
+To verify that Aurora is running on the cluster, visit the following URLs:
+
+* Scheduler - http://192.168.33.7:8081
+* Observer - http://192.168.33.7:1338
+* Mesos Master - http://192.168.33.7:5050
+* Mesos Agent - http://192.168.33.7:5051
+
+
+Log onto the VM
+---------------
+
+To SSH into the VM, run the following command in your development machine:
+
+     vagrant ssh
+
+To verify that Aurora is installed in the VM, type the `aurora` command. You should see a list
+of arguments and possible commands.
+
+The `/vagrant` directory on the VM is mapped to the `aurora/` local directory
+from which you started the cluster. You can edit files inside this directory in your development
+machine and access them from the VM under `/vagrant`.
+
+A pre-installed `clusters.json` file refers to your local cluster as `devcluster`, which you
+will use in client commands.
+
+
+Run your first job
+------------------
+
+Now that your cluster is up and running, you are ready to define and run your first job in Aurora.
+For more information, see the [Aurora Tutorial](../tutorial/).
+
+
+Rebuild components
+------------------
+
+If you are changing Aurora code and would like to rebuild a component, you can use the `aurorabuild`
+command on the VM to build and restart a component.  This is considerably faster than destroying
+and rebuilding your VM.
+
+`aurorabuild` accepts a list of components to build and update. To get a list of supported
+components, invoke the `aurorabuild` command with no arguments. For example, to rebuild and
+restart the client:
+
+     vagrant ssh -c 'aurorabuild client'
+
+
+Shut down or delete your local cluster
+--------------------------------------
+
+To shut down your local cluster, run the `vagrant halt` command in your development machine. To
+start it again, run the `vagrant up` command.
+
+Once you are finished with your local cluster, or if you would otherwise like to start from scratch,
+you can use the command `vagrant destroy` to shut down the virtual machine and delete its file system.
+
+
+Troubleshooting
+---------------
+
+Most Vagrant-related problems can be fixed by the following steps:
+
+* Destroying the vagrant environment with `vagrant destroy`
+* Killing any orphaned VMs (see AURORA-499) with the VirtualBox UI or the `VBoxManage` command line tool
+* Cleaning the repository of build artifacts and other intermediate output with `git clean -fdx`
+* Bringing up the vagrant environment with `vagrant up`
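+
+For example, from the root of your Aurora checkout (and after manually removing any orphaned VMs):
+
+     vagrant destroy -f
+     git clean -fdx
+     vagrant up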
+
+If that still doesn't solve your problem, make sure to inspect the log files:
+
+* Scheduler: `/var/log/upstart/aurora-scheduler.log`
+* Observer: `/var/log/upstart/aurora-thermos-observer.log`
+* Mesos Master: `/var/log/mesos/mesos-master.INFO` (also see `.WARNING` and `.ERROR`)
+* Mesos Agent: `/var/log/mesos/mesos-slave.INFO` (also see `.WARNING` and `.ERROR`)
diff --git a/publish/documentation/latest/images/CPUavailability.png b/source/documentation/0.19.0/images/CPUavailability.png
similarity index 100%
copy from publish/documentation/latest/images/CPUavailability.png
copy to source/documentation/0.19.0/images/CPUavailability.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/CompletedTasks.png b/source/documentation/0.19.0/images/CompletedTasks.png
new file mode 100644
index 0000000..343e111
--- /dev/null
+++ b/source/documentation/0.19.0/images/CompletedTasks.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/HelloWorldJob.png b/source/documentation/0.19.0/images/HelloWorldJob.png
new file mode 100644
index 0000000..4977227
--- /dev/null
+++ b/source/documentation/0.19.0/images/HelloWorldJob.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/RoleJobs.png b/source/documentation/0.19.0/images/RoleJobs.png
new file mode 100644
index 0000000..95ffa49
--- /dev/null
+++ b/source/documentation/0.19.0/images/RoleJobs.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/RunningJob.png b/source/documentation/0.19.0/images/RunningJob.png
new file mode 100644
index 0000000..4e7a045
--- /dev/null
+++ b/source/documentation/0.19.0/images/RunningJob.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/ScheduledJobs.png b/source/documentation/0.19.0/images/ScheduledJobs.png
new file mode 100644
index 0000000..0be4731
--- /dev/null
+++ b/source/documentation/0.19.0/images/ScheduledJobs.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/TaskBreakdown.png b/source/documentation/0.19.0/images/TaskBreakdown.png
new file mode 100644
index 0000000..fab702c
--- /dev/null
+++ b/source/documentation/0.19.0/images/TaskBreakdown.png
Binary files differ
diff --git a/publish/documentation/latest/images/aurora_hierarchy.png b/source/documentation/0.19.0/images/aurora_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/aurora_hierarchy.png
copy to source/documentation/0.19.0/images/aurora_hierarchy.png
Binary files differ
diff --git a/publish/assets/img/aurora_logo.png b/source/documentation/0.19.0/images/aurora_logo.png
similarity index 100%
copy from publish/assets/img/aurora_logo.png
copy to source/documentation/0.19.0/images/aurora_logo.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/components.odg b/source/documentation/0.19.0/images/components.odg
new file mode 100644
index 0000000..a0f3b28
--- /dev/null
+++ b/source/documentation/0.19.0/images/components.odg
Binary files differ
diff --git a/source/documentation/0.19.0/images/components.png b/source/documentation/0.19.0/images/components.png
new file mode 100644
index 0000000..e8228c0
--- /dev/null
+++ b/source/documentation/0.19.0/images/components.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/debug-client-test.png b/source/documentation/0.19.0/images/debug-client-test.png
new file mode 100644
index 0000000..8b67a10
--- /dev/null
+++ b/source/documentation/0.19.0/images/debug-client-test.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/debugging-client-test.png b/source/documentation/0.19.0/images/debugging-client-test.png
new file mode 100644
index 0000000..df7b7e3
--- /dev/null
+++ b/source/documentation/0.19.0/images/debugging-client-test.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/killedtask.png b/source/documentation/0.19.0/images/killedtask.png
new file mode 100644
index 0000000..bb1bc92
--- /dev/null
+++ b/source/documentation/0.19.0/images/killedtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/lifeofatask.png b/source/documentation/0.19.0/images/lifeofatask.png
similarity index 100%
copy from publish/documentation/latest/images/lifeofatask.png
copy to source/documentation/0.19.0/images/lifeofatask.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png b/source/documentation/0.19.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
new file mode 100644
index 0000000..ea9866e
--- /dev/null
+++ b/source/documentation/0.19.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png b/source/documentation/0.19.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
new file mode 100644
index 0000000..c9539df
--- /dev/null
+++ b/source/documentation/0.19.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png b/source/documentation/0.19.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
new file mode 100644
index 0000000..99674c7
--- /dev/null
+++ b/source/documentation/0.19.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/presentations/02_28_2015_apache_aurora_thumb.png b/source/documentation/0.19.0/images/presentations/02_28_2015_apache_aurora_thumb.png
new file mode 100644
index 0000000..6e0e1fb
--- /dev/null
+++ b/source/documentation/0.19.0/images/presentations/02_28_2015_apache_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png b/source/documentation/0.19.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
new file mode 100644
index 0000000..f81a128
--- /dev/null
+++ b/source/documentation/0.19.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png b/source/documentation/0.19.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
new file mode 100644
index 0000000..6ebce80
--- /dev/null
+++ b/source/documentation/0.19.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png b/source/documentation/0.19.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
new file mode 100644
index 0000000..f8a3db1
--- /dev/null
+++ b/source/documentation/0.19.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/presentations/08_21_2014_past_present_future_thumb.png b/source/documentation/0.19.0/images/presentations/08_21_2014_past_present_future_thumb.png
new file mode 100644
index 0000000..deaebe7
--- /dev/null
+++ b/source/documentation/0.19.0/images/presentations/08_21_2014_past_present_future_thumb.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png b/source/documentation/0.19.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
new file mode 100644
index 0000000..61ea6d4
--- /dev/null
+++ b/source/documentation/0.19.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png b/source/documentation/0.19.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png
new file mode 100644
index 0000000..9f1f4af
--- /dev/null
+++ b/source/documentation/0.19.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png b/source/documentation/0.19.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
new file mode 100644
index 0000000..a8bb199
--- /dev/null
+++ b/source/documentation/0.19.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png b/source/documentation/0.19.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
new file mode 100644
index 0000000..ac5c6dc
--- /dev/null
+++ b/source/documentation/0.19.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/runningtask.png b/source/documentation/0.19.0/images/runningtask.png
new file mode 100644
index 0000000..9e7e31a
--- /dev/null
+++ b/source/documentation/0.19.0/images/runningtask.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/stderr.png b/source/documentation/0.19.0/images/stderr.png
new file mode 100644
index 0000000..6076a84
--- /dev/null
+++ b/source/documentation/0.19.0/images/stderr.png
Binary files differ
diff --git a/source/documentation/0.19.0/images/stdout.png b/source/documentation/0.19.0/images/stdout.png
new file mode 100644
index 0000000..d614097
--- /dev/null
+++ b/source/documentation/0.19.0/images/stdout.png
Binary files differ
diff --git a/publish/documentation/latest/images/storage_hierarchy.png b/source/documentation/0.19.0/images/storage_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/storage_hierarchy.png
copy to source/documentation/0.19.0/images/storage_hierarchy.png
Binary files differ
diff --git a/source/documentation/0.19.0/index.html.md b/source/documentation/0.19.0/index.html.md
new file mode 100644
index 0000000..fc414c4
--- /dev/null
+++ b/source/documentation/0.19.0/index.html.md
@@ -0,0 +1,79 @@
+## Introduction
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+We encourage you to ask questions on the [Aurora user list](http://aurora.apache.org/community/) or
+the `#aurora` IRC channel on `irc.freenode.net`.
+
+
+## Getting Started
+Information for everyone new to Apache Aurora.
+
+ * [Aurora System Overview](getting-started/overview/)
+ * [Hello World Tutorial](getting-started/tutorial/)
+ * [Local cluster with Vagrant](getting-started/vagrant/)
+
+## Features
+Description of important Aurora features.
+
+ * [Containers](features/containers/)
+ * [Cron Jobs](features/cron-jobs/)
+ * [Custom Executors](features/custom-executors/)
+ * [Job Updates](features/job-updates/)
+ * [Multitenancy](features/multitenancy/)
+ * [Resource Isolation](features/resource-isolation/)
+ * [Scheduling Constraints](features/constraints/)
+ * [Services](features/services/)
+ * [Service Discovery](features/service-discovery/)
+ * [SLA Metrics](features/sla-metrics/)
+ * [Webhooks](features/webhooks/)
+
+## Operators
+For those who wish to manage and fine-tune an Aurora cluster.
+
+ * [Installation](operations/installation/)
+ * [Configuration](operations/configuration/)
+ * [Upgrades](operations/upgrades/)
+ * [Troubleshooting](operations/troubleshooting/)
+ * [Monitoring](operations/monitoring/)
+ * [Security](operations/security/)
+ * [Storage](operations/storage/)
+ * [Backup](operations/backup-restore/)
+
+## Reference
+The complete reference of commands, configuration options, and scheduler internals.
+
+ * [Task lifecycle](reference/task-lifecycle/)
+ * Configuration (`.aurora` files)
+    - [Configuration Reference](reference/configuration/)
+    - [Configuration Tutorial](reference/configuration-tutorial/)
+    - [Configuration Best Practices](reference/configuration-best-practices/)
+    - [Configuration Templating](reference/configuration-templating/)
+ * Aurora Client
+    - [Client Commands](reference/client-commands/)
+    - [Client Hooks](reference/client-hooks/)
+    - [Client Cluster Configuration](reference/client-cluster-configuration/)
+ * [Scheduler Configuration](reference/scheduler-configuration/)
+ * [Observer Configuration](reference/observer-configuration/)
+ * [Endpoints](reference/scheduler-endpoints/)
+
+## Additional Resources
+ * [Tools integrating with Aurora](additional-resources/tools/)
+ * [Presentation videos and slides](additional-resources/presentations/)
+
+## Developers
+All the information you need to start modifying Aurora and contributing back to the project.
+
+ * [Contributing to the project](contributing/)
+ * [Committer's Guide](development/committers-guide/)
+ * [Design Documents](development/design-documents/)
+ * Developing the Aurora components:
+     - [Client](development/client/)
+     - [Scheduler](development/scheduler/)
+     - [Scheduler UI](development/ui/)
+     - [Thermos](development/thermos/)
+     - [Thrift structures](development/thrift/)
+
+
diff --git a/source/documentation/0.19.0/operations/backup-restore.md b/source/documentation/0.19.0/operations/backup-restore.md
new file mode 100644
index 0000000..b9e7d3a
--- /dev/null
+++ b/source/documentation/0.19.0/operations/backup-restore.md
@@ -0,0 +1,91 @@
+# Recovering from a Scheduler Backup
+
+**Be sure to read the entire page before attempting to restore from a backup, as it may have
+unintended consequences.**
+
+## Summary
+
+The restoration procedure replaces the existing (possibly corrupted) Mesos replicated log with an
+earlier, backed up, version and requires all schedulers to be taken down temporarily while
+restoring. Once completed, the scheduler state resets to what it was when the backup was created.
+This means any jobs/tasks created or updated after the backup are unknown to the scheduler and will
+be killed shortly after the cluster restarts. All other tasks continue operating as normal.
+
+Usually, it is a bad idea to restore a backup that is not extremely recent (i.e. older than a few
+hours). This is because the scheduler will expect the cluster to look exactly as the backup does,
+so any tasks that have been rescheduled since the backup was taken will be killed.
+
+The instructions below have been verified in the [Vagrant environment](../../getting-started/vagrant/) and, with minor
+syntax/path changes, should be applicable to any Aurora cluster.
+
+## Preparation
+
+Follow these steps to prepare the cluster for restoring from a backup:
+
+* Stop all scheduler instances
+
+* Consider blocking external traffic on the port defined in `-http_port` for all schedulers to
+prevent users from interacting with the scheduler during the restoration process. This will aid
+troubleshooting by reducing scheduler log noise and prevent users from making changes that would
+be erased once the backup snapshot is restored.
+
+* Configure `aurora_admin` access to run all commands listed in
+  [Restore from backup](#restore-from-backup) section locally on the leading scheduler:
+  * Make sure the [clusters.json](../../reference/client-cluster-configuration/) file is configured to
+    access the scheduler directly: set the `scheduler_uri` setting and remove `zk` (see the sketch
+    after this list). Since the leader can get re-elected during the restore steps, consider doing
+    this on all scheduler replicas.
+  * Depending on your particular security approach, you will need to either turn off scheduler
+    authorization by removing the scheduler's `-http_authentication_mechanism` flag or make sure that
+    direct scheduler access is properly authorized. E.g.: in the case of Kerberos you will need to make
+    an `/etc/hosts` change mapping your local IP to the scheduler URL configured in the keytabs:
+
+        <local_ip> <scheduler_domain_in_keytabs>
+
+* The next steps put the scheduler into a partially disabled state where it can still
+accept storage recovery requests but is unable to schedule or change task states. This may be
+accomplished by updating the following scheduler configuration options:
+  * Set `-mesos_master_address` to a non-existent zk address. This will prevent the scheduler from
+    registering with Mesos. E.g.: `-mesos_master_address=zk://localhost:1111/mesos/master`
+  * `-max_registration_delay` - set to a sufficiently long interval to prevent a registration timeout
+    and, as a result, scheduler suicide. E.g: `-max_registration_delay=360mins`
+  * Make sure the `-reconciliation_initial_delay` option is set high enough (e.g.: `365days`) to
+    prevent accidental task GC. This is important as the scheduler will attempt to reconcile the cluster
+    state and will kill all tasks when restarted with an empty Mesos replicated log.
+
+* Restart all schedulers
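+
+For reference, a minimal sketch of a `clusters.json` entry configured for direct scheduler access,
+as described in the Preparation steps above, might look like this (the name, URI, and auth mechanism
+are illustrative):
+
+    [
+      {
+        "name": "devcluster",
+        "scheduler_uri": "http://192.168.33.7:8081",
+        "auth_mechanism": "UNAUTHENTICATED"
+      }
+    ]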
+
+## Cleanup and re-initialize Mesos replicated log
+
+Get rid of the corrupted files and re-initialize Mesos replicated log:
+
+* Stop schedulers
+* Delete all files under `-native_log_file_path` on all schedulers
+* Initialize Mesos replica's log file: `sudo mesos-log initialize --path=<-native_log_file_path>`
+* Start schedulers
+
+## Restore from backup
+
+At this point the scheduler is ready to rehydrate from the backup:
+
+* Identify the leading scheduler by:
+  * examining the `scheduler_lifecycle_LEADER_AWAITING_REGISTRATION` metric at the scheduler
+    `/vars` endpoint. The leader will report 1; all other replicas report 0 (see the example after this list).
+  * examining scheduler logs
+  * or examining Zookeeper registration under the path defined by `-zk_endpoints`
+    and `-serverset_path`
+
+* Locate the desired backup file, copy it to the leading scheduler's `-backup_dir` folder and stage
+recovery by running the following command on the leader:
+`aurora_admin scheduler_stage_recovery --bypass-leader-redirect <cluster> scheduler-backup-<yyyy-MM-dd-HH-mm>`
+
+* At this point, the recovery snapshot is staged and available for manual verification/modification
+via `aurora_admin scheduler_print_recovery_tasks --bypass-leader-redirect` and
+`scheduler_delete_recovery_tasks --bypass-leader-redirect` commands.
+See `aurora_admin help <command>` for usage details.
+
+* Commit recovery. This instructs the scheduler to overwrite the existing Mesos replicated log with
+the provided backup snapshot and initiate a mandatory failover:
+`aurora_admin scheduler_commit_recovery --bypass-leader-redirect <cluster>`
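+
+For example, one quick way to perform the first leader check above (host and port are illustrative):
+
+    curl -s http://192.168.33.7:8081/vars | grep scheduler_lifecycle_LEADER_AWAITING_REGISTRATION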
+
+## Cleanup
+Undo any modifications made during the [Preparation](#preparation) sequence.
diff --git a/source/documentation/0.19.0/operations/configuration.md b/source/documentation/0.19.0/operations/configuration.md
new file mode 100644
index 0000000..def0f21
--- /dev/null
+++ b/source/documentation/0.19.0/operations/configuration.md
@@ -0,0 +1,339 @@
+# Scheduler Configuration
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+Examples are available under `examples/scheduler/`. For a list of available Aurora flags and their
+documentation, see [Scheduler Configuration Reference](../../reference/scheduler-configuration/).
+
+
+## A Note on Configuration
+Like Mesos, Aurora uses command-line flags for runtime configuration. As such, the Aurora
+"configuration file" is typically a `scheduler.sh` shell script of the following form:
+
+    #!/bin/bash
+    AURORA_HOME=/usr/local/aurora-scheduler
+
+    # Flags controlling the JVM.
+    JAVA_OPTS=(
+      -Xmx2g
+      -Xms2g
+      # GC tuning, etc.
+    )
+
+    # Flags controlling the scheduler.
+    AURORA_FLAGS=(
+      # Port for client RPCs and the web UI
+      -http_port=8081
+      # Log configuration, etc.
+    )
+
+    # Environment variables controlling libmesos
+    export JAVA_HOME=...
+    export GLOG_v=1
+    export LIBPROCESS_PORT=8083
+    export LIBPROCESS_IP=192.168.33.7
+
+    JAVA_OPTS="${JAVA_OPTS[*]}" exec "$AURORA_HOME/bin/aurora-scheduler" "${AURORA_FLAGS[@]}"
+
+That way Aurora's current flags are visible in `ps` and in the `/vars` admin endpoint.
+
+
+## JVM Configuration
+
+JVM settings are dependent on your environment and cluster size. They might require
+custom tuning. As a starting point, we recommend:
+
+* Ensure the initial (`-Xms`) and maximum (`-Xmx`) heap sizes are identical to prevent heap resizing
+  at runtime.
+* Either `-XX:+UseConcMarkSweepGC` or `-XX:+UseG1GC -XX:+UseStringDeduplication` is a
+  sane default for the garbage collector.
+* `-Djava.net.preferIPv4Stack=true` makes sense in most cases as well.
+
+
+## Network Configuration
+
+By default, Aurora binds to all interfaces and auto-discovers its hostname. To reduce ambiguity,
+it helps to hardcode these settings:
+
+    -http_port=8081
+    -ip=192.168.33.7
+    -hostname="aurora1.us-east1.example.org"
+
+Two environment variables control the IP and port used for communication with the Mesos master
+and for the replicated log used by Aurora:
+
+    export LIBPROCESS_PORT=8083
+    export LIBPROCESS_IP=192.168.33.7
+
+It is important that those can be reached from all Mesos master and Aurora scheduler instances.
+
+
+## Replicated Log Configuration
+
+Aurora schedulers use ZooKeeper to discover log replicas and elect a leader. Only one scheduler is
+leader at a given time - the other schedulers follow log writes and prepare to take over as leader
+but do not communicate with the Mesos master. Either 3 or 5 schedulers are recommended in a
+production deployment depending on failure tolerance and they must have persistent storage.
+
+Below is a summary of scheduler storage configuration flags that either don't have default values
+or require attention before deploying in a production environment.
+
+### `-native_log_quorum_size`
+Defines the Mesos replicated log quorum size. In a cluster with `N` schedulers, the flag
+`-native_log_quorum_size` should be set to `floor(N/2) + 1`. So in a cluster with 1 scheduler
+it should be set to `1`, in a cluster with 3 it should be set to `2`, and in a cluster of 5 it
+should be set to `3`.
+
+  Number of schedulers (N) | `-native_log_quorum_size` setting (`floor(N/2) + 1`)
+  ------------------------ | -----------------------------------------------------
+  1                        | 1
+  3                        | 2
+  5                        | 3
+  7                        | 4
+
+*Incorrectly setting this flag will cause data corruption to occur!*
+
+### `-native_log_file_path`
+Location of the Mesos replicated log files. For optimal and consistent performance, consider
+allocating a dedicated disk (preferably SSD) for the replicated log. Ensure that this disk is not
+used by anything else (e.g. no process logging) and in particular that it is a real disk
+and not just a partition.
+
+Even when a dedicated disk is used, switching the Linux kernel I/O scheduler from `CFQ` to `deadline`
+can further help with storage performance in Aurora ([see this ticket for details](https://issues.apache.org/jira/browse/AURORA-1211)).
+
+### `-native_log_zk_group_path`
+ZooKeeper path used for Mesos replicated log quorum discovery.
+
+See [code](https://github.com/apache/aurora/blob/rel/0.19.0/src/main/java/org/apache/aurora/scheduler/log/mesos/MesosLogStreamModule.java) for
+other available Mesos replicated log configuration options and default values.
+
+### Changing the Quorum Size
+Special care needs to be taken when changing the size of the Aurora scheduler quorum.
+Since Aurora uses a Mesos replicated log, similar steps need to be followed as when
+[changing the Mesos quorum size](http://mesos.apache.org/documentation/latest/operational-guide).
+
+As a preparation, increase `-native_log_quorum_size` on each existing scheduler and restart them.
+When updating from 3 to 5 schedulers, the quorum size would grow from 2 to 3.
+
+When starting the new schedulers, use the `-native_log_quorum_size` set to the new value. Failing to
+first increase the quorum size on running schedulers can in some cases result in corruption
+or truncating of the replicated log used by Aurora. In that case, see the documentation on
+[recovering from backup](../backup-restore/).
+
+
+## Backup Configuration
+
+Configuration options for the Aurora scheduler backup manager.
+
+* `-backup_interval`: The interval on which the scheduler writes local storage backups.
+   The default is every hour.
+* `-backup_dir`: Directory to write backups to. As stated above, this should not be co-located on the
+   same disk as the replicated log.
+* `-max_saved_backups`: Maximum number of backups to retain before deleting the oldest backup(s).
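+
+Putting these together, a hedged example of the backup-related scheduler flags might look like this
+(the values shown are illustrative, not recommendations):
+
+    -backup_interval=1hrs
+    -backup_dir=/var/lib/aurora/backups
+    -max_saved_backups=48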
+
+
+## Resource Isolation
+
+For proper CPU, memory, and disk isolation as mentioned in our [enduser documentation](../../features/resource-isolation/),
+we recommend adding the following isolators to the `--isolation` flag of the Mesos agent:
+
+* `cgroups/cpu`
+* `cgroups/mem`
+* `disk/du`
+
+In addition, we recommend setting the following [agent flags](http://mesos.apache.org/documentation/latest/configuration/):
+
+* `--cgroups_limit_swap` to enable memory limits on both memory and swap instead of just memory.
+  Alternatively, you could disable swap on your agent hosts.
+* `--cgroups_enable_cfs` to enable hard limits on CPU resources via the CFS bandwidth limiting
+  feature.
+* `--enforce_container_disk_quota` to enable disk quota enforcement for containers.
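+
+Taken together, a sketch of the corresponding Mesos agent flags might look like the following
+(combine the isolators with any others you already use):
+
+    --isolation=cgroups/cpu,cgroups/mem,disk/du
+    --cgroups_limit_swap
+    --cgroups_enable_cfs
+    --enforce_container_disk_quota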
+
+To enable the optional GPU support in Mesos, please see the GPU-related flags in the
+[Mesos configuration](http://mesos.apache.org/documentation/latest/configuration/).
+To enable the corresponding feature in Aurora, you have to start the scheduler with the
+following flag:
+
+    -allow_gpu_resource=true
+
+If you want to use revocable resources, first follow the
+[Mesos oversubscription documentation](http://mesos.apache.org/documentation/latest/oversubscription/)
+and then set this Aurora scheduler flag to allow receiving revocable Mesos offers:
+
+    -receive_revocable_resources=true
+
+Both CPUs and RAM are supported as revocable resources. The former is enabled by default;
+the latter needs to be enabled via:
+
+    -enable_revocable_ram=true
+
+Unless you want to use the [default](https://github.com/apache/aurora/blob/rel/0.19.0/src/main/resources/org/apache/aurora/scheduler/tiers.json)
+tier configuration, you will also have to specify a file path:
+
+    -tier_config=path/to/tiers/config.json
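+
+As an illustration only, a tier configuration file modeled loosely on the default might look like
+the following; verify the exact tier names and fields against the linked default before relying on them:
+
+    {
+      "default": "preemptible",
+      "tiers": {
+        "preferred":   {"revocable": false, "preemptible": false},
+        "preemptible": {"revocable": false, "preemptible": true},
+        "revocable":   {"revocable": true,  "preemptible": true}
+      }
+    }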
+
+
+## Multi-Framework Setup
+
+Aurora holds onto Mesos offers in order to provide efficient scheduling and
+[preemption](../../features/multitenancy/#preemption). This is problematic in multi-framework
+environments as Aurora might starve other frameworks.
+
+At the cost of increased scheduling latency, Aurora can be configured to be more cooperative:
+
+* Lowering `-min_offer_hold_time` (e.g. to `1mins`) can ensure unused offers are returned back to
+  Mesos more frequently.
+* Increasing `-offer_filter_duration` (e.g. to `30secs`) will instruct Mesos
+  not to re-offer rejected resources for the given duration.
+
+Setting a [minimum amount of resources](http://mesos.apache.org/documentation/latest/quota/) for
+each Mesos role can furthermore help to ensure no framework is starved entirely.
+
+
+## Containers
+
+Both the Mesos and Docker containerizers require configuration of the Mesos agent.
+
+### Mesos Containerizer
+
+The minimal agent configuration requires enabling Docker and Appc image support for the Mesos
+containerizer:
+
+    --containerizers=mesos
+    --image_providers=appc,docker
+    --isolation=filesystem/linux,docker/runtime  # as an addition to your other isolators
+
+Further details can be found in the corresponding [Mesos documentation](http://mesos.apache.org/documentation/latest/container-image/).
+
+### Docker Containerizer
+
+The [Docker containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/)
+requires that the Docker engine be installed on each agent host. In addition, it must be enabled on the
+Mesos agents by launching them with the option:
+
+    --containerizers=mesos,docker
+
+If you would like to run a container with a read-only filesystem, it may also be necessary to use
+the scheduler flag `-thermos_home_in_sandbox` in order to set `HOME` to the sandbox
+before the executor runs. This makes sure that the executor/runner PEX extraction happens
+inside the sandbox instead of in the container filesystem root.
+
+If you would like to supply your own parameters to `docker run` when launching jobs in docker
+containers, you may use the following flags:
+
+    -allow_docker_parameters
+    -default_docker_parameters
+
+`-allow_docker_parameters` controls whether or not users may pass their own configuration parameters
+through the job configuration files. If set to `false` (the default), the scheduler will reject
+jobs with custom parameters. *NOTE*: this setting should be used with caution as it allows any job
+owner to specify any parameters they wish, including those that may introduce security concerns
+(`privileged=true`, for example).
+
+`-default_docker_parameters` allows a cluster operator to specify a universal set of parameters that
+should be used for every container that does not have parameters explicitly configured at the job
+level. The argument accepts a multimap format:
+
+    -default_docker_parameters="read-only=true,tmpfs=/tmp,tmpfs=/run"
+
+### Common Options
+
+The following Aurora options work for both containerizers.
+
+The scheduler flag `-global_container_mounts` allows mounting paths from the host (i.e. the agent machine)
+into all containers on that host. The format is a comma-separated list of `host_path:container_path[:mode]`
+tuples. For example, `-global_container_mounts=/opt/secret_keys_dir:/mnt/secret_keys_dir:ro` mounts
+`/opt/secret_keys_dir` from the agents into all launched containers. Valid modes are `ro` and `rw`.
+
+
+## Thermos Process Logs
+
+### Log destination
+By default, Thermos will write process stdout/stderr to log files in the sandbox. Process object
+configuration allows specifying alternate log file destinations like streamed stdout/stderr or
+suppression of all log output. Default behavior can be configured for the entire cluster with the
+following flag (through the `-thermos_executor_flags` argument to the Aurora scheduler):
+
+    --runner-logger-destination=both
+
+The `both` setting sends logs to files and also streams them to the parent stdout/stderr.
+
+See [Configuration Reference](../../reference/configuration/#logger) for all destination options.
+
+### Log rotation
+By default, Thermos will not rotate the stdout/stderr logs from child processes and they will grow
+without bound. An individual user may change this behavior via configuration on the Process object,
+but it may also be desirable to change the default configuration for the entire cluster.
+In order to enable rotation by default, the following flags can be applied to Thermos (through the
+`-thermos_executor_flags` argument to the Aurora scheduler):
+
+    --runner-logger-mode=rotate
+    --runner-rotate-log-size-mb=100
+    --runner-rotate-log-backups=10
+
+In the above example, each instance of the Thermos runner will rotate stderr/stdout logs once they
+reach 100 MiB in size and keep a maximum of 10 backups. If a user has provided a custom setting for
+their process, it will override these default settings.
+
+
+## Thermos Executor Wrapper
+
+If you need to do computation before starting the Thermos executor (for example, setting a different
+`--announcer-hostname` parameter for every executor), then the Thermos executor should be invoked
+inside a wrapper script. In such a case, the Aurora scheduler should be started with
+`-thermos_executor_path` pointing to the wrapper script and `-thermos_executor_resources` set to a
+comma-separated string of all the resources that should be copied into the sandbox (including the
+original Thermos executor). Ensure the wrapper script does not access resources outside of the
+sandbox, as when the script is run from within a Docker container those resources may not exist.
+
+For example, to wrap the executor inside a simple wrapper, the scheduler would be started like this:
+
+    -thermos_executor_path=/path/to/wrapper.sh -thermos_executor_resources=/usr/share/aurora/bin/thermos_executor.pex
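+
+A minimal sketch of such a wrapper, assuming the copied executor PEX ends up alongside the wrapper
+in the sandbox and that the fully-qualified hostname is what you want to announce:
+
+    #!/bin/bash
+    # Hypothetical wrapper: compute a per-host announcer hostname, then hand off to the
+    # real executor, forwarding any flags passed via -thermos_executor_flags.
+    exec ./thermos_executor.pex --announcer-hostname="$(hostname -f)" "$@"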
+
+## Custom Executors
+
+The scheduler can be configured to utilize a custom executor by specifying the `-custom_executor_config` flag.
+The flag must be set to the path of a valid executor configuration file.
+
+For more information on this feature please see the custom executors [documentation](../../features/custom-executors/).
+
+## A note on increasing executor overhead
+
+Increasing executor overhead on an existing cluster, whether it be for custom executors or for Thermos,
+will result in degraded preemption performance until all tasks which began life with the previous,
+lower-overhead executor configuration are preempted/restarted.
+
+## Controlling MTTA via Update Affinity
+
+When there is high resource contention in your cluster, you may experience noticeably elevated job update
+times, as well as high task churn across the cluster. This is due to Aurora's first-fit scheduling
+algorithm. To alleviate this, you can enable update affinity where the Scheduler will make a best-effort
+attempt to reuse the same agent for the updated task (so long as the resources for the job are not being
+increased).
+
+To enable this in the Scheduler, you can set the following options:
+
+    --enable_update_affinity=true
+    --update_affinity_reservation_hold_time=3mins
+
+You will need to tune the hold time to match the behavior you see in your cluster. If you have extremely
+high update throughput, you might have to extend it as processing updates could easily add significant
+delays between scheduling attempts. You may also have to tune scheduling parameters to achieve the
+throughput you need in your cluster. Some relevant settings (with defaults) are:
+
+    --max_schedule_attempts_per_sec=40
+    --initial_schedule_penalty=1secs
+    --max_schedule_penalty=1mins
+    --scheduling_max_batch_size=3
+    --max_tasks_per_schedule_attempt=5
+
+There are metrics exposed by the Scheduler which can provide guidance on where the bottleneck is.
+Example metrics to look at:
+
+    - schedule_attempts_blocks (if this number is greater than 0, then task throughput is hitting
+                                limits controlled by --max_schedule_attempts_per_sec)
+    - scheduled_task_penalty_* (metrics around scheduling penalties for tasks, if the numbers here are high
+                                then you could have high contention for resources)
+
+Most likely you'll run into limits with the number of update instances that can be processed per minute
+before you run into any other limits. So if your total work done per minute starts to exceed 2k instances,
+you may need to extend the `update_affinity_reservation_hold_time`.
diff --git a/source/documentation/0.19.0/operations/installation.md b/source/documentation/0.19.0/operations/installation.md
new file mode 100644
index 0000000..12db004
--- /dev/null
+++ b/source/documentation/0.19.0/operations/installation.md
@@ -0,0 +1,256 @@
+# Installing Aurora
+
+Source and binary distributions can be found on our
+[downloads](https://aurora.apache.org/downloads/) page.  Installing from binary packages is
+recommended for most.
+
+- [Installing the scheduler](#installing-the-scheduler)
+- [Installing worker components](#installing-worker-components)
+- [Installing the client](#installing-the-client)
+- [Installing Mesos](#installing-mesos)
+- [Troubleshooting](#troubleshooting)
+
+If our binary packages don't suit you, our package build toolchain makes it easy to build your
+own packages. See the [instructions](https://github.com/apache/aurora-packaging) to learn how.
+
+
+## Machine profiles
+
+Given that many of these components communicate over the network, there are numerous ways you could
+assemble them to create an Aurora cluster.  The simplest way is to think in terms of three machine
+profiles:
+
+### Coordinator
+**Components**: ZooKeeper, Aurora scheduler, Mesos master
+
+A small number of machines (typically 3 or 5) responsible for cluster orchestration.  In most cases
+it is fine to co-locate these components in anything but very large clusters (> 1000 machines).
+Beyond that point, operators will likely want to manage these services on separate machines.
+In particular, you will want to use separate ZooKeeper ensembles for leader election and
+service discovery. Otherwise a service discovery error or outage can take down the entire cluster.
+
+In practice, 5 coordinators have been shown to reliably manage clusters with tens of thousands of
+machines.
+
+### Worker
+**Components**: Aurora executor, Aurora observer, Mesos agent
+
+The bulk of the cluster, where services will actually run.
+
+### Client
+**Components**: Aurora client, Aurora admin client
+
+Any machines that users submit jobs from.
+
+
+## Installing the scheduler
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        sudo start mesos-master
+
+2. Install ZooKeeper
+
+        sudo apt-get install -y zookeeperd
+
+3. Install the Aurora scheduler
+
+        sudo add-apt-repository -y ppa:openjdk-r/ppa
+        sudo apt-get update
+        sudo apt-get install -y openjdk-8-jre-headless wget
+
+        sudo update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-scheduler_0.17.0_amd64.deb
+        sudo dpkg -i aurora-scheduler_0.17.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-master
+
+2. Install ZooKeeper
+
+        sudo rpm -Uvh https://archive.cloudera.com/cdh4/one-click-install/redhat/6/x86_64/cloudera-cdh-4-0.x86_64.rpm
+        sudo yum install -y java-1.8.0-openjdk-headless zookeeper-server
+
+        sudo service zookeeper-server init
+        sudo systemctl start zookeeper-server
+
+3. Install the Aurora scheduler
+
+        sudo yum install -y wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-scheduler-0.17.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-scheduler-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Finalizing
+By default, the scheduler will start in an uninitialized mode.  This is because external
+coordination is necessary to be certain operator error does not result in a quorum of schedulers
+starting up and believing their databases are empty when in fact they should be re-joining a
+cluster.
+
+Because of this, a fresh install of the scheduler will need intervention to start up.  First,
+stop the scheduler service.
+
+* Ubuntu: `sudo stop aurora-scheduler`
+* CentOS: `sudo systemctl stop aurora`
+
+Now initialize the database:
+
+    sudo -u aurora mkdir -p /var/lib/aurora/scheduler/db
+    sudo -u aurora mesos-log initialize --path=/var/lib/aurora/scheduler/db
+
+Now you can start the scheduler back up.
+
+* Ubuntu: `sudo start aurora-scheduler`
+* CentOS: `sudo systemctl start aurora`
+
+
+## Installing worker components
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo apt-get install -y python2.7 wget
+
+        # NOTE: This appears to be a missing dependency of the mesos deb package and is needed
+        # for the python mesos native bindings.
+        sudo apt-get -y install libcurl4-nss-dev
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-executor_0.17.0_amd64.deb
+        sudo dpkg -i aurora-executor_0.17.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo yum install -y python2 wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-executor-0.17.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-executor-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Worker Configuration
+The executor typically does not require configuration.  Command line arguments can
+be passed to the executor via a command line argument on the scheduler (the `-thermos_executor_flags` flag).
+
+The observer needs to be configured to look at the correct Mesos directory in order to find task
+sandboxes. You should first find the Mesos working directory by looking for the Mesos agent
+`--work_dir` flag. You should see something like:
+
+        ps -eocmd | grep "mesos-slave" | grep -v grep | tr ' ' '\n' | grep "\--work_dir"
+        --work_dir=/var/lib/mesos
+
+If the flag is not set, you can view the default value like so:
+
+        mesos-slave --help
+        Usage: mesos-slave [options]
+
+          ...
+          --work_dir=VALUE      Directory path to place framework work directories
+                                (default: /tmp/mesos)
+          ...
+
+The value you find for `--work_dir`, `/var/lib/mesos` in this example, should match the Aurora
+observer value for `--mesos-root`.  You can look for that setting in a similar way on a worker
+node by grepping for `thermos_observer` and `--mesos-root`.  If the flag is not set, you can view
+the default value like so:
+
+        thermos_observer -h
+        Options:
+          ...
+          --mesos-root=MESOS_ROOT
+                                The mesos root directory to search for Thermos
+                                executor sandboxes [default: /var/lib/mesos]
+          ...
+
+In this case the default is `/var/lib/mesos` and we have a match. If there is no match, you can
+either adjust the Mesos agent start script(s) and restart the agent(s) or else adjust the
+Aurora observer start scripts and restart the observers.  To adjust the Aurora observer:
+
+#### Ubuntu Trusty
+
+    sudo sh -c 'echo "MESOS_ROOT=/tmp/mesos" >> /etc/default/thermos'
+
+#### CentOS 7
+
+Make an edit to add the `--mesos-root` flag resulting in something like:
+
+    grep -A5 OBSERVER_ARGS /etc/sysconfig/thermos
+    OBSERVER_ARGS=(
+      --port=1338
+      --mesos-root=/tmp/mesos
+      --log_to_disk=NONE
+      --log_to_stderr=google:INFO
+    )
+
+
+## Installing the client
+### Ubuntu Trusty
+
+    sudo apt-get install -y python2.7 wget
+
+    wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-tools_0.17.0_amd64.deb
+    sudo dpkg -i aurora-tools_0.17.0_amd64.deb
+
+### CentOS 7
+
+    sudo yum install -y python2 wget
+
+    wget -c https://apache.bintray.com/aurora/centos-7/aurora-tools-0.17.0-1.el7.centos.aurora.x86_64.rpm
+    sudo yum install -y aurora-tools-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Mac OS X
+
+    brew upgrade
+    brew install aurora-cli
+
+### Client Configuration
+Client configuration lives in a JSON file that describes the clusters available and how to reach
+them.  By default this file is at `/etc/aurora/clusters.json`.
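+
+As a hedged example, a minimal `clusters.json` describing a single cluster reached via ZooKeeper
+might look like this (hostnames, paths, and the auth mechanism are illustrative):
+
+    [
+      {
+        "name": "devcluster",
+        "zk": "192.168.33.7",
+        "scheduler_zk_path": "/aurora/scheduler",
+        "auth_mechanism": "UNAUTHENTICATED",
+        "slave_run_directory": "latest",
+        "slave_root": "/var/lib/mesos"
+      }
+    ]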
+
+Jobs may be submitted to the scheduler using the client, and are described with
+[job configurations](../../reference/configuration/) expressed in `.aurora` files.  Typically you will
+maintain a single job configuration file to describe one or more deployment environments (e.g.
+dev, test, prod) for a production job.
+
+
+## Installing Mesos
+Mesos uses a single package for the Mesos master and agent.  As a result, the package dependencies
+are identical for both.
+
+### Mesos on Ubuntu Trusty
+
+    sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF
+    DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]')
+    CODENAME=$(lsb_release -cs)
+
+    echo "deb http://repos.mesosphere.io/${DISTRO} ${CODENAME} main" | \
+      sudo tee /etc/apt/sources.list.d/mesosphere.list
+    sudo apt-get -y update
+
+    # Use `apt-cache showpkg mesos | grep [version]` to find the exact version.
+    sudo apt-get -y install mesos=1.1.0-2.0.107.ubuntu1404
+
+### Mesos on CentOS 7
+
+    sudo rpm -Uvh https://repos.mesosphere.io/el/7/noarch/RPMS/mesosphere-el-repo-7-1.noarch.rpm
+    sudo yum -y install mesos-1.1.0
+
+
+## Troubleshooting
+
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions in our [Troubleshooting guide](../troubleshooting/) to help get you moving.
diff --git a/source/documentation/0.19.0/operations/monitoring.md b/source/documentation/0.19.0/operations/monitoring.md
new file mode 100644
index 0000000..2ceda62
--- /dev/null
+++ b/source/documentation/0.19.0/operations/monitoring.md
@@ -0,0 +1,181 @@
+# Monitoring your Aurora cluster
+
+Before you start running important services in your Aurora cluster, it's important to set up
+monitoring and alerting of Aurora itself.  Most of your monitoring can be against the scheduler,
+since it will give you a global view of what's going on.
+
+## Reading stats
+The scheduler exposes a *lot* of instrumentation data via its HTTP interface. You can get a quick
+peek at the first few of these in our vagrant image:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars | head'
+    async_tasks_completed 1004
+    attribute_store_fetch_all_events 15
+    attribute_store_fetch_all_events_per_sec 0.0
+    attribute_store_fetch_all_nanos_per_event 0.0
+    attribute_store_fetch_all_nanos_total 3048285
+    attribute_store_fetch_all_nanos_total_per_sec 0.0
+    attribute_store_fetch_one_events 3391
+    attribute_store_fetch_one_events_per_sec 0.0
+    attribute_store_fetch_one_nanos_per_event 0.0
+    attribute_store_fetch_one_nanos_total 454690753
+
+These values are served as `Content-Type: text/plain`, with each line containing a space-separated metric
+name and value. Values may be integers, doubles, or strings (note: strings are static, others
+may be dynamic).
+
+If your monitoring infrastructure prefers JSON, the scheduler exports that as well:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars.json | python -mjson.tool | head'
+    {
+        "async_tasks_completed": 1009,
+        "attribute_store_fetch_all_events": 15,
+        "attribute_store_fetch_all_events_per_sec": 0.0,
+        "attribute_store_fetch_all_nanos_per_event": 0.0,
+        "attribute_store_fetch_all_nanos_total": 3048285,
+        "attribute_store_fetch_all_nanos_total_per_sec": 0.0,
+        "attribute_store_fetch_one_events": 3409,
+        "attribute_store_fetch_one_events_per_sec": 0.0,
+        "attribute_store_fetch_one_nanos_per_event": 0.0,
+
+This will be the same data as above, served with `Content-Type: application/json`.
+
+## Viewing live stat samples on the scheduler
+The scheduler uses the Twitter commons stats library, which keeps an internal time-series database
+of exported variables - nearly everything in `/vars` is available for instant graphing.  This is
+useful for debugging, but is not a replacement for an external monitoring system.
+
+You can view these graphs on a scheduler at `/graphview`.  It supports some composition and
+aggregation of values, which can be invaluable when triaging a problem.  For example, if you have
+the scheduler running in vagrant, check out these links:
+
+* [simple graph](http://192.168.33.7:8081/graphview?query=jvm_uptime_secs)
+* [complex composition](http://192.168.33.7:8081/graphview?query=rate\(scheduler_log_native_append_nanos_total\)%2Frate\(scheduler_log_native_append_events\)%2F1e6)
+
+### Counters and gauges
+Among numeric stats, there are two fundamental types of stats exported: _counters_ and _gauges_.
+Counters are guaranteed to be monotonically-increasing for the lifetime of a process, while gauges
+may decrease in value.  Aurora uses counters to represent things like the number of times an event
+has occurred, and gauges to capture things like the current length of a queue.  Counters are a
+natural fit for accurate composition into [rate ratios](http://en.wikipedia.org/wiki/Rate_ratio)
+(useful for sample-resistant latency calculation), while gauges are not.
+
+# Alerting
+
+## Quickstart
+If you are looking for just bare-minimum alerting to get something in place quickly, set up alerting
+on `framework_registered` and `task_store_LOST`. These will give you a decent picture of overall
+health.
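+
+For example, in the vagrant image a quick sanity check of both values looks like this:
+
+    vagrant ssh -c 'curl -s localhost:8081/vars | grep -E "^(framework_registered|task_store_LOST) "'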
+
+## A note on thresholds
+One of the most difficult things in monitoring is choosing alert thresholds. With many of these
+stats, there is no value we can offer as a threshold that will be guaranteed to work for you. It
+will depend on the size of your cluster, number of jobs, churn of tasks in the cluster, etc. We
+recommend you start with a strict value after viewing a small amount of collected data, and then
+adjust thresholds as you see fit. Feel free to ask us if you would like to validate that your alerts
+and thresholds make sense.
+
+## Important stats
+
+### `jvm_uptime_secs`
+Type: integer counter
+
+The number of seconds the JVM process has been running. Comes from
+[RuntimeMXBean#getUptime()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/RuntimeMXBean.html#getUptime\(\))
+
+Detecting resets (decreasing values) on this stat will tell you that the scheduler is failing to
+stay alive.
+
+Look at the scheduler logs to identify the reason the scheduler is exiting.
+
+### `system_load_avg`
+Type: double gauge
+
+The current load average of the system for the last minute. Comes from
+[OperatingSystemMXBean#getSystemLoadAverage()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/OperatingSystemMXBean.html?is-external=true#getSystemLoadAverage\(\)).
+
+A high sustained value suggests that the scheduler machine may be over-utilized.
+
+Use standard unix tools like `top` and `ps` to track down the offending process(es).
+
+### `process_cpu_cores_utilized`
+Type: double gauge
+
+The current number of CPU cores in use by the JVM process. This should not exceed the number of
+logical CPU cores on the machine. Derived from
+[OperatingSystemMXBean#getProcessCpuTime()](http://docs.oracle.com/javase/7/docs/jre/api/management/extension/com/sun/management/OperatingSystemMXBean.html)
+
+A high sustained value indicates that the scheduler is overworked. Due to current internal design
+limitations, if this value is sustained at `1`, there is a good chance the scheduler is under water.
+
+There are two main inputs that tend to drive this figure: task scheduling attempts and status
+updates from Mesos.  You may see activity in the scheduler logs to give an indication of where
+time is being spent.  Beyond that, it really takes good familiarity with the code to effectively
+triage this.  We suggest engaging with an Aurora developer.
+
+### `task_store_LOST`
+Type: integer gauge
+
+The number of tasks stored in the scheduler that are in the `LOST` state, and have been rescheduled.
+
+If this value is increasing at a high rate, it is a sign of trouble.
+
+There are many sources of `LOST` tasks in Mesos: the scheduler, master, agent, and executor can all
+trigger this.  The first step is to look in the scheduler logs for `LOST` to identify where the
+state changes are originating.
+
+### `scheduler_resource_offers`
+Type: integer counter
+
+The number of resource offers that the scheduler has received.
+
+For a healthy scheduler, this value must be increasing over time.
+
+Assuming the scheduler is up and otherwise healthy, you will want to check if the master thinks it
+is sending offers. You should also look at the master's web interface to see if it has a large
+number of outstanding offers that it is waiting to have returned.
+
+### `framework_registered`
+Type: binary integer counter
+
+Will be `1` for the leading scheduler that is registered with the Mesos master, `0` for passive
+schedulers.
+
+A sustained period without a `1` (or where `sum() != 1`) warrants investigation.
+
+If there is no leading scheduler, look in the scheduler and master logs for why.  If there are
+multiple schedulers claiming leadership, this suggests a split brain and warrants filing a critical
+bug.
+
+### `rate(scheduler_log_native_append_nanos_total)/rate(scheduler_log_native_append_events)`
+Type: rate ratio of integer counters
+
+This composes two counters to compute a windowed figure for the latency of replicated log writes.
+
+A hike in this value suggests disk bandwidth contention.
+
+Look in scheduler logs for any reported oddness with saving to the replicated log. Also use
+standard tools like `vmstat` and `iotop` to identify whether the disk has become slow or
+over-utilized. We suggest using a dedicated disk for the replicated log to mitigate this.
+
+### `timed_out_tasks`
+Type: integer counter
+
+Tracks the number of times the scheduler has given up while waiting
+(for `-transient_task_state_timeout`) to hear back about a task that is in a transient state
+(e.g. `ASSIGNED`, `KILLING`), and has moved it to `LOST` before rescheduling.
+
+This value is currently known to increase occasionally when the scheduler fails over
+([AURORA-740](https://issues.apache.org/jira/browse/AURORA-740)). However, any large spike in this
+value warrants investigation.
+
+The scheduler will log when it times out a task. You should trace the task ID of the timed out
+task into the master, agent, and/or executors to determine where the message was dropped.
+
+### `http_500_responses_events`
+Type: integer counter
+
+The total number of HTTP 500 status responses sent by the scheduler. Includes API and asset serving.
+
+An increase warrants investigation.
+
+Look in the scheduler logs to identify why the scheduler returned a 500; there should be a stack trace.
diff --git a/source/documentation/0.19.0/operations/security.md b/source/documentation/0.19.0/operations/security.md
new file mode 100644
index 0000000..1fdf10f
--- /dev/null
+++ b/source/documentation/0.19.0/operations/security.md
@@ -0,0 +1,362 @@
+Securing your Aurora Cluster
+============================
+
+Aurora integrates with [Apache Shiro](http://shiro.apache.org/) to provide security
+controls for its API. In addition to providing some useful features out of the box, Shiro
+also allows Aurora cluster administrators to adapt the security system to their organization’s
+existing infrastructure. The announcer in the Aurora thermos executor also supports security
+controls for talking to ZooKeeper.
+
+
+- [Enabling Security](#enabling-security)
+- [Authentication](#authentication)
+	- [HTTP Basic Authentication](#http-basic-authentication)
+		- [Server Configuration](#server-configuration)
+		- [Client Configuration](#client-configuration)
+	- [HTTP SPNEGO Authentication (Kerberos)](#http-spnego-authentication-kerberos)
+		- [Server Configuration](#server-configuration-1)
+		- [Client Configuration](#client-configuration-1)
+- [Authorization](#authorization)
+	- [Using an INI file to define security controls](#using-an-ini-file-to-define-security-controls)
+		- [Caveats](#caveats)
+- [Implementing a Custom Realm](#implementing-a-custom-realm)
+	- [Packaging a realm module](#packaging-a-realm-module)
+- [Announcer Authentication](#announcer-authentication)
+    - [ZooKeeper authentication configuration](#zookeeper-authentication-configuration)
+    - [Executor settings](#executor-settings)
+- [Scheduler HTTPS](#scheduler-https)
+- [Known Issues](#known-issues)
+
+# Enabling Security
+
+There are two major components of security:
+[authentication and authorization](http://en.wikipedia.org/wiki/Authentication#Authorization).  A
+cluster administrator may choose the approach used for each, and may also implement custom
+mechanisms for either.  Later sections describe the options available. To enable authentication
+for the announcer, see [Announcer Authentication](#announcer-authentication).
+
+
+# Authentication
+
+The scheduler must be configured with instructions for how to process authentication
+credentials at a minimum.  There are currently two built-in authentication schemes -
+[HTTP Basic Authentication](http://en.wikipedia.org/wiki/Basic_access_authentication), and
+[SPNEGO](http://en.wikipedia.org/wiki/SPNEGO) (Kerberos).
+
+## HTTP Basic Authentication
+
+Basic Authentication is a very quick way to add *some* security.  It is supported
+by all major browsers and HTTP client libraries with minimal work.  However,
+before relying on Basic Authentication you should be aware of the [security
+considerations](http://tools.ietf.org/html/rfc2617#section-4).
+
+### Server Configuration
+
+At a minimum you need to set 3 command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_realm_modules=INI_AUTHNZ
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+[users]
+sally = apple, admin
+
+[roles]
+admin = *
+```
+
+The details of the security.ini file are explained below. Note that this file contains plaintext,
+unhashed passwords.
+
+### Client Configuration
+
+To configure the client for HTTP Basic authentication, add an entry to `~/.netrc` with your credentials:
+
+```
+% cat ~/.netrc
+# ...
+
+machine aurora.example.com
+login sally
+password apple
+
+# ...
+```
+
+No changes are required to `clusters.json`.
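+
+For ad-hoc scripts that talk to the scheduler's HTTP endpoints directly (rather than through the
+Aurora client), the same credentials can be supplied via HTTP Basic authentication. A minimal
+sketch, assuming the scheduler from the example above listens on `aurora.example.com:8081` and
+that the chosen endpoint requires authentication:
+
+```python
+# Illustrative only; host, port, and endpoint are assumptions for this example.
+import urllib.request
+
+BASE = 'http://aurora.example.com:8081'
+
+def fetch(path, user='sally', password='apple'):
+    mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
+    mgr.add_password(None, BASE, user, password)
+    opener = urllib.request.build_opener(urllib.request.HTTPBasicAuthHandler(mgr))
+    with opener.open(BASE + path) as resp:
+        return resp.read().decode()
+
+print(fetch('/vars'))
+```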
+
+## HTTP SPNEGO Authentication (Kerberos)
+
+### Server Configuration
+At a minimum you need to set 5 command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=NEGOTIATE
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+% cat path/to/security.ini
+[users]
+sally = _, admin
+
+[roles]
+admin = *
+```
+
+What's going on here? First, Aurora must be configured to request Kerberos credentials when presented with an
+unauthenticated request. This is achieved by setting
+
+```
+-http_authentication_mechanism=NEGOTIATE
+```
+
+Next, a Realm module must be configured to **authenticate** the current request using the Kerberos
+credentials that were requested. Aurora ships with a realm module that can do this:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN[,...]
+```
+
+The Kerberos5Realm requires a keytab file and a server principal name. The principal name will usually
+be in the form `HTTP/aurora.example.com@EXAMPLE.COM`.
+
+```
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+```
+
+The Kerberos5 realm module is authentication-only. For scheduler security to work you must also
+enable a realm module that provides an Authorizer implementation. For example, to do this using the
+IniShiroRealmModule:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+```
+
+You can then configure authorization using a security.ini file as described below
+(the password field is ignored). You must configure the realm module with the path to this file:
+
+```
+-shiro_ini_path=path/to/security.ini
+```
+
+### Client Configuration
+To use Kerberos on the client-side you must build Kerberos-enabled client binaries. Do this with
+
+```
+./pants binary src/main/python/apache/aurora/kerberos:kaurora
+./pants binary src/main/python/apache/aurora/kerberos:kaurora_admin
+```
+
+You must also configure each cluster where you've enabled Kerberos on the scheduler
+to use Kerberos authentication. Do this by setting `auth_mechanism` to `KERBEROS`
+in `clusters.json`.
+
+```
+% cat ~/.aurora/clusters.json
+{
+    "devcluser": {
+        "auth_mechanism": "KERBEROS",
+        ...
+    },
+    ...
+}
+```
+
+# Authorization
+Given a means to authenticate the entity a client claims they are, we need to define what privileges they have.
+
+## Using an INI file to define security controls
+
+The simplest security configuration for Aurora is an INI file on the scheduler.  For small
+clusters, or clusters where the users and access controls change relatively infrequently, this is
+likely the preferred approach.  However you may want to avoid this approach if access permissions
+are rapidly changing, or if your access control information already exists in another system.
+
+You can enable INI-based configuration with the following scheduler command line arguments:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_ini_path=path/to/security.ini
+```
+
+*Note:* As the argument name reveals, this is using Shiro’s
+[IniRealm](http://shiro.apache.org/configuration.html#Configuration-INIConfiguration) behind
+the scenes.
+
+The INI file will contain two sections - users and roles.  Here’s an example for what might
+be in security.ini:
+
+```
+[users]
+sally = apple, admin
+jim = 123456, accounting
+becky = letmein, webapp
+larry = 654321, accounting
+steve = password
+
+[roles]
+admin = *
+accounting = thrift.AuroraAdmin:setQuota
+webapp = thrift.AuroraSchedulerManager:*:webapp
+```
+
+The users section defines user credentials and the role(s) they are members of.  These lines
+are of the format `<user> = <password>[, <role>...]`.  As you probably noticed, the passwords are
+in plaintext and as a result read access to this file should be restricted.
+
+In this configuration, each user has different privileges for actions in the cluster because
+of the roles they are a part of:
+
+* admin is granted all privileges
+* accounting may adjust the amount of resource quota for any role
+* webapp represents a collection of jobs that represents a service, and its members may create and modify any jobs owned by it
+
+### Caveats
+You might find documentation on the Internet suggesting there are additional sections in `shiro.ini`,
+like `[main]` and `[urls]`. These are not supported by Aurora as it uses a different mechanism to configure
+those parts of Shiro. Think of Aurora's `security.ini` as a subset with only `[users]` and `[roles]` sections.
+
+## Implementing Delegated Authorization
+
+It is possible to leverage Shiro's `runAs` feature by implementing a custom Servlet Filter that provides
+the capability and passing its fully qualified class name to the command line argument
+`-shiro_after_auth_filter`. The filter is registered in the same filter chain as the Shiro auth filters
+and is placed after the Shiro auth filters in the filter chain. This ensures that the Filter is invoked
+after the Shiro filters have had a chance to authenticate the request.
+
+# Implementing a Custom Realm
+
+Since Aurora’s security is backed by [Apache Shiro](https://shiro.apache.org), you can implement a
+custom [Realm](http://shiro.apache.org/realm.html) to define organization-specific security behavior.
+
+In addition to using Shiro's standard APIs to implement a Realm you can link against Aurora to
+access the type-safe Permissions Aurora uses. See the Javadoc for `org.apache.aurora.scheduler.spi`
+for more information.
+
+## Packaging a realm module
+Package your custom Realm(s) with a Guice module that exposes a `Set<Realm>` multibinding.
+
+```java
+package com.example;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.multibindings.Multibinder;
+import org.apache.shiro.realm.Realm;
+
+public class MyRealmModule extends AbstractModule {
+  @Override
+  public void configure() {
+    Realm myRealm = new MyRealm();
+
+    Multibinder.newSetBinder(binder(), Realm.class).addBinding().toInstance(myRealm);
+  }
+
+  static class MyRealm implements Realm {
+    // Realm implementation.
+  }
+}
+```
+
+To use your module in the scheduler, include it as a realm module based on its fully-qualified
+class name:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ,com.example.MyRealmModule
+```
+
+
+# Announcer Authentication
+The Thermos executor can be configured to authenticate with ZooKeeper and include
+an [ACL](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl)
+on the nodes it creates, which will specify
+the privileges of clients to perform different actions on these nodes.  This
+feature is enabled by specifying an ACL configuration file to the executor with the
+`--announcer-zookeeper-auth-config` command line argument.
+
+When this feature is _not_ enabled, nodes created by the executor will have 'world/all' permission
+(`ZOO_OPEN_ACL_UNSAFE`).  In most production environments, operators should specify an ACL and
+limit access.
+
+## ZooKeeper Authentication Configuration
+The configuration file must be formatted as JSON with the following schema:
+
+```json
+{
+  "auth": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>"
+    }
+  ],
+  "acl": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>",
+      "permissions": {
+        "read": <bool>,
+        "write": <bool>,
+        "create": <bool>,
+        "delete": <bool>,
+        "admin": <bool>
+      }
+    }
+  ]
+}
+```
+
+The `scheme`
+defines the encoding of the credential field.  Note that these fields are passed directly to
+ZooKeeper (except in the case of _digest_ scheme, where the executor will hash and encode
+the credential appropriately before passing it to ZooKeeper). In addition to `acl`, a list of
+authentication credentials must be provided in `auth` to use for the connection.
+
+All properties of the `permissions` object will default to `false` if not provided.
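+
+As an illustration, the snippet below generates a configuration of this shape for the `digest`
+scheme, granting full permissions to a single user and authenticating the executor's connection
+with the same credential. The username, password, and output path are made up for the example.
+
+```python
+# Hypothetical generator for an announcer ZooKeeper auth config using the digest scheme.
+# For digest, the credential is plain "user:password"; per the note above, the executor
+# hashes and encodes it before passing it to ZooKeeper.
+import json
+
+config = {
+    'auth': [{'scheme': 'digest', 'credential': 'aurora:secret'}],
+    'acl': [{
+        'scheme': 'digest',
+        'credential': 'aurora:secret',
+        'permissions': {'read': True, 'write': True, 'create': True,
+                        'delete': True, 'admin': True},
+    }],
+}
+
+with open('announcer-zk-auth.json', 'w') as f:
+    json.dump(config, f, indent=2)
+```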
+
+## Executor settings
+To enable the executor to authenticate against ZK, `--announcer-zookeeper-auth-config` should be
+set to the configuration file.
+
+
+# Scheduler HTTPS
+
+The Aurora scheduler does not provide native HTTPS support ([AURORA-343](https://issues.apache.org/jira/browse/AURORA-343)).
+It is therefore recommended to deploy it behind an HTTPS capable reverse proxy such as nginx or Apache2.
+
+A simple setup is to launch both the reverse proxy and the Aurora scheduler on the same port, but
+bind the reverse proxy to the public IP of the host and the scheduler to localhost:
+
+    -ip=127.0.0.1
+    -http_port=8081
+
+If your clients connect to the scheduler via [`proxy_url`](../../reference/scheduler-configuration/),
+you can update it to `https`. If you use the ZooKeeper based discovery instead, the scheduler
+needs to be launched via
+
+    -serverset_endpoint_name=https
+
+in order to announce its HTTPS support within ZooKeeper.
+
+
+# Known Issues
+
+While the APIs and SPIs we ship with are stable as of 0.8.0, we are aware of several incremental
+improvements. Please follow, vote, or send patches.
+
+Relevant tickets:
+* [AURORA-1248](https://issues.apache.org/jira/browse/AURORA-1248): Client retries 4xx errors
+* [AURORA-1279](https://issues.apache.org/jira/browse/AURORA-1279): Remove kerberos-specific build targets
+* [AURORA-1293](https://issues.apache.org/jira/browse/AURORA-1291): Consider defining a JSON format in place of INI
+* [AURORA-1179](https://issues.apache.org/jira/browse/AURORA-1179): Support hashed passwords in security.ini
+* [AURORA-1295](https://issues.apache.org/jira/browse/AURORA-1295): Support security for the ReadOnlyScheduler service
diff --git a/source/documentation/0.19.0/operations/storage.md b/source/documentation/0.19.0/operations/storage.md
new file mode 100644
index 0000000..fc3a1c3
--- /dev/null
+++ b/source/documentation/0.19.0/operations/storage.md
@@ -0,0 +1,96 @@
+# Aurora Scheduler Storage
+
+- [Overview](#overview)
+- [Storage Semantics](#storage-semantics)
+  - [Reads, writes, modifications](#reads-writes-modifications)
+    - [Read lifecycle](#read-lifecycle)
+    - [Write lifecycle](#write-lifecycle)
+  - [Atomicity, consistency and isolation](#atomicity-consistency-and-isolation)
+  - [Population on restart](#population-on-restart)
+
+
+## Overview
+
+The Aurora scheduler maintains data that needs to be persisted to survive failovers and restarts.
+For example:
+
+* Task configurations and scheduled task instances
+* Job update configurations and update progress
+* Production resource quotas
+* Mesos resource offer host attributes
+
+Aurora solves its persistence needs by leveraging the
+[Mesos implementation of a Paxos replicated log](http://mesos.apache.org/documentation/latest/replicated-log-internals/)
+[[1]](https://ramcloud.stanford.edu/~ongaro/userstudy/paxos.pdf)
+[[2]](http://en.wikipedia.org/wiki/State_machine_replication) with a key-value
+[LevelDB](https://github.com/google/leveldb) storage as persistence media.
+
+Conceptually, it can be represented by the following major components:
+
+* Volatile storage: in-memory cache of all available data. Implemented via in-memory
+[H2 Database](http://www.h2database.com/html/main.html) and accessed via
+[MyBatis](http://mybatis.github.io/mybatis-3/).
+* Log manager: interface between Aurora storage and Mesos replicated log. The default schema format
+is [thrift](https://github.com/apache/thrift). Data is stored in serialized binary form.
+* Snapshot manager: all data is periodically persisted in Mesos replicated log in a single snapshot.
+This helps establish periodic recovery checkpoints and speeds up volatile storage recovery on
+restart.
+* Backup manager: as a precaution, snapshots are periodically written out into backup files.
+This solves a [disaster recovery problem](../backup-restore/)
+in case of a complete loss or corruption of Mesos log files.
+
+![Storage hierarchy](../images/storage_hierarchy.png)
+
+
+## Storage Semantics
+
+This section covers implementation details of the Aurora storage system. Understanding them can be
+useful when investigating performance issues.
+
+### Reads, writes, modifications
+
+All services in Aurora access data via a set of predefined store interfaces (aka stores) logically
+grouped by the type of data they serve. Every interface defines a specific set of operations allowed
+on the data, thus abstracting out the storage access and the actual persistence implementation. The
+latter is especially important in view of a general immutability of persisted data. With the Mesos
+replicated log as the underlying persistence solution, data can be read and written easily but not
+modified. All modifications are simulated by saving new versions of modified objects. This feature
+and general performance considerations justify the existence of the volatile in-memory store.
+
+#### Read lifecycle
+
+There are two types of reads available in Aurora: consistent and weakly-consistent. The difference
+is explained [below](#atomicity-consistency-and-isolation).
+
+All reads are served from the volatile storage, making them generally cheap operations from a
+performance standpoint. The majority of the volatile stores are represented by the
+in-memory H2 database. This allows for rich schema definitions, queries and relationships that
+key-value storage is unable to match.
+
+#### Write lifecycle
+
+Writes are more involved operations since, in addition to updating the volatile store, data has to
+be appended to the replicated log. Data is not available for reads until fully acknowledged by both
+the replicated log and the volatile storage.
+
+### Atomicity, consistency and isolation
+
+Aurora uses [write-ahead logging](http://en.wikipedia.org/wiki/Write-ahead_logging) to ensure
+consistency between replicated and volatile storage. In Aurora, data is first written into the
+replicated log and only then updated in the volatile store.
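+
+A conceptual sketch of this ordering (not Aurora's actual code):
+
+```python
+# Write-ahead ordering: the mutation is durably appended to the replicated log first,
+# and only then applied to the volatile in-memory store that serves reads. A crash
+# between the two steps is recovered on restart by replaying the log
+# (see "Population on restart" below).
+def write(op, replicated_log, volatile_store):
+    replicated_log.append(op)
+    volatile_store.apply(op)
+```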
+
+Aurora storage uses read-write locks to serialize data mutations and provide a consistent view of
+the available data. The `Storage` interface exposes three major types of operations:
+
+* `consistentRead` - access is locked using the reader's lock and provides a consistent view on read.
+* `weaklyConsistentRead` - access is lock-less; delivers the best contention performance but may
+  result in stale reads.
+* `write` - access is fully serialized using the writer's lock. Operation success requires both the
+  volatile and replicated writes to succeed.
+
+The consistency of the volatile store is enforced via H2 transactional isolation.
+
+### Population on restart
+
+Any time a scheduler restarts, it restores its volatile state from the most recent position recorded
+in the replicated log by restoring the snapshot and replaying individual log entries on top to fully
+recover the state up to the last write.
diff --git a/source/documentation/0.19.0/operations/troubleshooting.md b/source/documentation/0.19.0/operations/troubleshooting.md
new file mode 100644
index 0000000..a7d4ef2
--- /dev/null
+++ b/source/documentation/0.19.0/operations/troubleshooting.md
@@ -0,0 +1,106 @@
+# Troubleshooting
+
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions here to help get you moving.
+
+## Replicated log not initialized
+
+### Symptoms
+- Scheduler RPCs and web interface claim `Storage is not READY`
+- Scheduler log repeatedly prints messages like
+
+  ```
+  I1016 16:12:27.234133 26081 replica.cpp:638] Replica in EMPTY status
+  received a broadcasted recover request
+  I1016 16:12:27.234256 26084 recover.cpp:188] Received a recover response
+  from a replica in EMPTY status
+  ```
+
+### Solution
+When you create a new cluster, you need to inform a quorum of schedulers that they are safe to
+consider their database to be empty by [initializing](../installation/#finalizing) the
+replicated log. This is done to prevent the scheduler from modifying the cluster state in the event
+of multiple simultaneous disk failures or, more likely, misconfiguration of the replicated log path.
+
+
+## No distinct leader elected
+
+### Symptoms
+Either no scheduler or multiple schedulers believe they are leading.
+
+### Solution
+Verify the [network configuration](../configuration/#network-configuration) of the Aurora
+scheduler is correct:
+
+* The `LIBPROCESS_IP:LIBPROCESS_PORT` endpoints must be reachable from all coordinator nodes running
+  a scheduler or a Mesos master.
+* Hostname lookups have to resolve to public IPs rather than local ones that cannot be reached
+  from another node.
+
+In addition, double-check the [quorum settings](../configuration/#replicated-log-configuration) of the
+replicated log.
+
+
+## Scheduler not registered
+
+### Symptoms
+Scheduler log contains
+
+    Framework has not been registered within the tolerated delay.
+
+### Solution
+Double-check that the scheduler is configured correctly to reach the Mesos master. If you are registering
+the master in ZooKeeper, make sure the command line argument to the master:
+
+    --zk=zk://$ZK_HOST:2181/mesos/master
+
+is the same as the one on the scheduler:
+
+    -mesos_master_address=zk://$ZK_HOST:2181/mesos/master
+
+
+## Scheduler not running
+
+### Symptoms
+The scheduler process commits suicide regularly. This happens under error conditions, but
+also on purpose at regular intervals.
+
+### Solution
+Aurora is meant to be run under supervision. You have to configure a supervisor like
+[Monit](http://mmonit.com/monit/), [supervisord](http://supervisord.org/), or systemd to run the
+scheduler and restart it whenever it fails or exits on purpose.
+
+Aurora supports an active health checking protocol on its admin HTTP interface - if a `GET /health`
+times out or returns anything other than `200 OK` the scheduler process is unhealthy and should be
+restarted.
+
+For example, monit can be configured with
+
+    if failed port 8081 send "GET /health HTTP/1.0\r\n" expect "OK\n" with timeout 2 seconds for 10 cycles then restart
+
+assuming you set `-http_port=8081`.
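+
+If you prefer to script the same check yourself (e.g. for a cron-based watchdog), a minimal
+sketch could look like the following; only the port comes from the flag above, everything else
+is illustrative:
+
+```python
+# Hypothetical health probe: exit non-zero unless GET /health returns 200 "OK".
+import sys
+import urllib.request
+
+try:
+    with urllib.request.urlopen('http://localhost:8081/health', timeout=2) as resp:
+        healthy = resp.status == 200 and resp.read().decode().strip() == 'OK'
+except Exception:
+    healthy = False
+
+sys.exit(0 if healthy else 1)
+```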
+
+
+## Executor crashing or hanging
+
+### Symptoms
+Launched task instances never transition to `STARTING` or `RUNNING` but immediately transition
+to `FAILED` or `LOST`.
+
+### Solution
+The executor might be failing due to unknown internal errors such as a missing native dependency
+of the Mesos executor library. Open the Mesos UI and navigate to the failing
+task in question. Inspect the various log files in order to learn about what is going on.
+
+
+## Observer does not discover tasks
+
+### Symptoms
+The observer UI does not list any tasks. When navigating from the scheduler UI to the state of
+a particular task instance the observer returns `Error: 404 Not Found`.
+
+### Solution
+The observer refreshes its internal state every couple of seconds. If waiting a few seconds
+does not resolve the issue, check that the `--mesos-root` setting of the observer and the
+`--work_dir` option of the Mesos agent are in sync. For details, see our
+[Install instructions](../installation/#worker-configuration).
diff --git a/source/documentation/0.19.0/operations/upgrades.md b/source/documentation/0.19.0/operations/upgrades.md
new file mode 100644
index 0000000..635faf5
--- /dev/null
+++ b/source/documentation/0.19.0/operations/upgrades.md
@@ -0,0 +1,41 @@
+# Upgrading Aurora
+
+Aurora can be updated from one version to the next without any downtime or restarts of running
+jobs. The same holds true for Mesos.
+
+Generally speaking, Mesos and Aurora strive for a +1/-1 version compatibility, i.e. all components
+are meant to be forward and backwards compatible for at least one version. This implies it
+does not really matter in which order updates are carried out.
+
+Exceptions to this rule are documented in the [Aurora release-notes](../../../RELEASE-NOTES/)
+and the [Mesos upgrade instructions](https://mesos.apache.org/documentation/latest/upgrades/).
+
+
+## Instructions
+
+To upgrade Aurora, follow these steps:
+
+1. Update the first scheduler instance by updating its software and restarting its process.
+2. Wait until the scheduler is up and its [Replicated Log](../configuration/#replicated-log-configuration)
+   has caught up with the other schedulers in the cluster. The log has caught up if `log/recovered` has
+   the value `1`. You can check the metric via `curl LIBPROCESS_IP:LIBPROCESS_PORT/metrics/snapshot`,
+   where IP and port refer to the [libmesos configuration](../configuration/#network-configuration)
+   settings of the scheduler instance (see the polling sketch after this list).
+3. Proceed with the next scheduler until all instances are updated.
+4. Update the Aurora executor deployed to the compute nodes of your cluster. Jobs will continue
+   running with the old version of the executor, and will only be launched by the new one once
+   they are restarted eventually due to natural cluster churn.
+5. Distribute the new Aurora client to your users.
+
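+The check in step 2 can be scripted; the sketch below assumes `metrics/snapshot` returns a flat
+JSON object of metric names to values and that the scheduler's libprocess endpoint is reachable
+from where the script runs:
+
+```python
+# Illustrative poll: wait until the restarted scheduler's replicated log has caught up.
+import json
+import time
+import urllib.request
+
+def wait_for_log_recovery(endpoint, timeout=300):
+    """endpoint: 'http://LIBPROCESS_IP:LIBPROCESS_PORT' of the restarted scheduler."""
+    deadline = time.time() + timeout
+    while time.time() < deadline:
+        with urllib.request.urlopen(endpoint + '/metrics/snapshot') as resp:
+            metrics = json.load(resp)
+        if metrics.get('log/recovered') == 1:
+            return True
+        time.sleep(5)
+    return False
+```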
+
+## Best Practices
+
+Even though not absolutely mandatory, we advise adhering to the following rules:
+
+* Never skip any major or minor releases when updating. If you have to catch up several releases you
+  have to deploy all intermediary versions. Skipping bugfix releases is acceptable though.
+* Verify all updates on a test cluster before touching your production deployments.
+* To minimize the number of failovers during updates, update the currently leading scheduler
+  instance last.
+* Update the Aurora executor on a subset of compute nodes as a canary before deploying the change to
+  the whole fleet.
diff --git a/source/documentation/0.19.0/reference/client-cluster-configuration.md b/source/documentation/0.19.0/reference/client-cluster-configuration.md
new file mode 100644
index 0000000..ff7a3ac
--- /dev/null
+++ b/source/documentation/0.19.0/reference/client-cluster-configuration.md
@@ -0,0 +1,99 @@
+# Client Cluster Configuration
+
+A cluster configuration file is used by the Aurora client to describe the Aurora clusters with
+which it can communicate. Ultimately this allows client users to reference clusters with short names
+like us-east and eu.
+
+A cluster configuration is formatted as JSON.  The simplest cluster configuration is one that
+communicates with a single (non-leader-elected) scheduler.  For example:
+
+    [{
+      "name": "example",
+      "scheduler_uri": "http://localhost:55555",
+    }]
+
+
+A configuration for a leader-elected scheduler would contain something like:
+
+    [{
+      "name": "example",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler"
+    }]
+
+
+The following properties may be set:
+
+  **Property**             | **Type** | **Description**
+  :------------------------| :------- | :--------------
+   **name**                | String   | Cluster name (Required)
+   **slave_root**          | String   | Path to Mesos agent work dir (Required)
+   **slave_run_directory** | String   | Name of Mesos agent run dir (Required)
+   **zk**                  | String   | Hostname of ZooKeeper instance used to resolve Aurora schedulers.
+   **zk_port**             | Integer  | Port of ZooKeeper instance used to locate Aurora schedulers (Default: 2181)
+   **scheduler_zk_path**   | String   | ZooKeeper path under which scheduler instances are registered.
+   **scheduler_uri**       | String   | URI of Aurora scheduler instance.
+   **proxy_url**           | String   | Used by the client to format URLs for display.
+   **auth_mechanism**      | String   | The authentication mechanism to use when communicating with the scheduler. (Default: UNAUTHENTICATED)
+   **docker_registry**     | String   | Used by the client to resolve docker tags.
+
+
+## Details
+
+### `name`
+
+The name of the Aurora cluster represented by this entry. This name will be the `cluster` portion of
+any job keys identifying jobs running within the cluster.
+
+### `slave_root`
+
+The path on the Mesos agents where executing tasks can be found. It is used in combination with the
+`slave_run_directory` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This value should match the value passed to `mesos-slave`
+as `-work_dir`.
+
+### `slave_run_directory`
+
+The name of the directory where the task run can be found. This is used in combination with the
+`slave_root` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This should almost always be set to `latest`.
+
+### `zk`
+
+The hostname of the ZooKeeper instance used to resolve the Aurora scheduler. Aurora uses ZooKeeper
+to elect a leader. The client will connect to this ZooKeeper instance to determine the current
+leader. This host should match the host passed to the scheduler as `-zk_endpoints`.
+
+### `zk_port`
+
+The port on which the ZooKeeper instance is running. If not set this will default to the standard
+ZooKeeper port of 2181. This port should match the port in the host passed to the scheduler as
+`-zk_endpoints`.
+
+### `scheduler_zk_path`
+
+The path on the ZooKeeper instance under which the Aurora serverset is registered. This value should
+match the value passed to the scheduler as `-serverset_path`.
+
+### `scheduler_uri`
+
+The URI of the scheduler. This would be used in place of the ZooKeeper related configuration above
+in circumstances where direct communication with a single scheduler is needed (e.g. testing
+environments). It is strongly advised to **never** use this property for production deploys.
+
+### `proxy_url`
+
+Instead of using the hostname of the leading scheduler as the base url, if `proxy_url` is set, its
+value will be used instead. In that scenario the value for `proxy_url` would be, for example, the
+URL of your VIP in a loadbalancer or a roundrobin DNS name.
+
+### `auth_mechanism`
+
+The identifier of an authentication mechanism that the client should use when communicating with the
+scheduler. Support for values other than `UNAUTHENTICATED` requires a matching scheduler-side
+[security configuration](../../operations/security/).
+
+### `docker_registry`
+
+The URI of the Docker Registry that will be used by the Aurora client to resolve docker tags to concrete
+image ids, when using the docker binding helper, like `{{docker.image[name][tag]}}`.
diff --git a/source/documentation/0.19.0/reference/client-commands.md b/source/documentation/0.19.0/reference/client-commands.md
new file mode 100644
index 0000000..49b45ca
--- /dev/null
+++ b/source/documentation/0.19.0/reference/client-commands.md
@@ -0,0 +1,339 @@
+Aurora Client Commands
+======================
+
+- [Introduction](#introduction)
+- [Cluster Configuration](#cluster-configuration)
+- [Job Keys](#job-keys)
+- [Modifying Aurora Client Commands](#modifying-aurora-client-commands)
+- [Regular Jobs](#regular-jobs)
+    - [Creating and Running a Job](#creating-and-running-a-job)
+    - [Running a Command On a Running Job](#running-a-command-on-a-running-job)
+    - [Killing a Job](#killing-a-job)
+    - [Adding Instances](#adding-instances)
+    - [Updating a Job](#updating-a-job)
+        - [Coordinated job updates](#coordinated-job-updates)
+    - [Renaming a Job](#renaming-a-job)
+    - [Restarting Jobs](#restarting-jobs)
+- [Cron Jobs](#cron-jobs)
+- [Comparing Jobs](#comparing-jobs)
+- [Viewing/Examining Jobs](#viewingexamining-jobs)
+    - [Listing Jobs](#listing-jobs)
+    - [Inspecting a Job](#inspecting-a-job)
+    - [Versions](#versions)
+    - [Checking Your Quota](#checking-your-quota)
+    - [Finding a Job on Web UI](#finding-a-job-on-web-ui)
+    - [Getting Job Status](#getting-job-status)
+    - [Opening the Web UI](#opening-the-web-ui)
+    - [SSHing to a Specific Task Machine](#sshing-to-a-specific-task-machine)
+    - [SCPing with Specific Task Machines](#scping-with-specific-task-machines)
+    - [Templating Command Arguments](#templating-command-arguments)
+
+Introduction
+------------
+
+Once you have written an `.aurora` configuration file that describes
+your Job and its parameters and functionality, you interact with Aurora
+using Aurora Client commands. This document describes all of these commands
+and how and when to use them. All Aurora Client commands start with
+`aurora`, followed by the name of the specific command and its
+arguments.
+
+*Job keys* are a very common argument to Aurora commands, as well as the
+gateway to useful information about a Job. Before using Aurora, you
+should read the next section which describes them in detail. The section
+after that briefly describes how you can modify the behavior of certain
+Aurora Client commands, linking to a detailed document about how to do
+that.
+
+This is followed by the Regular Jobs section, which describes the basic
+Client commands for creating, running, and manipulating Aurora Jobs.
+After that are sections on Comparing Jobs and Viewing/Examining Jobs. In
+other words, various commands for getting information and metadata about
+Aurora Jobs.
+
+Cluster Configuration
+---------------------
+
+The client must be able to find a configuration file that specifies available clusters. This file
+declares shorthand names for clusters, which are in turn referenced by job configuration files
+and client commands.
+
+The client will load at most two configuration files, making both of their defined clusters
+available. The first is intended to be a system-installed cluster file, using the path specified in
+the environment variable `AURORA_CONFIG_ROOT`, defaulting to `/etc/aurora/clusters.json` if the
+environment variable is not set. The second is a user-installed file, located at
+`~/.aurora/clusters.json`.
+
+For more details on cluster configuration see the
+[Client Cluster Configuration](../client-cluster-configuration/) documentation.
+
+Job Keys
+--------
+
+A job key is a unique system-wide identifier for an Aurora-managed
+Job, for example `cluster1/web-team/test/experiment204`. It is a 4-tuple
+consisting of, in order, *cluster*, *role*, *environment*, and
+*jobname*, separated by `/`s. Cluster is the name of an Aurora
+cluster. Role is the Unix service account under which the Job
+runs. Environment is a namespace component like `devel`, `test`,
+`prod`, or `stagingN`. Jobname is the Job's name.
+
+The combination of all four values uniquely specifies the Job. If any
+one value is different from that of another job key, the two job keys
+refer to different Jobs. For example, job key
+`cluster1/tyg/prod/workhorse` is different from
+`cluster1/tyg/prod/workcamel` is different from
+`cluster2/tyg/prod/workhorse` is different from
+`cluster2/foo/prod/workhorse` is different from
+`cluster1/tyg/test/workhorse`.
+
+Role names are user accounts existing on the agent machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
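+
+As a tiny illustration of the structure described above (not an Aurora API), splitting a job key
+back into its four components looks like this:
+
+    # Illustrative only: a job key is "<cluster>/<role>/<env>/<jobname>".
+    def parse_job_key(key):
+        cluster, role, env, name = key.split('/')
+        return {'cluster': cluster, 'role': role, 'env': env, 'name': name}
+
+    parse_job_key('cluster1/web-team/test/experiment204')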
+
+Modifying Aurora Client Commands
+--------------------------------
+
+For certain Aurora Client commands, you can define hook methods that run
+either before or after an action that takes place during the command's
+execution, as well as based on whether the action finished successfully or failed
+during execution. Basically, a hook is code that lets you extend the
+command's actions. The hook executes on the client side, specifically on
+the machine executing Aurora commands.
+
+Hooks can be associated with these Aurora Client commands:
+
+  - `job create`
+  - `job kill`
+  - `job restart`
+
+The process for writing and activating them is complex enough
+that we explain it in a devoted document, [Hooks for Aurora Client API](../client-hooks/).
+
+Regular Jobs
+------------
+
+This section covers Aurora commands related to running, killing,
+renaming, updating, and restarting a basic Aurora Job.
+
+### Creating and Running a Job
+
+    aurora job create <job key> <configuration file>
+
+Creates and then runs a Job with the specified job key based on a `.aurora` configuration file.
+The configuration file may also contain and activate hook definitions.
+
+### Running a Command On a Running Job
+
+    aurora task run CLUSTER/ROLE/ENV/NAME[/INSTANCES] <cmd>
+
+Runs a shell command on all machines currently hosting shards of a
+single Job.
+
+`run` supports the same command line wildcards used to populate a Job's
+commands; i.e. anything in the `{{mesos.*}}` and `{{thermos.*}}`
+namespaces.
+
+### Killing a Job
+
+    aurora job killall CLUSTER/ROLE/ENV/NAME
+
+Kills all Tasks associated with the specified Job, blocking until all
+are terminated. Defaults to killing all instances in the Job.
+
+The `<configuration file>` argument for `kill` is optional. Use it only
+if it contains hook definitions and activations that affect the
+kill command.
+
+### Adding Instances
+
+    aurora job add CLUSTER/ROLE/ENV/NAME/INSTANCE <count>
+
+Adds `<count>` instances to the existing job. The configuration of the new instances is derived from
+an active job instance pointed to by the `/INSTANCE` part of the job specification. This command is
+a simpler way to scale out an existing job when an instance with desired task configuration
+already exists. Use `aurora update start` to add instances with a new (updated) configuration.
+
+### Updating a Job
+
+You can manage job updates using the `aurora update` command.  Please see
+[the Job Update documentation](../../features/job-updates/) for more details.
+
+
+### Renaming a Job
+
+Renaming is a tricky operation as downstream clients must be informed of
+the new name. A conservative approach
+to renaming suitable for production services is:
+
+1.  Modify the Aurora configuration file to change the role,
+    environment, and/or name as appropriate to the standardized naming
+    scheme.
+2.  Check that only these naming components have changed
+    with `aurora diff`.
+
+        aurora job diff CLUSTER/ROLE/ENV/NAME <job_configuration>
+
+3.  Create the (identical) job at the new key. You may need to request a
+    temporary quota increase.
+
+        aurora job create CLUSTER/ROLE/ENV/NEW_NAME <job_configuration>
+
+4.  Migrate all clients over to the new job key. Update all links and
+    dashboards. Ensure that both job keys run identical versions of the
+    code while in this state.
+5.  After verifying that all clients have successfully moved over, kill
+    the old job.
+
+        aurora job killall CLUSTER/ROLE/ENV/NAME
+
+6.  If you received a temporary quota increase, be sure to let the
+    powers that be know you no longer need the additional capacity.
+
+### Restarting Jobs
+
+`restart` restarts all shards of the Job identified by the job key:
+
+    aurora job restart CLUSTER/ROLE/ENV/NAME[/INSTANCES]
+
+Restarts are controlled on the client side, so aborting
+the `job restart` command halts the restart operation.
+
+**Note**: `job restart` only applies its command line arguments and neither uses
+nor is affected by `update.config`. Restarting
+does ***not*** involve a configuration change. To update the
+configuration, use `update.config`.
+
+The `--config` argument for restart is optional. Use it only
+if it contains hook definitions and activations that affect the
+`job restart` command.
+
+Cron Jobs
+---------
+
+You can manage cron jobs using the `aurora cron` command.  Please see
+[the Cron Jobs Feature](../../features/cron-jobs/) for more details.
+
+Comparing Jobs
+--------------
+
+    aurora job diff CLUSTER/ROLE/ENV/NAME <job configuration>
+
+Compares a job configuration against a running job. By default the diff
+is determined using `diff`, though you may choose an alternate
+diff program by specifying the `DIFF_VIEWER` environment variable.
+
+Viewing/Examining Jobs
+----------------------
+
+Above we discussed creating, killing, and updating Jobs. Here we discuss
+how to view and examine Jobs.
+
+### Listing Jobs
+
+    aurora config list <job configuration>
+
+Lists all Jobs registered with the Aurora scheduler in the named cluster for the named role.
+
+### Inspecting a Job
+
+    aurora job inspect CLUSTER/ROLE/ENV/NAME <job configuration>
+
+`inspect` verifies that its specified job can be parsed from a
+configuration file, and displays the parsed configuration.
+
+### Checking Your Quota
+
+    aurora quota get CLUSTER/ROLE
+
+Prints the production quota allocated to the role at the given
+cluster. Only non-[dedicated](../../features/constraints/#dedicated-attribute)
+[production](../configuration/#job-objects) jobs consume quota.
+
+### Finding a Job on Web UI
+
+When you create a job, part of the output response contains a URL that goes
+to the job's scheduler UI page. For example:
+
+    vagrant@precise64:~$ aurora job create devcluster/www-data/prod/hello /vagrant/examples/jobs/hello_world.aurora
+    INFO] Creating job hello
+    INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+    INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+You can go to the scheduler UI page for this job via `http://precise64:8081/scheduler/www-data/prod/hello`.
+You can go to the overall scheduler UI page by trimming that URL down to the part that ends at `scheduler`: `http://precise64:8081/scheduler`.
+
+Once you click through to a role page, you see Jobs arranged
+separately by pending jobs, active jobs and finished jobs.
+Jobs are arranged by role, typically a service account for
+production jobs and user accounts for test or development jobs.
+
+### Getting Job Status
+
+    aurora job status <job_key>
+
+Returns the status of recent tasks associated with the
+`job_key` specified Job in its supplied cluster. Typically this includes
+a mix of active tasks (running or assigned) and inactive tasks
+(successful, failed, and lost).
+
+### Opening the Web UI
+
+Use the Job's web UI scheduler URL or the `aurora status` command to find out on which
+machines individual tasks are scheduled. You can open the web UI via the
+`open` command if invoked from your machine:
+
+    aurora job open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+If only the cluster is specified, it goes directly to that cluster's
+scheduler main page. If the role is specified, it goes to the top-level
+role page. If the full job key is specified, it goes directly to the job
+page where you can inspect individual tasks.
+
+### SSHing to a Specific Task Machine
+
+    aurora task ssh <job_key> <shard number>
+
+You can have the Aurora client ssh directly to the machine that has been
+assigned a particular Job/shard number. This may be useful for quickly
+diagnosing issues such as performance issues or abnormal behavior on a
+particular machine.
+
+### SCPing with Specific Task Machines
+
+    aurora task scp [<cluster>/<role>/<env>/<job_name>/<instance_id>]:source [<cluster>/<role>/<env>/<job_name>/<instance_id>]:dest
+
+You can have the Aurora client copy file(s)/folder(s) to, from, and between
+individual tasks. The sandbox folder serves as the relative root and is the
+same folder you see when you browse `chroot` from the Scheduler task UI. You
+can also use absolute paths (like for `/tmp`), but tilde expansion is not
+supported. Currently, this command is only fully supported for Mesos
+containers. Users may use this to copy files from Docker containers but they
+cannot copy files to them.
+
+### Templating Command Arguments
+
+    aurora task run [-e] [-t THREADS] <job_key> -- <<command-line>>
+
+Given a job specification, run the supplied command on all hosts and
+return the output. You may use the standard Mustache templating rules:
+
+- `{{thermos.ports[name]}}` substitutes the specific named port of the
+  task assigned to this machine
+- `{{mesos.instance}}` substitutes the shard id of the job's task
+  assigned to this machine
+- `{{thermos.task_id}}` substitutes the task id of the job's task
+  assigned to this machine
+
+For example, the following type of pattern can be a powerful diagnostic
+tool:
+
+    aurora task run -t5 cluster1/tyg/devel/seizure -- \
+      'curl -s -m1 localhost:{{thermos.ports[http]}}/vars | grep uptime'
+
+By default, the command runs in the Task's sandbox. The `-e` option can
+run the command in the executor's sandbox. This is mostly useful for
+Aurora administrators.
+
+You can parallelize the runs by using the `-t` option.
diff --git a/source/documentation/0.19.0/reference/client-hooks.md b/source/documentation/0.19.0/reference/client-hooks.md
new file mode 100644
index 0000000..ee64193
--- /dev/null
+++ b/source/documentation/0.19.0/reference/client-hooks.md
@@ -0,0 +1,228 @@
+# Hooks for Aurora Client API
+
+You can execute hook methods around Aurora API Client methods when they are called by the Aurora Command Line commands.
+
+Explaining how hooks work is a bit tricky because of some indirection about what they apply to. Basically, a hook is code that executes when a particular Aurora Client API method runs, letting you extend the method's actions. The hook executes on the client side, specifically on the machine executing Aurora commands.
+
+The catch is that hooks are associated with Aurora Client API methods, which users don't directly call. Instead, users call Aurora Command Line commands, which call Client API methods during their execution. Since which hooks run depends on which Client API methods get called, you will need to know which Command Line commands call which API methods. Later on, there is a table showing the various associations.
+
+**Terminology Note**: From now on, "method(s)" refer to Client API methods, and "command(s)" refer to Command Line commands.
+
+- [Hook Types](#hook-types)
+- [Execution Order](#execution-order)
+- [Hookable Methods](#hookable-methods)
+- [Activating and Using Hooks](#activating-and-using-hooks)
+- [.aurora Config File Settings](#aurora-config-file-settings)
+- [Command Line](#command-line)
+- [Hooks Protocol](#hooks-protocol)
+  - [pre_ Methods](#pre_-methods)
+  - [err_ Methods](#err_-methods)
+  - [post_ Methods](#post_-methods)
+- [Generic Hooks](#generic-hooks)
+- [Hooks Process Checklist](#hooks-process-checklist)
+
+
+## Hook Types
+
+Hooks have three basic types, differing by when they run with respect to their associated method.
+
+`pre_<method_name>`: When its associated method is called, the `pre_` hook executes first, then the called method. If the `pre_` hook fails, the method never runs. Later code that expected the method to succeed may be affected by this, which can result in terminating the Aurora client.
+
+Note that a `pre_` hook can error-trap internally so it does not
+return `False`. Designers/contributors of new `pre_` hooks should
+consider whether or not to error-trap them. You can error trap at the
+highest level very generally and always pass the `pre_` hook by
+returning `True`. For example:
+
+    def pre_create(...):
+      do_something()  # if do_something fails with an exception, the create_job is not attempted!
+      return True
+
+    # However...
+    def pre_create(...):
+      try:
+        do_something()  # may cause exception
+      except Exception:  # generic error trap will catch it
+        pass  # and ignore the exception
+      return True  # create_job will run in any case!
+
+`post_<method_name>`: A `post_` hook executes after its associated method successfully finishes running. If it fails, the already executed method is unaffected. A `post_` hook's error is trapped, and any later operations are unaffected.
+
+`err_<method_name>`: Executes only when its associated method returns a status other than OK or throws an exception. If an `err_` hook fails, the already executed method is unaffected. An `err_` hook's error is trapped, and any later operations are unaffected.
+
+## Execution Order
+
+A command with `pre_`, `post_`, and `err_` hooks defined and activated for its called method executes in the following order when the method successfully executes:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and successfully finishes
+6. `post_` method hook runs
+7. Command code executes
+8. Command execution ends
+
+The following is what happens when, for the same command and hooks, the method associated with the command suffers an error and does not successfully finish executing:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and fails
+6. `err_` method hook runs
+7. Command Code executes (if `err_` method does not end the command execution)
+8. Command execution ends
+
+Note that the `post_` and `err_` hooks for the same method can never both run for a single execution of that method.
+
+## Hookable Methods
+
+You can associate `pre_`, `post_`, and `err_` hooks with the following methods. Since you do not directly interact with the methods, but rather the Aurora Command Line commands that call them, for each method we also list the command(s) that can call the method. Note that a different method or methods may be called by a command depending on how the command's other code executes. Similarly, multiple commands can call the same method. We also list the methods' argument signatures, which are used by their associated hooks. <a name="Chart"></a>
+
+  Aurora Client API Method | Client API Method Argument Signature | Aurora Command Line Command
+  -------------------------| ------------------------------------- | ---------------------------
+  ```create_job``` | ```self```, ```config``` | ```job create```, ```runtask```
+  ```restart``` | ```self```, ```job_key```, ```shards```, ```update_config```, ```health_check_interval_seconds``` | ```job restart```
+  ```kill_job``` | ```self```, ```job_key```, ```shards=None``` |  ```job kill```
+  ```start_cronjob``` | ```self```, ```job_key``` | ```cron start```
+  ```start_job_update``` | ```self```, ```config```, ```instances=None``` | ```update start```
+
+Some specific examples:
+
+* `pre_create_job` executes when a `create_job` method is called, and before the `create_job` method itself executes.
+
+* `post_cancel_update` executes after a `cancel_update` method has successfully finished running.
+
+* `err_kill_job` executes when the `kill_job` method is called, but doesn't successfully finish running.
+
+## Activating and Using Hooks
+
+By default, hooks are inactive. If you do not want to use hooks, you do not need to make any changes to your code. If you do want to use hooks, you will need to alter your `.aurora` config file to activate them both for the configuration as a whole as well as for individual `Job`s. And, of course, you will need to define in your config file what happens when a particular hook executes.
+
+## .aurora Config File Settings
+
+You can define a top-level `hooks` variable in any `.aurora` config file. `hooks` is a list of all objects that define hooks used by `Job`s defined in that config file. If you do not want to define any hooks for a configuration, `hooks` is optional.
+
+    hooks = [Object_with_defined_hooks1, Object_with_defined_hooks2]
+
+Be careful when assembling a config file using `include` on multiple smaller config files. If there are multiple files that assign a value to `hooks`, only the last assignment made will stick. For example, if `x.aurora` has `hooks = [a, b, c]` and `y.aurora` has `hooks = [d, e, f]` and `z.aurora` has, in this order, `include x.aurora` and `include y.aurora`, the `hooks` value will be `[d, e, f]`.
+
+Also, for any `Job` that you want to use hooks with, its `Job` definition in the `.aurora` config file must set an `enable_hooks` flag to `True` (it defaults to `False`). By default, hooks are disabled and you must enable them for `Job`s of your choice.
+
+To summarize, to use hooks for a particular job, you must both activate hooks for your config file as a whole, and for that job. Activating hooks only for individual jobs won't work, nor will only activating hooks for your config file as a whole. You must also specify the hooks' defining object in the `hooks` variable.
+
+Recall that `.aurora` config files are written in Pystachio. So the following turns on hooks for production jobs at cluster1 and cluster2, but leaves them off for similar jobs with a defined user role. Of course, you also need to list the objects that define the hooks in your config file's `hooks` variable.
+
+    jobs = [
+            Job(enable_hooks = True, cluster = c, env = 'prod') for c in ('cluster1', 'cluster2')
+           ]
+    jobs.extend(
+       Job(cluster = c, env = 'prod', role = getpass.getuser()) for c in ('cluster1', 'cluster2'))
+       # Hooks disabled for these jobs
+
+## Command Line
+
+All Aurora Command Line commands now accept an `.aurora` config file as an optional parameter (some, of course, accept it as a required parameter). Whenever a command has a `.aurora` file parameter, any hooks specified and activated in the `.aurora` file can be used. For example:
+
+    aurora job restart cluster1/role/env/app myapp.aurora
+
+The command activates any hooks specified and activated in `myapp.aurora`. For the `restart` command, that is the only thing the `myapp.aurora` parameter does. So, if the command was the following, since there is no `.aurora` config file to specify any hooks, no hooks on the `restart` command can run.
+
+    aurora job restart cluster1/role/env/app
+
+## Hooks Protocol
+
+Any object defined in the `.aurora` config file can define hook methods. You should define your hook methods within a class, and then use the class name as a value in the `hooks` list in your config file.
+
+Note that you can define other methods in the class that its hook methods can call; all the logic of a hook does not have to be in its definition.
+
+The following example defines a class containing a `pre_kill_job` hook definition that calls another method defined in the class.
+
+    # Defines a method pre_kill_job
+    class KillConfirmer(object):
+      def confirm(self, msg):
+        return raw_input(msg).lower() == 'yes'
+
+      def pre_kill_job(self, job_key, shards=None):
+        shards = ('shards %s' % shards) if shards is not None else 'all shards'
+        return self.confirm('Are you sure you want to kill %s (%s)? (yes/no): '
+                            % (job_key, shards))
+
+### pre_ Methods
+
+`pre_` methods have the signature:
+
+    pre_<API method name>(self, <associated method's signature>)
+
+`pre_` methods have the same signature as their associated method, with the addition of `self` as the first parameter. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `pre_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+If this method returns False, the API command call aborts.
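+
+For illustration only, here is a sketch of a `pre_` hook written with the `*`/`**` form just described; the class name is arbitrary, and `pre_kill_job` reuses the method name from the earlier example:
+
+    class KillGate(object):
+      def pre_kill_job(self, *args, **kw):
+        # Returning False would abort the kill_job API call.
+        print('About to call kill_job with %s %s' % (args, kw))
+        return True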
+
+### err_ Methods
+
+`err_` methods have the signature:
+
+    err_<API method name>(self, exc, <associated method's signature>)
+
+`err_` methods have the same signature as their associated method, with the addition of a first parameter `self` and a second parameter `exc`. `exc` is either a result with responseCode other than `ResponseCode.OK` or an `Exception`. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `err_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+`err_` method return codes are ignored.
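+
+As a sketch (the class name is illustrative, and `err_kill_job` pairs with the `kill_job` method used earlier), an `err_` hook might simply log the failure:
+
+    from twitter.common import log
+
+    class KillErrorLogger(object):
+      def err_kill_job(self, exc, *args, **kw):
+        # exc is either a non-OK result or the Exception raised by kill_job.
+        log.error('kill_job failed: %s' % exc)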
+
+### post_ Methods
+
+`post_` methods have the signature:
+
+    post_<API method name>(self, result, <associated method signature>)
+
+`post_` method parameters are `self`, then `result`, followed by the same parameter signature as their associated method. `result` is the result of the associated method call. See the [chart](#chart) above for the mapping of parameters to methods. When writing `post_` methods, you can use the `*` and `**` syntax to designate that all unspecified arguments are passed in a list to the `*`ed parameter and all unspecified named arguments with values are passed as name/value pairs to the `**`ed parameter.
+
+`post_` method return codes are ignored.
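+
+A comparable sketch of a `post_` hook, again with illustrative names, could record the outcome of the call:
+
+    class KillAuditor(object):
+      def post_kill_job(self, result, *args, **kw):
+        # The return value of a post_ hook is ignored.
+        print('kill_job finished with result: %s' % result)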
+
+## Generic Hooks
+
+There are seven Aurora API Methods which any of the three hook types can attach to. Thus, there are 21 possible hook/method combinations for a single `.aurora` config file. Say that you define `pre_` and `post_` hooks for the `restart` method. That leaves 19 undefined hook/method combinations: `err_restart`, plus the `pre_`, `post_`, and `err_` hooks for each of the other six hookable methods. You can define what happens when any of these otherwise undefined 19 hooks execute via a generic hook, whose signature is:
+
+    generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw)
+
+where:
+
+* `hook_config` is a named tuple of `config` (the Pystachio `config` object) and `job_key`.
+
+* `event` is one of `pre`, `err`, or `post`, indicating which type of hook the generic hook is standing in for. For example, assume no specific hooks were defined for the `restart` API command. If `generic_hook` is defined and activated, and `restart` is called, `generic_hook` will effectively run as `pre_restart`, `post_restart`, and `err_restart`. You can use a selection statement on this value so that `generic_hook` will act differently based on whether it is standing in for a `pre_`, `post_`, or `err_` hook.
+
+* `method_name` is the Client API method name whose execution is causing this execution of the `generic_hook`.
+
+* `*args`, `**kw` are the API method arguments and keyword arguments respectively.
+* `result_or_err` is a tri-state parameter taking one of these three values:
+  1. `None` for `pre_` hooks
+  2. `result` for `post_` hooks
+  3. `exc` for `err_` hooks
+
+Example:
+
+    # Overrides the standard do-nothing generic_hook by adding a log writing operation.
+    from twitter.common import log
+
+    class Logger(object):
+      '''Adds to the log every time a hookable API method is called'''
+      def generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw):
+        log.info('%s: %s_%s of %s'
+                 % (self.__class__.__name__, event, method_name, hook_config.job_key))
+
+## Hooks Process Checklist
+
+1. In your `.aurora` config file, add a `hooks` variable. Note that you may want to define a `.aurora` file only for hook definitions and then include this file in multiple other config files in which you want to use the same hooks.
+
+    hooks = []
+
+2. In the `hooks` variable, list all objects that define hooks used by `Job`s defined in this config:
+
+    hooks = [Object_hook_definer1, Object_hook_definer2]
+
+3. For each job that uses hooks in this config file, add `enable_hooks = True` to the `Job` definition. Note that this is necessary even if you only want to use the generic hook.
+
+4. Write your `pre_`, `post_`, and `err_` hook definitions as part of an object definition in your `.aurora` config file.
+
+5. If desired, write your `generic_hook` definition as part of an object definition in your `.aurora` config file. Remember, the object must be listed as a member of `hooks`.
+
+6. If your Aurora command line command does not otherwise take an `.aurora` config file argument, add the appropriate `.aurora` file as an argument in order to define and activate the configuration's hooks.
diff --git a/source/documentation/0.19.0/reference/configuration-best-practices.md b/source/documentation/0.19.0/reference/configuration-best-practices.md
new file mode 100644
index 0000000..9ce7fcb
--- /dev/null
+++ b/source/documentation/0.19.0/reference/configuration-best-practices.md
@@ -0,0 +1,187 @@
+Aurora Configuration Best Practices
+===================================
+
+Use As Few .aurora Files As Possible
+------------------------------------
+
+When creating your `.aurora` configuration, try to keep all versions of
+a particular job within the same `.aurora` file. For example, if you
+have separate jobs for `cluster1`, `cluster1` staging, `cluster1`
+testing, and `cluster2`, keep them as close together as possible.
+
+Constructs shared across multiple jobs owned by your team (e.g.
+team-level defaults or structural templates) can be split into separate
+`.aurora` files and included via the `include` directive.
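+
+For example, a shared defaults file might be pulled in like this (the file and
+template names here are hypothetical):
+
+    # my_service.aurora; TEAM_TASK_TEMPLATE is defined in the included file.
+    include('team_defaults.aurora')
+
+    web_task = TEAM_TASK_TEMPLATE(name = 'web')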
+
+
+Avoid Boilerplate
+------------------
+
+If you see repetition or find yourself copying and pasting any parts of
+your configuration, it's likely an opportunity for templating. Take the
+example below:
+
+`redundant.aurora` contains:
+
+    download = Process(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'rm -rf Python-2.7.3 && tar xzf Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    build = Process(
+      name = 'build',
+      cmdline = 'pushd Python-2.7.3 && ./configure && make && popd',
+      max_failures = 1)
+
+    email = Process(
+      name = 'email',
+      cmdline = 'echo Success | mail feynman@tmc.com',
+      max_failures = 5,
+      min_duration = 1)
+
+    build_python = Task(
+      name = 'build_python',
+      processes = [download, unpack, build, email],
+      constraints = [Constraint(order = ['download', 'unpack', 'build', 'email'])])
+
+As you'll notice, there's a lot of repetition in the `Process`
+definitions. For example, almost every process sets a `max_failures`
+limit of 5 and a `min_duration` of 1. This is an opportunity for factoring
+into a common process template.
+
+Furthermore, the Python version is repeated everywhere. This can be
+bound via structural templating as described in the [Advanced Binding](../configuration-templating/#AdvancedBinding)
+section.
+
+`less_redundant.aurora` contains:
+
+    class Python(Struct):
+      version = Required(String)
+      base = Default(String, 'Python-{{version}}')
+      package = Default(String, '{{base}}.tar.bz2')
+
+    ReliableProcess = Process(
+      max_failures = 5,
+      min_duration = 1)
+
+    download = ReliableProcess(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/{{python.version}}/{{python.package}}')
+
+    unpack = ReliableProcess(
+      name = 'unpack',
+      cmdline = 'rm -rf {{python.base}} && tar xzf {{python.package}}')
+
+    build = ReliableProcess(
+      name = 'build',
+      cmdline = 'pushd {{python.base}} && ./configure && make && popd',
+      max_failures = 1)
+
+    email = ReliableProcess(
+      name = 'email',
+      cmdline = 'echo Success | mail {{role}}@foocorp.com')
+
+    build_python = SequentialTask(
+      name = 'build_python',
+      processes = [download, unpack, build, email]).bind(python = Python(version = "2.7.3"))
+
+
+Thermos Uses bash, But Thermos Is Not bash
+-------------------------------------------
+
+#### Bad
+
+Many tiny Processes make for harder-to-manage configurations.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'rcp user@my_machine:my_application .'
+    )
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+#### Good
+
+Each `cmdline` runs in a bash subshell, so you have the full power of
+bash. Chaining commands with `&&` or `||` is almost always the right
+thing to do.
+
+Also, for Tasks that are simply a list of processes that run one after
+another, consider using the `SequentialTask` helper which applies a
+linear ordering constraint for you.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'rcp user@my_machine:my_application . && unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+
+Rarely Use Functions In Your Configurations
+-------------------------------------------
+
+90% of the time you define a function in a `.aurora` file, you're
+probably Doing It Wrong(TM).
+
+#### Bad
+
+    def get_my_task(name, user, cpu, ram, disk):
+      return Task(
+        name = name,
+        user = user,
+        processes = [STAGE_PROCESS, RUN_PROCESS],
+        constraints = order(STAGE_PROCESS, RUN_PROCESS),
+        resources = Resources(cpu = cpu, ram = ram, disk = disk)
+      )
+
+    task_one = get_my_task('task_one', 'feynman', 1.0, 32*MB, 1*GB)
+    task_two = get_my_task('task_two', 'feynman', 2.0, 64*MB, 1*GB)
+
+#### Good
+
+This one is more idiomatic. Forced keyword arguments prevent accidents,
+e.g. accidentally passing `32*MB` as the disk size when you meant 32 MB
+of RAM. Less proliferation of task-construction techniques makes for an
+easier-to-read, quicker-to-understand, and more composable
+configuration.
+
+    TASK_TEMPLATE = SequentialTask(
+      user = 'wickman',
+      processes = [STAGE_PROCESS, RUN_PROCESS],
+    )
+
+    task_one = TASK_TEMPLATE(
+      name = 'task_one',
+      resources = Resources(cpu = 1.0, ram = 32*MB, disk = 1*GB) )
+
+    task_two = TASK_TEMPLATE(
+      name = 'task_two',
+      resources = Resources(cpu = 2.0, ram = 64*MB, disk = 1*GB)
+    )
diff --git a/source/documentation/0.19.0/reference/configuration-templating.md b/source/documentation/0.19.0/reference/configuration-templating.md
new file mode 100644
index 0000000..16d5c9d
--- /dev/null
+++ b/source/documentation/0.19.0/reference/configuration-templating.md
@@ -0,0 +1,306 @@
+Aurora Configuration Templating
+===============================
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a templating library called
+*Pystachio*, a powerful tool for configuration specification and reuse.
+
+[Aurora Configuration Reference](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the *Aurora+Thermos Configuration
+Reference* without `import` statements - the Aurora config loader
+injects them automatically. Other than that, the `.aurora` format
+works like any other Python script.
+
+
+Templating 1: Binding in Pystachio
+----------------------------------
+
+Pystachio uses the visually distinctive {{}} to indicate template
+variables. These are often called "mustache variables" after the
+similarly appearing variables in the Mustache templating system and
+because the curly braces resemble mustaches.
+
+If you are familiar with the Mustache system, templates in Pystachio
+have significant differences. They have no nesting, joining, or
+inheritance semantics. On the other hand, templates are evaluated
+iteratively, so this affords some level of indirection.
+
+Let's start with the simplest template: text with one
+variable, in this case `name`:
+
+    Hello {{name}}
+
+If we evaluate this as is, we'd get back:
+
+    Hello
+
+If a template variable doesn't have a value, when evaluated it's
+replaced with nothing. If we add a binding to give it a value:
+
+    { "name" : "Tom" }
+
+We'd get back:
+
+    Hello Tom
+
+Every Pystachio object has an associated `.bind` method that can bind
+values to {{}} variables. Bindings are not immediately evaluated.
+Instead, they are evaluated only when the interpolated value of the
+object is necessary, e.g. for performing equality or serializing a
+message over the wire.
+
+Objects with and without mustache templated variables behave
+differently:
+
+    >>> Float(1.5)
+    Float(1.5)
+
+    >>> Float('{{x}}.5')
+    Float({{x}}.5)
+
+    >>> Float('{{x}}.5').bind(x = 1)
+    Float(1.5)
+
+    >>> Float('{{x}}.5').bind(x = 1) == Float(1.5)
+    True
+
+    >>> contextual_object = String('{{metavar{{number}}}}').bind(
+    ... metavar1 = "first", metavar2 = "second")
+
+    >>> contextual_object
+    String({{metavar{{number}}}})
+
+    >>> contextual_object.bind(number = 1)
+    String(first)
+
+    >>> contextual_object.bind(number = 2)
+    String(second)
+
+You usually bind simple key to value pairs, but you can also bind three
+other objects: lists, dictionaries, and structurals. These will be
+described in detail later.
+
+
+### Structurals in Pystachio / Aurora
+
+Most Aurora/Thermos users don't ever (knowingly) interact with `String`,
+`Float`, or `Integer` Pystachio objects directly. Instead they interact
+with derived structural (`Struct`) objects that are collections of
+fundamental and structural objects. The structural object components are
+called *attributes*. Aurora's most used structural objects are `Job`,
+`Task`, and `Process`:
+
+    class Process(Struct):
+      cmdline = Required(String)
+      name = Required(String)
+      max_failures = Default(Integer, 1)
+      daemon = Default(Boolean, False)
+      ephemeral = Default(Boolean, False)
+      min_duration = Default(Integer, 5)
+      final = Default(Boolean, False)
+
+Construct default objects by following the object's type with (). If you
+want an attribute to have a value different from its default, include
+the attribute name and value inside the parentheses.
+
+    >>> Process()
+    Process(daemon=False, max_failures=1, ephemeral=False,
+      min_duration=5, final=False)
+
+Attribute values can be template variables, which then receive specific
+values when creating the object.
+
+    >>> Process(cmdline = 'echo {{message}}')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo {{message}}, final=False)
+
+    >>> Process(cmdline = 'echo {{message}}').bind(message = 'hello world')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+A powerful binding property is that all of an object's children inherit its
+bindings:
+
+    >>> List(Process)([
+    ... Process(name = '{{prefix}}_one'),
+    ... Process(name = '{{prefix}}_two')
+    ... ]).bind(prefix = 'hello')
+    ProcessList(
+      Process(daemon=False, name=hello_one, max_failures=1, ephemeral=False, min_duration=5, final=False),
+      Process(daemon=False, name=hello_two, max_failures=1, ephemeral=False, min_duration=5, final=False)
+      )
+
+Remember that an Aurora Job contains Tasks which contain Processes. A
+Job level binding is inherited by its Tasks and all their Processes.
+Similarly a Task level binding is available to that Task and its
+Processes but is *not* visible at the Job level (inheritance is a
+one-way street.)
+
+#### Mustaches Within Structurals
+
+When you define a `Struct` schema, one powerful, but confusing, feature
+is that all of that structure's attributes are Mustache variables within
+the enclosing scope *once they have been populated*.
+
+For example, when `Process` is defined above, all its attributes, such as
+{{`name`}}, {{`cmdline`}}, and {{`max_failures`}}, are immediately
+defined as Mustache variables, implicitly bound into the `Process`, and
+inherited by all child objects once they are defined.
+
+Thus, you can do the following:
+
+    >>> Process(name = "installer", cmdline = "echo {{name}} is running")
+    Process(daemon=False, name=installer, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo installer is running, final=False)
+
+WARNING: This binding only takes place in one direction. For example,
+the following does NOT work and does not set the `Process` `name`
+attribute's value.
+
+    >>> Process().bind(name = "installer")
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5, final=False)
+
+The following is also not possible and results in an infinite loop that
+attempts to resolve `Process.name`.
+
+    >>> Process(name = '{{name}}').bind(name = 'installer')
+
+Do not confuse Structural attributes with bound Mustache variables.
+Attributes are implicitly converted to Mustache variables but not vice
+versa.
+
+### Templating 2: Structurals Are Factories
+
+#### A Second Way of Templating
+
+A second templating method is both as powerful as the aforementioned and
+often confused with it. This method is due to automatic conversion of
+Struct attributes to Mustache variables as described above.
+
+Suppose you create a Process object:
+
+    >>> p = Process(name = "process_one", cmdline = "echo hello world")
+
+    >>> p
+    Process(daemon=False, name=process_one, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+This `Process` object, "`p`", can be used wherever a `Process` object is
+needed. It can also be reused by changing the value(s) of its
+attribute(s). Here we change its `name` attribute from `process_one` to
+`process_two`.
+
+    >>> p(name = "process_two")
+    Process(daemon=False, name=process_two, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+Template creation is a common use for this technique:
+
+    >>> Daemon = Process(daemon = True)
+    >>> logrotate = Daemon(name = 'logrotate', cmdline = './logrotate conf/logrotate.conf')
+    >>> mysql = Daemon(name = 'mysql', cmdline = 'bin/mysqld --safe-mode')
+
+### Advanced Binding
+
+As described above, `.bind()` binds simple strings or numbers to
+Mustache variables. In addition to Structural types formed by combining
+atomic types, Pystachio has two container types, `List` and `Map`, which
+can also be bound via `.bind()`.
+
+#### Bind Syntax
+
+The `bind()` function can take Python dictionaries or `kwargs`
+interchangeably (when "`kwargs`" is in a function definition, `kwargs`
+receives a Python dictionary containing all keyword arguments after the
+formal parameter list).
+
+    >>> String('{{foo}}').bind(foo = 'bar') == String('{{foo}}').bind({'foo': 'bar'})
+    True
+
+Bindings done "closer" to the object in question take precedence:
+
+    >>> p = Process(name = '{{context}}_process')
+    >>> t = Task().bind(context = 'global')
+    >>> t(processes = [p, p.bind(context = 'local')])
+    Task(processes=ProcessList(
+      Process(daemon=False, name=global_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5),
+      Process(daemon=False, name=local_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5)
+    ))
+
+#### Binding Complex Objects
+
+##### Lists
+
+    >>> fibonacci = List(Integer)([1, 1, 2, 3, 5, 8, 13])
+    >>> String('{{fib[4]}}').bind(fib = fibonacci)
+    String(5)
+
+##### Maps
+
+    >>> first_names = Map(String, String)({'Kent': 'Clark', 'Wayne': 'Bruce', 'Prince': 'Diana'})
+    >>> String('{{first[Kent]}}').bind(first = first_names)
+    String(Clark)
+
+##### Structurals
+
+    >>> String('{{p.cmdline}}').bind(p = Process(cmdline = "echo hello world"))
+    String(echo hello world)
+
+### Structural Binding
+
+Use structural templates when binding more than two or three individual
+values at the Job or Task level. For fewer values than that, standard
+key-to-value binding is sufficient.
+
+Structural binding is a very powerful pattern and is most useful in
+Aurora/Thermos for doing Structural configuration. For example, you can
+define a job profile. The following profile uses `HDFS`, the Hadoop
+Distributed File System, to designate a file's location. `HDFS` does
+not come with Aurora, so you'll need to either install it separately
+or change the way the dataset is designated.
+
+    class Profile(Struct):
+      version = Required(String)
+      environment = Required(String)
+      dataset = Default(String, 'hdfs://home/aurora/data/{{environment}}')
+
+    PRODUCTION = Profile(version = 'live', environment = 'prod')
+    DEVEL = Profile(version = 'latest',
+                    environment = 'devel',
+                    dataset = 'hdfs://home/aurora/data/test')
+    TEST = Profile(version = 'latest', environment = 'test')
+
+    JOB_TEMPLATE = Job(
+      name = 'application',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = '{{profile.environment}}',
+      task = SequentialTask(
+        name = 'task',
+        resources = Resources(cpu = 2, ram = 4*GB, disk = 8*GB),
+        processes = [
+          Process(name = 'main',
+                  cmdline = 'java -jar application.jar -hdfsPath {{profile.dataset}}')
+        ]
+       )
+     )
+
+    jobs = [
+      JOB_TEMPLATE(instances = 100).bind(profile = PRODUCTION),
+      JOB_TEMPLATE.bind(profile = DEVEL),
+      JOB_TEMPLATE.bind(profile = TEST),
+     ]
+
+In this case, a custom structural "Profile" is created to self-document
+the configuration to some degree. This also allows some schema
+"type-checking", and for default self-substitution, e.g. in
+`Profile.dataset` above.
+
+So rather than a `.bind()` with a half-dozen substituted variables, you
+can bind a single object that has sensible defaults stored in a single
+place.
diff --git a/source/documentation/0.19.0/reference/configuration-tutorial.md b/source/documentation/0.19.0/reference/configuration-tutorial.md
new file mode 100644
index 0000000..c170d47
--- /dev/null
+++ b/source/documentation/0.19.0/reference/configuration-tutorial.md
@@ -0,0 +1,531 @@
+Aurora Configuration Tutorial
+=============================
+
+This document explains how to write Aurora configuration files, including feature
+descriptions and best practices. When writing a configuration file, make use of
+`aurora job inspect`. It takes the same job key and configuration file
+arguments as `aurora job create` or `aurora update start`. It first ensures the
+configuration parses, then outputs it in human-readable form.
+
+You should read this after going through the general [Aurora Tutorial](../../getting-started/tutorial/).
+
+- [The Basics](#the-basics)
+	- [Use Bottom-To-Top Object Ordering](#use-bottom-to-top-object-ordering)
+- [An Example Configuration File](#an-example-configuration-file)
+- [Defining Process Objects](#defining-process-objects)
+- [Getting Your Code Into The Sandbox](#getting-your-code-into-the-sandbox)
+- [Defining Task Objects](#defining-task-objects)
+	- [SequentialTask: Running Processes in Parallel or Sequentially](#sequentialtask-running-processes-in-parallel-or-sequentially)
+	- [SimpleTask](#simpletask)
+	- [Combining tasks](#combining-tasks)
+- [Defining Job Objects](#defining-job-objects)
+- [The jobs List](#the-jobs-list)
+- [Basic Examples](#basic-examples)
+
+
+The Basics
+----------
+
+To run a job on Aurora, you must specify a configuration file that tells
+Aurora what it needs to know to schedule the job, what Mesos needs to
+run the tasks the job is made up of, and what Thermos needs to run the
+processes that make up the tasks. This file must have
+a `.aurora` suffix.
+
+A configuration file defines a collection of objects, along with parameter
+values for their attributes. An Aurora configuration file contains the
+following three types of objects:
+
+- Job
+- Task
+- Process
+
+A configuration also specifies a list of `Job` objects assigned
+to the variable `jobs`.
+
+- jobs (list of defined Jobs to run)
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a type-checked dictionary
+templating library called *Pystachio*, a powerful tool for
+configuration specification and reuse. Pystachio objects are tailored
+via {{}} surrounded templates.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the [*Aurora Configuration
+Reference*](../configuration/), without `import` statements - the
+Aurora config loader injects them automatically. Other than that, an `.aurora`
+file works like any other Python script.
+
+[*Aurora Configuration Reference*](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+### Use Bottom-To-Top Object Ordering
+
+A well-structured configuration starts with structural templates (if
+any). Structural templates encapsulate in their attributes all the
+differences between Jobs in the configuration that are not directly
+manipulated at the `Job` level, but typically at the `Process` or `Task`
+level. For example, if certain processes are invoked with slightly
+different settings or input.
+
+After structural templates, define, in order, `Process`es, `Task`s, and
+`Job`s.
+
+Structural template names should be *UpperCamelCased* and their
+instantiations are typically *UPPER\_SNAKE\_CASED*. `Process`, `Task`,
+and `Job` names are typically *lower\_snake\_cased*. Indentation is typically 2
+spaces.
+
+An Example Configuration File
+-----------------------------
+
+The following is a typical configuration file. Don't worry if there are
+parts you don't understand yet, but you may want to refer back to this
+as you read about its individual parts. Note that names surrounded by
+curly braces {{}} are template variables, which the system replaces with
+bound values for the variables.
+
+    # --- templates here ---
+	class Profile(Struct):
+	  package_version = Default(String, 'live')
+	  java_binary = Default(String, '/usr/lib/jvm/java-1.7.0-openjdk/bin/java')
+	  extra_jvm_options = Default(String, '')
+	  parent_environment = Default(String, 'prod')
+	  parent_serverset = Default(String,
+                                 '/foocorp/service/bird/{{parent_environment}}/bird')
+
+	# --- processes here ---
+	main = Process(
+	  name = 'application',
+	  cmdline = '{{profile.java_binary}} -server -Xmx1792m '
+	            '{{profile.extra_jvm_options}} '
+	            '-jar application.jar '
+	            '-upstreamService {{profile.parent_serverset}}'
+	)
+
+	# --- tasks ---
+	base_task = SequentialTask(
+	  name = 'application',
+	  processes = [
+	    Process(
+	      name = 'fetch',
+	      cmdline = 'curl -O
+                  https://packages.foocorp.com/{{profile.package_version}}/application.jar'),
+	  ]
+	)
+
+        # not always necessary but often useful to have separate task
+        # resource classes
+        staging_task = base_task(resources =
+                         Resources(cpu = 1.0,
+                                   ram = 2048*MB,
+                                   disk = 1*GB))
+	production_task = base_task(resources =
+                            Resources(cpu = 4.0,
+                                      ram = 2560*MB,
+                                      disk = 10*GB))
+
+	# --- job template ---
+	job_template = Job(
+	  name = 'application',
+	  role = 'myteam',
+	  contact = 'myteam-team@foocorp.com',
+	  instances = 20,
+	  service = True,
+	  task = production_task
+	)
+
+	# -- profile instantiations (if any) ---
+	PRODUCTION = Profile()
+	STAGING = Profile(
+	  extra_jvm_options = '-Xloggc:gc.log',
+	  parent_environment = 'staging'
+	)
+
+	# -- job instantiations --
+	jobs = [
+          job_template(cluster = 'cluster1', environment = 'prod')
+	               .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster2', environment = 'prod')
+	                .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster1',
+                        environment = 'staging',
+			service = False,
+			task = staging_task,
+			instances = 2)
+			.bind(profile = STAGING),
+	]
+
+## Defining Process Objects
+
+Processes are handled by the Thermos system. A process is a single
+executable step run as a part of an Aurora task, which consists of a
+bash-executable statement.
+
+The key (and required) `Process` attributes are:
+
+-   `name`: Any string which is a valid Unix filename (no slashes,
+    NULLs, or leading periods). The `name` value must be unique relative
+    to other Processes in a `Task`.
+-   `cmdline`: A command line run in a bash subshell, so you can use
+    bash scripts. Nothing is supplied for command-line arguments,
+    so `$*` is unspecified.
+
+Many tiny processes make managing configurations more difficult. For
+example, the following is a bad way to define processes.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip'
+    )
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+Since `cmdline` runs in a bash subshell, you can chain commands
+with `&&` or `||`.
+
+When defining a `Task` that is just a list of Processes run in a
+particular order, use `SequentialTask`, as described in the
+[*Defining Task Objects*](#defining-task-objects) section. The following simplifies and combines the
+above multiple `Process` definitions into just two.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip && '
+                'unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+`Process` also has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#process-objects).
+
+
+## Getting Your Code Into The Sandbox
+
+When using Aurora, you need to get your executable code into its "sandbox", specifically
+the Task sandbox where the code executes for the Processes that make up that Task.
+
+Each Task has a sandbox created when the Task starts and garbage
+collected when it finishes. All of a Task's processes run in its
+sandbox, so processes can share state by using a shared current
+working directory.
+
+Typically, you save this code somewhere. You then need to define a Process
+in your `.aurora` configuration file that fetches the code from that somewhere
+to where the agent can see it. For a public cloud, that can be anywhere public on
+the Internet, such as S3. For a private cloud, you need to put it on an
+accessible HDFS cluster or similar internal storage.
+
+The template for this Process is:
+
+    <name> = Process(
+      name = '<name>',
+      cmdline = '<command to copy and extract code archive into current working directory>'
+    )
+
+Note: Be sure the extracted code archive has an executable.
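+
+For example, a sketch of such a fetch Process, with a placeholder URL and
+archive name, might look like:
+
+    fetch_app = Process(
+      name = 'fetch_app',
+      # Placeholder URL; point this at wherever your build artifacts live.
+      cmdline = 'curl -O https://example.com/releases/app.tar.gz && tar xzf app.tar.gz')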
+
+## Getting Environment Variables Into The Sandbox
+
+Every time a process is forked, the Thermos executor checks for the existence of a
+`.thermos_profile` file; if it exists, the file is sourced before the process runs.
+You can use this mechanism to pass environment variables to the sandbox.
+
+An example for this Process is:
+
+    setup_env = Process(
+      name = 'setup',
+      cmdline = '''cat <<EOF > .thermos_profile
+                   export RESULT=hello
+                   EOF'''
+    )
+
+    read_env = Process(
+      name = 'read',
+      cmdline = 'echo $RESULT'
+    )
+
+## Defining Task Objects
+
+Tasks are handled by Mesos. A task is a collection of processes that
+runs in a shared sandbox. It's the fundamental unit Aurora uses to
+schedule the datacenter; essentially what Aurora does is find places
+in the cluster to run tasks.
+
+The key (and required) parts of a Task are:
+
+-   `name`: A string giving the Task's name. By default, if a Task is
+    not given a name, it inherits the first name in its Process list.
+
+-   `processes`: An unordered list of Process objects bound to the Task.
+    The value of the optional `constraints` attribute affects the
+    contents as a whole. Currently, the only constraint, `order`, determines if
+    the processes run in parallel or sequentially.
+
+-   `resources`: A `Resource` object defining the Task's resource
+    footprint. A `Resource` object has three attributes:
+    -   `cpu`: A Float, the fractional number of cores the Task requires.
+    -   `ram`: An Integer, RAM bytes the Task requires.
+    -   `disk`: An Integer, disk bytes the Task requires.
+
+A basic Task definition looks like:
+
+    Task(
+        name="hello_world",
+        processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+        resources=Resources(cpu = 1.0,
+                            ram = 1*GB,
+                            disk = 1*GB))
+
+A Task has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#task-object)
+
+
+### SequentialTask: Running Processes in Parallel or Sequentially
+
+By default, a Task with several Processes runs them in parallel. There
+are two ways to run Processes sequentially:
+
+-   Include an `order` constraint in the Task definition's `constraints`
+    attribute whose arguments specify the processes' run order:
+
+        Task( ... processes=[process1, process2, process3],
+	          constraints = order(process1, process2, process3), ...)
+
+-   Use `SequentialTask` instead of `Task`; it automatically runs
+    processes in the order specified in the `processes` attribute. No
+    `constraint` parameter is needed:
+
+        SequentialTask( ... processes=[process1, process2, process3] ...)
+
+### SimpleTask
+
+For quickly creating simple tasks, use the `SimpleTask` helper. It
+creates a basic task from a provided name and command line using a
+default set of resources. For example, in a `.aurora` configuration
+file:
+
+    SimpleTask(name="hello_world", command="echo hello world")
+
+is equivalent to
+
+    Task(name="hello_world",
+         processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+         resources=Resources(cpu = 1.0,
+                             ram = 1*GB,
+                             disk = 1*GB))
+
+The simplest idiomatic Job configuration thus becomes:
+
+    import os
+    hello_world_job = Job(
+      task=SimpleTask(name="hello_world", command="echo hello world"),
+      role=os.getenv('USER'),
+      cluster="cluster1")
+
+When written to `hello_world.aurora`, you invoke it with a simple
+`aurora job create cluster1/$USER/test/hello_world hello_world.aurora`.
+
+### Combining tasks
+
+`Tasks.concat` (synonym: `concat_tasks`) and
+`Tasks.combine` (synonym: `combine_tasks`) merge multiple Task definitions
+into a single Task. It may be easier to define complex Jobs
+as smaller constituent Tasks. But since a Job only includes a single
+Task, the subtasks must be combined before using them in a Job.
+Smaller Tasks can also be reused between Jobs, instead of having to
+repeat their definition for multiple Jobs.
+
+With both methods, the merged Task takes the first Task's name. The
+difference between the two is the result Task's process ordering.
+
+-   `Tasks.combine` runs its subtasks' processes in no particular order.
+    The new Task's resource consumption is the sum of all its subtasks'
+    consumption.
+
+-   `Tasks.concat` runs its subtasks in the order supplied, with each
+    subtask's processes run serially between tasks. It is analogous to
+    the `order` constraint helper, except at the Task level instead of
+    the Process level. The new Task's resource consumption is the
+    maximum value specified by any subtask for each Resource attribute
+    (cpu, ram and disk).
+
+For example, given the following:
+
+    setup_task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper],
+      # It is important to note that {{Tasks.concat}} has
+      # no effect on the ordering of the processes within a task;
+      # hence the necessity of the {{order}} statement below
+      # (otherwise, the order in which {{download_interpreter}}
+      # and {{update_zookeeper}} run will be non-deterministic)
+      constraints=order(download_interpreter, update_zookeeper),
+      ...
+    )
+
+    run_task = SequentialTask(
+      ...
+      processes=[download_application, start_application],
+      ...
+    )
+
+    combined_task = Tasks.concat(setup_task, run_task)
+
+The `Tasks.concat` command merges the two Tasks into a single Task and
+ensures all processes in `setup_task` run before the processes
+in `run_task`. Conceptually, the task is reduced to:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper,
+                        download_application, start_application),
+      ...
+    )
+
+In the case of `Tasks.combine`, the two schedules run in parallel:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper) +
+                        order(download_application, start_application),
+      ...
+    )
+
+In the latter case, each of the two sequences may operate in parallel.
+Of course, this may not be the intended behavior (for example, if
+the `start_application` Process implicitly relies
+upon `download_interpreter`). Make sure you understand the difference
+between using one or the other.
+
+## Defining Job Objects
+
+A job is a group of identical tasks that Aurora can run in a Mesos cluster.
+
+A `Job` object is defined by the values of several attributes, some
+required and some optional. The required attributes are:
+
+-   `task`: Task object to bind to this job. Note that a Job can
+    only take a single Task.
+
+-   `role`: Job's role account; in other words, the user account to run
+    the job as on a Mesos cluster machine. A common value is
+    `os.getenv('USER')`; using a Python command to get the user who
+    submits the job request. The other common value is the service
+    account that runs the job, e.g. `www-data`.
+
+-   `environment`: Job's environment, typical values
+    are `devel`, `test`, or `prod`.
+
+-   `cluster`: Aurora cluster to schedule the job in, defined in
+    `/etc/aurora/clusters.json` or `~/.clusters.json`. You can specify
+    jobs where the only difference is the `cluster`, then at run time
+    only run the Job whose job key includes your desired cluster's name.
+
+You usually see a `name` parameter. By default, `name` inherits its
+value from the Job's associated Task object, but you can override this
+default. For these four parameters, a Job definition might look like:
+
+    foo_job = Job( name = 'foo', cluster = 'cluster1',
+              role = os.getenv('USER'), environment = 'prod',
+              task = foo_task)
+
+In addition to the required attributes, there are several optional
+attributes. Details can be found in the [Aurora Configuration Reference](../configuration/#job-objects).
+
+
+## The jobs List
+
+At the end of your `.aurora` file, you need to specify a list of the
+file's defined Jobs. For example, the following exports the jobs `job1`,
+`job2`, and `job3`.
+
+    jobs = [job1, job2, job3]
+
+This allows the Aurora client to invoke commands on those jobs, such as
+starting, updating, or killing them.
+
+
+
+Basic Examples
+==============
+
+These are provided to give a basic understanding of simple Aurora jobs.
+
+### hello_world.aurora
+
+Put the following in a file named `hello_world.aurora`, substituting your own
+values where needed, such as for `cluster`.
+
+    import os
+    hello_world_process = Process(name = 'hello_world', cmdline = 'echo hello world')
+
+    hello_world_task = Task(
+      resources = Resources(cpu = 0.1, ram = 16 * MB, disk = 16 * MB),
+      processes = [hello_world_process])
+
+    hello_world_job = Job(
+      cluster = 'cluster1',
+      role = os.getenv('USER'),
+      task = hello_world_task)
+
+    jobs = [hello_world_job]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world hello_world.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world
+
+### Environment Tailoring
+
+Put the following in a file named `hello_world_productionized.aurora`, substituting
+your own values where needed, such as for `cluster`.
+
+    include('hello_world.aurora')
+
+    production_resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 2 * GB)
+    staging_resources = Resources(cpu = 0.1, ram = 32 * MB, disk = 512 * MB)
+    hello_world_template = hello_world(
+        name = "hello_world-{{cluster}}",
+        task = hello_world(resources=production_resources))
+
+    jobs = [
+      # production jobs
+      hello_world_template(cluster = 'cluster1', instances = 25),
+      hello_world_template(cluster = 'cluster2', instances = 15),
+
+      # staging jobs
+      hello_world_template(
+        cluster = 'local',
+        instances = 1,
+        task = hello_world(resources=staging_resources)),
+    ]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world-cluster1 hello_world_productionized.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world-cluster1
\ No newline at end of file
diff --git a/source/documentation/0.19.0/reference/configuration.md b/source/documentation/0.19.0/reference/configuration.md
new file mode 100644
index 0000000..d435082
--- /dev/null
+++ b/source/documentation/0.19.0/reference/configuration.md
@@ -0,0 +1,619 @@
+Aurora Configuration Reference
+==============================
+
+Don't know where to start? The Aurora configuration schema is very
+powerful, and configurations can become quite complex for advanced use
+cases.
+
+For examples of simple configurations to get something up and running
+quickly, check out the [Tutorial](../../getting-started/tutorial/). When you feel comfortable with the basics, move
+on to the [Configuration Tutorial](../configuration-tutorial/) for more in-depth coverage of
+configuration design.
+
+- [Process Schema](#process-schema)
+    - [Process Objects](#process-objects)
+- [Task Schema](#task-schema)
+    - [Task Object](#task-object)
+    - [Constraint Object](#constraint-object)
+    - [Resource Object](#resource-object)
+- [Job Schema](#job-schema)
+    - [Job Objects](#job-objects)
+    - [UpdateConfig Objects](#updateconfig-objects)
+    - [HealthCheckConfig Objects](#healthcheckconfig-objects)
+    - [Announcer Objects](#announcer-objects)
+    - [Container Objects](#container)
+    - [LifecycleConfig Objects](#lifecycleconfig-objects)
+- [Specifying Scheduling Constraints](#specifying-scheduling-constraints)
+- [Template Namespaces](#template-namespaces)
+    - [mesos Namespace](#mesos-namespace)
+    - [thermos Namespace](#thermos-namespace)
+
+
+Process Schema
+==============
+
+Process objects consist of required `name` and `cmdline` attributes. You can customize Process
+behavior with its optional attributes. Remember, Processes are handled by Thermos.
+
+### Process Objects
+
+  **Attribute Name**  | **Type**    | **Description**
+  ------------------- | :---------: | ---------------------------------
+   **name**           | String      | Process name (Required)
+   **cmdline**        | String      | Command line (Required)
+   **max_failures**   | Integer     | Maximum process failures (Default: 1)
+   **daemon**         | Boolean     | When True, this is a daemon process. (Default: False)
+   **ephemeral**      | Boolean     | When True, this is an ephemeral process. (Default: False)
+   **min_duration**   | Integer     | Minimum duration between process restarts in seconds. (Default: 5)
+   **final**          | Boolean     | When True, this process is a finalizing one that should run last. (Default: False)
+   **logger**         | Logger      | Struct defining the log behavior for the process. (Default: Empty)
+
+#### name
+
+The name is any valid UNIX filename string (specifically no
+slashes, NULLs or leading periods). Within a Task object, each Process name
+must be unique.
+
+#### cmdline
+
+The command line run by the process. The command line is invoked in a bash
+subshell, so it can involve full-blown bash scripts. However, nothing is
+supplied for command-line arguments so `$*` is unspecified.
+
+#### max_failures
+
+The maximum number of failures (non-zero exit statuses) this process can
+have before being marked permanently failed and not retried. If a
+process permanently fails, Thermos looks at the failure limit of the task
+containing the process (usually 1) to determine if the task has
+failed as well.
+
+Setting `max_failures` to 0 makes the process retry
+indefinitely until it achieves a successful (zero) exit status.
+It retries at most once every `min_duration` seconds to prevent
+an effective denial of service attack on the coordinating Thermos scheduler.
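+
+As a sketch, a download step that should be retried indefinitely, at most once
+every 10 seconds, could be declared as follows (the URL is a placeholder):
+
+        fetch = Process(
+          name = 'fetch',
+          cmdline = 'curl -O https://example.com/app.zip',
+          max_failures = 0,
+          min_duration = 10)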
+
+#### daemon
+
+By default, Thermos processes are non-daemon. If `daemon` is set to True, a
+successful (zero) exit status does not prevent future process runs.
+Instead, the process reinvokes after `min_duration` seconds.
+However, the maximum failure limit still applies. A combination of
+`daemon=True` and `max_failures=0` causes a process to retry
+indefinitely regardless of exit status. This should be avoided
+for very short-lived processes because of the accumulation of
+checkpointed state for each process run. When running in Mesos
+specifically, `max_failures` is capped at 100.
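+
+For illustration, a long-lived helper that is re-run regardless of its exit
+status might be declared as below; the script name is hypothetical:
+
+        heartbeat = Process(
+          name = 'heartbeat',
+          cmdline = './send_heartbeat.sh',
+          daemon = True,
+          max_failures = 0)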
+
+#### ephemeral
+
+By default, Thermos processes are non-ephemeral. If `ephemeral` is set to
+True, the process' status is not used to determine if its containing task
+has completed. For example, consider a task with a non-ephemeral
+webserver process and an ephemeral logsaver process
+that periodically checkpoints its log files to a centralized data store.
+The task is considered finished once the webserver process has
+completed, regardless of the logsaver's current status.
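+
+A sketch of the webserver/logsaver pairing described above, with illustrative
+command lines, might look like:
+
+        webserver = Process(
+          name = 'webserver',
+          cmdline = './run_server.sh')
+
+        logsaver = Process(
+          name = 'logsaver',
+          cmdline = './checkpoint_logs.sh',
+          daemon = True,
+          ephemeral = True)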
+
+#### min_duration
+
+Processes may succeed or fail multiple times during a single task's
+duration. Each of these is called a *process run*. `min_duration` is
+the minimum number of seconds the scheduler waits before running the
+same process.
+
+#### final
+
+Processes can be grouped into two classes: ordinary processes and
+finalizing processes. By default, Thermos processes are ordinary. They
+run as long as the task is considered healthy (i.e., no failure
+limits have been reached.) But once all regular Thermos processes
+finish or the task reaches a certain failure threshold, it
+moves into a "finalization" stage and runs all finalizing
+processes. These are typically processes necessary for cleaning up the
+task, such as log checkpointers, or perhaps e-mail notifications that
+the task completed.
+
+Finalizing processes may not depend upon ordinary processes or
+vice-versa, however finalizing processes may depend upon other
+finalizing processes and otherwise run as a typical process
+schedule.
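+
+For example, a finalizing log-archiving step might be sketched as follows (the
+command line is illustrative):
+
+        log_archiver = Process(
+          name = 'log_archiver',
+          cmdline = 'tar czf logs.tar.gz .logs',
+          final = True)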
+
+#### logger
+
+The default behavior of Thermos is to store stderr/stdout logs in files which grow unbounded.
+In the event that you have large log volume, you may want to configure Thermos to automatically
+rotate logs after they grow to a certain size, which can prevent your job from using more than its
+allocated disk space.
+
+Logger objects specify a `destination` for Process logs which is, by default, `file` - a pair of
+`stdout` and `stderr` files. It's also possible to specify `console` to get logs output to
+the Process stdout and stderr streams, `none` to suppress any log output, or `both` to send logs to
+files and console streams.
+
+The default Logger `mode` is `standard` which lets the stdout and stderr streams grow without bound.
+
+  **Attribute Name**  | **Type**          | **Description**
+  ------------------- | :---------------: | ---------------------------------
+   **destination**    | LoggerDestination | Destination of logs. (Default: `file`)
+   **mode**           | LoggerMode        | Mode of the logger. (Default: `standard`)
+   **rotate**         | RotatePolicy      | An optional rotation policy. (Default: `Empty`)
+
+A RotatePolicy describes log rotation behavior when `mode` is set to `rotate`; it is ignored
+otherwise. If `rotate` is `Empty` or `RotatePolicy()` while `mode` is set to `rotate`, the
+defaults below are used.
+
+  **Attribute Name**  | **Type**     | **Description**
+  ------------------- | :----------: | ---------------------------------
+   **log_size**       | Integer      | Maximum size (in bytes) of an individual log file. (Default: 100 MiB)
+   **backups**        | Integer      | The maximum number of backups to retain. (Default: 5)
+
+An example process configuration is as follows:
+
+        process = Process(
+          name='process',
+          logger=Logger(
+            destination=LoggerDestination('both'),
+            mode=LoggerMode('rotate'),
+            rotate=RotatePolicy(log_size=5*MB, backups=5)
+          )
+        )
+
+Task Schema
+===========
+
+Tasks fundamentally consist of a `name` and a list of Process objects stored as the
+value of the `processes` attribute. Processes can be further constrained with
+`constraints`. By default, `name`'s value inherits from the first Process in the
+`processes` list, so for simple `Task` objects with one Process, `name`
+can be omitted. In Mesos, `resources` is also required.
+
+### Task Object
+
+   **param**               | **type**                         | **description**
+   ---------               | :---------:                      | ---------------
+   ```name```              | String                           | Process name (Required) (Default: ```processes0.name```)
+   ```processes```         | List of ```Process``` objects    | List of ```Process``` objects bound to this task. (Required)
+   ```constraints```       | List of ```Constraint``` objects | List of ```Constraint``` objects constraining processes.
+   ```resources```         | ```Resource``` object            | Resource footprint. (Required)
+   ```max_failures```      | Integer                          | Maximum process failures before being considered failed (Default: 1)
+   ```max_concurrency```   | Integer                          | Maximum number of concurrent processes (Default: 0, unlimited concurrency.)
+   ```finalization_wait``` | Integer                          | Amount of time allocated for finalizing processes, in seconds. (Default: 30)
+
+#### name
+`name` is a string denoting the name of this task. It defaults to the name of the first Process in
+the list of Processes associated with the `processes` attribute.
+
+#### processes
+
+`processes` is an unordered list of `Process` objects. To constrain the order
+in which they run, use `constraints`.
+
+##### constraints
+
+A list of `Constraint` objects. Currently it supports only one type,
+the `order` constraint. `order` is a list of process names
+that should run in the order given. For example,
+
+        process = Process(cmdline = "echo hello {{name}}")
+        task = Task(name = "echoes",
+                    processes = [process(name = "jim"), process(name = "bob")],
+                    constraints = [Constraint(order = ["jim", "bob"])])
+
+Constraints can be supplied ad-hoc and in duplicate. Not all
+Processes need be constrained, however Tasks with cycles are
+rejected by the Thermos scheduler.
+
+Use the `order` function as shorthand to generate `Constraint` lists.
+The following:
+
+        order(process1, process2)
+
+is shorthand for
+
+        [Constraint(order = [process1.name(), process2.name()])]
+
+The `order` function accepts Process name strings `('foo', 'bar')` or the processes
+themselves, e.g. `foo=Process(name='foo', ...)`, `bar=Process(name='bar', ...)`,
+`constraints=order(foo, bar)`.
+
+#### resources
+
+Takes a `Resource` object, which specifies the amounts of CPU, memory, and disk space resources
+to allocate to the Task.
+
+#### max_failures
+
+`max_failures` is the number of failed processes needed for the `Task` to be
+marked as failed.
+
+For example, assume a Task has two Processes and a `max_failures` value of `2`:
+
+        template = Process(max_failures=10)
+        task = Task(
+          name = "fail",
+          processes = [
+             template(name = "failing", cmdline = "exit 1"),
+             template(name = "succeeding", cmdline = "exit 0")
+          ],
+          max_failures=2)
+
+The `failing` Process could fail 10 times before being marked as permanently
+failed, and the `succeeding` Process could succeed on the first run. However,
+the task would succeed despite only allowing for two failed processes. To be more
+specific, there would be 10 failed process runs yet 1 failed process. Both processes
+would have to fail for the Task to fail.
+
+#### max_concurrency
+
+For Tasks with a number of expensive but otherwise independent
+processes, you may want to limit the amount of concurrency
+the Thermos scheduler provides rather than artificially constraining
+it via `order` constraints. For example, a test framework may
+generate a task with 100 test run processes, but wants to run it on
+a machine with only 4 cores. You can limit the amount of parallelism to
+4 by setting `max_concurrency=4` in your task configuration.
+
+For example, the following task spawns 180 Processes ("mappers")
+to compute individual elements of a 180 degree sine table, all dependent
+upon one final Process ("reducer") to tabulate the results:
+
+    def make_mapper(id):
+      return Process(
+        name = "mapper%03d" % id,
+        cmdline = "echo 'scale=50;s(%d*4*a(1)/180)' | bc -l > temp.sine_table.%03d" % (id, id))
+
+    def make_reducer():
+      return Process(name = "reducer",
+                     cmdline = "cat temp.* | nl > sine_table.txt && rm -f temp.*")
+
+    processes = map(make_mapper, range(180))
+
+    task = Task(
+      name = "mapreduce",
+      processes = processes + [make_reducer()],
+      constraints = [Constraint(order = [mapper.name(), 'reducer']) for mapper
+                     in processes],
+      max_concurrency = 8)
+
+#### finalization_wait
+
+Process execution is organized into three active stages: `ACTIVE`,
+`CLEANING`, and `FINALIZING`. The `ACTIVE` stage is when ordinary processes run.
+This stage lasts as long as Processes are running and the Task is healthy.
+The moment either all Processes have finished successfully or the Task has reached a
+maximum Process failure limit, it goes into `CLEANING` stage and send
+SIGTERMs to all currently running Processes and their process trees.
+Once all Processes have terminated, the Task goes into `FINALIZING` stage
+and invokes the schedule of all Processes with the "final" attribute set to True.
+
+This entire process, from the end of the `ACTIVE` stage to the end of `FINALIZING`,
+must happen within `finalization_wait` seconds. If it does not
+finish during that time, all remaining Processes are sent SIGKILLs
+(or, if they depend upon uncompleted Processes, are
+never invoked).
+
+When running on Aurora, the `finalization_wait` is capped at 60 seconds.
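+
+A hedged sketch of a finalizer: the process names, commands, and wait value below are
+placeholders, shown only to illustrate how `final = True` and `finalization_wait`
+fit together.
+
+        cleanup = Process(name = "cleanup", cmdline = "rm -f /tmp/scratch.dat", final = True)
+        task = Task(name = "work_then_cleanup",
+                    processes = [Process(name = "work", cmdline = "./run_work"), cleanup],
+                    finalization_wait = 30)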
+
+### Constraint Object
+
+Constraint objects currently support only a single ordering constraint, `order`,
+which specifies that its processes run sequentially in the order given. By
+default, all processes run in parallel when bound to a `Task` without
+ordering constraints.
+
+   param | type           | description
+   ----- | :----:         | -----------
+   order | List of String | List of processes by name (String) that should be run serially.
+
+### Resource Object
+
+Specifies the amount of CPU, RAM, and disk resources the task needs. See the
+[Resource Isolation document](../../features/resource-isolation/) for suggested values and to understand how
+resources are allocated.
+
+  param      | type    | description
+  -----      | :----:  | -----------
+  ```cpu```  | Float   | Fractional number of cores required by the task.
+  ```ram```  | Integer | Bytes of RAM required by the task.
+  ```disk``` | Integer | Bytes of disk required by the task.
+  ```gpu```  | Integer | Number of GPU cores required by the task.
+
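+A hedged, minimal sketch of a Task carrying an explicit resource request. The amounts
+are illustrative only, and the `Resources` constructor and `GB` size helper follow the
+conventions used in Aurora config examples rather than being defined here:
+
+        task = Task(name = "compute",
+                    processes = [Process(name = "compute", cmdline = "./crunch")],
+                    resources = Resources(cpu = 2.0, ram = 2*GB, disk = 8*GB))
+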
+
+Job Schema
+==========
+
+### Job Objects
+
+*Note: Specifying a ```Container``` object as the value of the ```container``` property is
+  deprecated in favor of setting its value directly to the appropriate ```Docker``` or ```Mesos```
+  container type*
+
+*Note: Specifying preemption behavior of tasks through `production` flag is deprecated in favor of
+  electing appropriate task tier via `tier` attribute.*
+
+   name | type | description
+   ------ | :-------: | -------
+  ```task``` | Task | The Task object to bind to this job. Required.
+  ```name``` | String | Job name. (Default: inherited from the task attribute's name)
+  ```role``` | String | Job role account. Required.
+  ```cluster``` | String | Cluster in which this job is scheduled. Required.
+  ```environment``` | String | Job environment, default ```devel```. By default must be one of ```prod```, ```devel```, ```test``` or ```staging<number>``` but it can be changed by the Cluster operator using the scheduler option `allowed_job_environments`.
+  ```contact``` | String | Best email address to reach the owner of the job. For production jobs, this is usually a team mailing list.
+  ```instances```| Integer | Number of instances (sometimes referred to as replicas or shards) of the task to create. (Default: 1)
+  ```cron_schedule``` | String | Cron schedule in cron format. May only be used with non-service jobs. See [Cron Jobs](../../features/cron-jobs/) for more information. Default: None (not a cron job.)
+  ```cron_collision_policy``` | String | Policy to use when a cron job is triggered while a previous run is still active. KILL_EXISTING: kill the previous run and schedule the new run. CANCEL_NEW: let the previous run continue and cancel the new run. (Default: KILL_EXISTING)
+  ```update_config``` | ```UpdateConfig``` object | Parameters for controlling the rate and policy of rolling updates.
+  ```constraints``` | dict | Scheduling constraints for the tasks. See the section on the [constraint specification language](#specifying-scheduling-constraints)
+  ```service``` | Boolean | If True, restart tasks regardless of success or failure. (Default: False)
+  ```max_task_failures``` | Integer | Maximum number of failures after which the task is considered to have failed (Default: 1) Set to -1 to allow for infinite failures
+  ```priority``` | Integer | Preemption priority to give the task (Default 0). Tasks with higher priorities may preempt tasks at lower priorities.
+  ```production``` | Boolean |  (Deprecated) Whether or not this is a production task that may [preempt](../../features/multitenancy/#preemption) other tasks (Default: False). Production job role must have the appropriate [quota](../../features/multitenancy/#preemption).
+  ```health_check_config``` | ```HealthCheckConfig``` object | Parameters for controlling a task's health checks. HTTP health check is only used if a health port was assigned with a command line wildcard.
+  ```container``` | Choice of ```Container```, ```Docker``` or ```Mesos``` object | An optional container to run all processes inside of.
+  ```lifecycle``` | ```LifecycleConfig``` object | An optional task lifecycle configuration that dictates commands to be executed on startup/teardown.  HTTP lifecycle is enabled by default if the "health" port is requested.  See [LifecycleConfig Objects](#lifecycleconfig-objects) for more information.
+  ```tier``` | String | Task tier type. The default scheduler tier configuration allows for 3 tiers: `revocable`, `preemptible`, and `preferred`. If a tier is not elected, Aurora assigns the task to a tier based on its choice of `production` (that is `preferred` for production and `preemptible` for non-production jobs). See the section on [Configuration Tiers](../../features/multitenancy/#configuration-tiers) for more information.
+  ```announce``` | ```Announcer``` object | Optionally enable Zookeeper ServerSet announcements. See [Announcer Objects](#announcer-objects) for more information.
+  ```enable_hooks``` | Boolean | Whether to enable [Client Hooks](../client-hooks/) for this job. (Default: False)
+
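+As a minimal, hedged sketch: the cluster, role, contact address, and the `hello_task`
+Task are placeholders for values you would define in your own environment.
+
+        jobs = [Job(cluster = "devcluster",
+                    role = "www-data",
+                    environment = "devel",
+                    name = "hello",
+                    instances = 2,
+                    service = True,
+                    contact = "team@example.com",
+                    task = hello_task)]
+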
+
+### UpdateConfig Objects
+
+Parameters for controlling the rate and policy of rolling updates.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+| ```watch_secs```             | Integer  | Minimum number of seconds a shard must remain in ```RUNNING``` state before considered a success (Default: 45)
+| ```max_per_shard_failures``` | Integer  | Maximum number of restarts per shard during update. Increments total failure count when this limit is exceeded. (Default: 0)
+| ```max_total_failures```     | Integer  | Maximum number of shard failures to be tolerated in total during an update. Cannot be greater than or equal to the total number of tasks in a job. (Default: 0)
+| ```rollback_on_failure```    | boolean  | When False, prevents auto rollback of a failed update (Default: True)
+| ```wait_for_batch_completion```| boolean | When True, all threads from a given batch will be blocked from picking up new instances until the entire batch is updated. This essentially simulates the legacy sequential updater algorithm. (Default: False)
+| ```pulse_interval_secs```    | Integer  |  Indicates a [coordinated update](../../features/job-updates/#coordinated-job-updates). If no pulses are received within the provided interval the update will be blocked. Beta-updater only. Will fail on submission when used with client updater. (Default: None)
+
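+A hedged sketch of an update policy, passed as the ```update_config``` field of a Job.
+The values shown are illustrative, not recommendations:
+
+        update_config = UpdateConfig(batch_size = 5,
+                                     watch_secs = 30,
+                                     max_per_shard_failures = 2)
+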
+### HealthCheckConfig Objects
+
+Parameters for controlling a task's health checks via HTTP or a shell command.
+
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```health_checker```           | HealthCheckerConfig | Configure what kind of health check to use.
+| ```initial_interval_secs```    | Integer   | Initial grace period (during which health-check failures are ignored) while performing health checks. (Default: 15)
+| ```interval_secs```            | Integer   | Interval on which to check the task's health. (Default: 10)
+| ```max_consecutive_failures``` | Integer   | Maximum number of consecutive failures that will be tolerated before considering a task unhealthy (Default: 0)
+| ```min_consecutive_successes``` | Integer   | Minimum number of consecutive successful health checks required before considering a task healthy (Default: 1)
+| ```timeout_secs```             | Integer   | Health check timeout. (Default: 1)
+
+### HealthCheckerConfig Objects
+| param                          | type                | description
+| -------                        | :-------:           | --------
+| ```http```                     | HttpHealthChecker  | Configure health check to use HTTP. (Default)
+| ```shell```                    | ShellHealthChecker | Configure health check via a shell command.
+
+### HttpHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```endpoint```                 | String    | HTTP endpoint to check (Default: /health)
+| ```expected_response```        | String    | If not empty, fail the HTTP health check if the response differs. Case insensitive. (Default: ok)
+| ```expected_response_code```   | Integer   | If not zero, fail the HTTP health check if the response code differs. (Default: 0)
+
+### ShellHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```shell_command```            | String    | An alternative to HTTP health checking. Specifies a shell command that will be executed. Any non-zero exit status will be interpreted as a health check failure.
+
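+For example, a hedged sketch of a shell-based health check, attached to a Job via its
+```health_check_config``` field; the probe command is a placeholder for whatever
+liveness check your service provides:
+
+        health_check_config = HealthCheckConfig(
+          health_checker = HealthCheckerConfig(
+            shell = ShellHealthChecker(shell_command = "check_service --quiet")),
+          interval_secs = 10,
+          max_consecutive_failures = 3)
+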
+
+### Announcer Objects
+
+If the `announce` field in the Job configuration is set, each task will be
+registered in the ServerSet `/aurora/role/environment/jobname` in the
+zookeeper ensemble configured by the executor (which can optionally be overridden by specifying the
+`zk_path` parameter).  If no Announcer object is specified,
+no announcement will take place.  For more information about ServerSets, see the [Service Discovery](../../features/service-discovery/)
+documentation.
+
+By default, the hostname in the registered endpoints will be the `--hostname` parameter
+that is passed to the mesos agent. To override the hostname value, the executor can be started
+with `--announcer-hostname=<overridden_value>`. If you decide to use `--announcer-hostname` and if
+the overridden value needs to change for every executor, then the executor has to be started inside a wrapper; see [Executor Wrapper](../../operations/configuration/#thermos-executor-wrapper).
+
+For example, if you want the hostname in the endpoint to be an IP address instead of the hostname,
+the `--hostname` parameter to the mesos agent can be set to the machine IP or the executor can
+be started with `--announcer-hostname=<host_ip>` while wrapping the executor inside a script.
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```primary_port```             | String    | Which named port to register as the primary endpoint in the ServerSet (Default: `http`)
+| ```portmap```                  | dict      | A mapping of additional endpoints to be announced in the ServerSet (Default: `{ 'aurora': '{{primary_port}}' }`)
+| ```zk_path```                  | String    | Zookeeper serverset path override (executor must be started with the `--announcer-allow-custom-serverset-path` parameter)
+
+#### Port aliasing with the Announcer `portmap`
+
+The primary endpoint registered in the ServerSet is the one allocated to the port
+specified by the `primary_port` in the `Announcer` object, by default
+the `http` port.  This port can be referenced from anywhere within a configuration
+as `{{thermos.ports[http]}}`.
+
+Without the port map, each named port would be allocated a unique port number.
+The `portmap` allows two different named ports to be aliased together.  The default
+`portmap` aliases the `aurora` port (i.e. `{{thermos.ports[aurora]}}`) to
+the `http` port.  Even though the two ports can be referenced independently,
+only one port is allocated by Mesos.  Any port referenced in a `Process` object
+that is not in the portmap will be allocated dynamically by Mesos and announced as well.
+
+It is possible to use the portmap to alias names to static port numbers, e.g.
+`{'http': 80, 'https': 443, 'aurora': 'http'}`.  In this case, referencing
+`{{thermos.ports[aurora]}}` would look up `{{thermos.ports[http]}}` then
+find a static port 80.  No port would be requested of or allocated by Mesos.
+
+Static ports should be used cautiously as Aurora does nothing to prevent two
+tasks with the same static port allocations from being co-scheduled.
+External constraints such as agent attributes should be used to enforce such
+guarantees should they be needed.
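+
+A hedged sketch of an Announcer that keeps the default `http` primary port and adds a
+purely illustrative `admin` alias to the portmap:
+
+        announce = Announcer(primary_port = "http",
+                             portmap = {"aurora": "http", "admin": "http"})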
+
+
+### Container Objects
+
+Describes the container the job's processes will run inside. If not using Docker or the Mesos
+unified-container, the container can be omitted from your job config.
+
+  param          | type           | description
+  -----          | :----:         | -----------
+  ```mesos```    | Mesos          | A native Mesos container to use.
+  ```docker```   | Docker         | A Docker container to use (via Docker engine)
+
+### Mesos Object
+
+  param            | type                           | description
+  -----            | :----:                         | -----------
+  ```image```      | Choice(AppcImage, DockerImage) | An optional filesystem image to use within this container.
+  ```volumes```    | List(Volume)                   | An optional list of volume mounts for this container.
+
+### Volume Object
+
+  param                  | type     | description
+  -----                  | :----:   | -----------
+  ```container_path```   | String   | Mount point inside the container.
+  ```host_path```        | String   | Path on the host to mount.
+  ```mode```             | Enum     | Mode of the mount, can be 'RW' or 'RO'.
+
+### AppcImage
+
+Describes an AppC filesystem image.
+
+  param          | type   | description
+  -----          | :----: | -----------
+  ```name```     | String | The name of the appc image.
+  ```image_id``` | String | The [image id](https://github.com/appc/spec/blob/master/spec/aci.md#image-id) of the appc image.
+
+### DockerImage
+
+Describes a Docker filesystem image.
+
+  param      | type   | description
+  -----      | :----: | -----------
+  ```name``` | String | The name of the docker image.
+  ```tag```  | String | The tag that identifies the docker image.
+
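+Putting the pieces above together, a hedged sketch of a Mesos container that uses a
+Docker filesystem image and mounts a read-only host directory. The image name, tag, and
+paths are placeholders, and passing volumes also requires the scheduler's
+`-allow_container_volumes` option:
+
+        container = Mesos(image = DockerImage(name = "python", tag = "2.7"),
+                          volumes = [Volume(container_path = "/etc/myapp",
+                                            host_path = "/var/lib/myapp-config",
+                                            mode = "RO")])
+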
+
+### Docker Object
+
+*Note: In order to correctly execute processes inside a job, the Docker container must have Python 2.7 installed.*
+
+*Note: For private docker registries, mesos mandates the docker credential file to be named `.dockercfg`, even though docker may create a credential file with a different name on various platforms. Also, the `.dockercfg` file needs to be copied into the sandbox using the `-thermos_executor_resources` flag, specified while starting Aurora.*
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```image```      | String          | The name of the docker image to execute.  If the image does not exist locally it will be pulled with ```docker pull```.
+  ```parameters``` | List(Parameter) | Additional parameters to pass to the Docker engine.
+
+### Docker Parameter Object
+
+Docker CLI parameters. This needs to be enabled by the scheduler `-allow_docker_parameters` option.
+See [Docker Command Line Reference](https://docs.docker.com/reference/commandline/run/) for valid parameters.
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```name```       | String          | The name of the docker parameter. E.g. volume
+  ```value```      | String          | The value of the parameter. E.g. /usr/local/bin:/usr/bin:rw
+
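+A hedged sketch of a Docker container passing one extra engine parameter. The parameter
+shown is illustrative, and the scheduler must be started with `-allow_docker_parameters`
+for it to be accepted:
+
+        container = Docker(image = "python:2.7",
+                           parameters = [Parameter(name = "memory-swap", value = "-1")])
+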
+
+### LifecycleConfig Objects
+
+*Note: The only lifecycle configuration supported is the HTTP lifecycle via the HttpLifecycleConfig.*
+
+  param          | type                | description
+  -----          | :----:              | -----------
+  ```http```     | HttpLifecycleConfig | Configure the lifecycle manager to send lifecycle commands to the task via HTTP.
+
+### HttpLifecycleConfig Objects
+
+*Note: The combined `graceful_shutdown_wait_secs` and `shutdown_wait_secs` is implicitly upper bounded by the `--stop_timeout_in_secs` flag exposed by the executor (see options [here](https://github.com/apache/aurora/blob/master/src/main/python/apache/aurora/executor/bin/thermos_executor_main.py), default is 2 minutes). Therefore, if the user specifies values that add up to more than `--stop_timeout_in_secs`, the task will be killed earlier than the user anticipates (see the termination lifecycle [here](https://aurora.apache.org/documentation/latest/reference/task-lifecycle/#forceful-termination-killing-restarting)). Furthermore, `stop_timeout_in_secs` itself is implicitly upper bounded by two scheduler options: `transient_task_state_timeout` and `preemption_slot_hold_time` (see reference [here](http://aurora.apache.org/documentation/latest/reference/scheduler-configuration/). If the `stop_timeout_in_secs` exceeds either of these scheduler options, tasks could be designated as LOST or tasks utilizing preemption could lose their desired slot respectively. Cluster operators should be aware of these timings should they change the defaults.*
+
+  param                             | type    | description
+  -----                             | :----:  | -----------
+  ```port```                        | String  | The named port to send POST commands. (Default: health)
+  ```graceful_shutdown_endpoint```  | String  | Endpoint to hit to indicate that a task should gracefully shutdown. (Default: /quitquitquit)
+  ```shutdown_endpoint```           | String  | Endpoint to hit to give a task its final warning before being killed. (Default: /abortabortabort)
+  ```graceful_shutdown_wait_secs``` | Integer | The amount of time (in seconds) to wait after hitting the ```graceful_shutdown_endpoint``` before proceeding with the [task termination lifecycle](https://aurora.apache.org/documentation/latest/reference/task-lifecycle/#forceful-termination-killing-restarting). (Default: 5)
+  ```shutdown_wait_secs```          | Integer | The amount of time (in seconds) to wait after hitting the ```shutdown_endpoint``` before proceeding with the [task termination lifecycle](https://aurora.apache.org/documentation/latest/reference/task-lifecycle/#forceful-termination-killing-restarting). (Default: 5)
+
+#### graceful_shutdown_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint to request that the task gracefully shut itself down.  This is a
+courtesy call before the `shutdown_endpoint` is invoked
+`graceful_shutdown_wait_secs` seconds later.
+
+#### shutdown_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint as a final warning before the task is shut down.  If the task
+does not shut down on its own after `shutdown_wait_secs` seconds, it will be
+forcefully killed.
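+
+A hedged sketch of overriding the shutdown timings while keeping the default endpoints;
+the wait values are illustrative only:
+
+        lifecycle = LifecycleConfig(
+          http = HttpLifecycleConfig(graceful_shutdown_wait_secs = 10,
+                                     shutdown_wait_secs = 10))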
+
+
+Specifying Scheduling Constraints
+=================================
+
+In the `Job` object there is a map `constraints` from String to String
+allowing the user to tailor the schedulability of tasks within the job.
+
+The constraint map's key is the name of the attribute on which we
+constrain Tasks within our Job. The value specifies how we constrain them.
+There are two types of constraints: *limit constraints* and *value
+constraints*.
+
+| constraint    | description
+| ------------- | --------------
+| Limit         | A string that specifies a limit for a constraint. Starts with <code>'limit:</code> followed by an Integer and closing single quote, such as ```'limit:1'```.
+| Value         | A string that specifies a value for a constraint. To include a list of values, separate the values using commas. To negate the values of a constraint, start with a ```!```.
+
+Further details can be found in the [Scheduling Constraints](../../features/constraints/) feature
+description.
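+
+For example, a hedged sketch combining a value constraint and a limit constraint; the
+`rack` and `host` attribute names depend on what your cluster operator exposes:
+
+        constraints = {
+          "rack": "rack1,rack2",
+          "host": "limit:1",
+        }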
+
+
+Template Namespaces
+===================
+
+Currently, a few Pystachio namespaces have special semantics. Using them
+in your configuration allows you to tailor application behavior
+through environment introspection or to interact in special ways with the
+Aurora client or Aurora-provided services.
+
+### mesos Namespace
+
+The `mesos` namespace contains variables which relate to the `mesos` agent
+which launched the task. The `instance` variable can be used
+to distinguish between Task replicas.
+
+| variable name     | type       | description
+| --------------- | :--------: | -------------
+| ```instance```    | Integer    | The instance number of the created task. A job with 5 replicas has instance numbers 0, 1, 2, 3, and 4.
+| ```hostname``` | String | The instance hostname that the task was launched on.
+
+Please note, there is no uniqueness guarantee for `instance` in the presence of
+network partitions. If that is required, it should be baked in at the application
+level using a distributed coordination service such as Zookeeper.
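+
+For example, a purely illustrative Process that records which replica and agent it is
+running on:
+
+        process = Process(
+          name = "announce_self",
+          cmdline = "echo 'instance {{mesos.instance}} on {{mesos.hostname}}'")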
+
+### thermos Namespace
+
+The `thermos` namespace contains variables that work directly on the
+Thermos platform in addition to Aurora. This namespace is fully
+compatible with Tasks invoked via the `thermos` CLI.
+
+| variable      | type                     | description                        |
+| :----------:  | ---------                | ------------                       |
+| ```ports```   | map of string to Integer | A map of names to port numbers     |
+| ```task_id``` | string                   | The task ID assigned to this task. |
+
+The `thermos.ports` namespace is automatically populated by Aurora when
+invoking tasks on Mesos. When running the `thermos` command directly,
+these ports must be explicitly mapped with the `-P` option.
+
+For example, if `{{thermos.ports[http]}}` is specified in a `Process`
+configuration, it is automatically extracted and auto-populated by
+Aurora, but must be specified with, for example, `thermos -P http:12345`
+to map `http` to port 12345 when running via the CLI.
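+
+As a sketch, a server Process would typically consume its allocated port like this;
+the `--port` flag belongs to the hypothetical server binary, not to Thermos:
+
+        server = Process(
+          name = "server",
+          cmdline = "./run_server --port={{thermos.ports[http]}}")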
diff --git a/source/documentation/0.19.0/reference/observer-configuration.md b/source/documentation/0.19.0/reference/observer-configuration.md
new file mode 100644
index 0000000..0126295
--- /dev/null
+++ b/source/documentation/0.19.0/reference/observer-configuration.md
@@ -0,0 +1,89 @@
+# Observer Configuration Reference
+
+The Aurora/Thermos observer can take a variety of configuration options through command-line arguments.
+A list of the available options can be seen by running `thermos_observer --long-help`.
+
+Please refer to the [Operator Configuration Guide](../../operations/configuration/) for details on how
+to properly set the most important options.
+
+```
+$ thermos_observer.pex --long-help
+Options:
+  -h, --help, --short-help
+                        show this help message and exit.
+  --long-help           show options from all registered modules, not just the
+                        __main__ module.
+  --mesos-root=MESOS_ROOT
+                        The mesos root directory to search for Thermos
+                        executor sandboxes [default: /var/lib/mesos]
+  --ip=IP               The IP address the observer will bind to. [default:
+                        0.0.0.0]
+  --port=PORT           The port on which the observer should listen.
+                        [default: 1338]
+  --polling_interval_secs=POLLING_INTERVAL_SECS
+                        The number of seconds between observer refresh
+                        attempts. [default: 5]
+  --task_process_collection_interval_secs=TASK_PROCESS_COLLECTION_INTERVAL_SECS
+                        The number of seconds between per task process
+                        resource collections. [default: 20]
+  --task_disk_collection_interval_secs=TASK_DISK_COLLECTION_INTERVAL_SECS
+                        The number of seconds between per task disk resource
+                        collections. [default: 60]
+
+  From module twitter.common.app:
+    --app_daemonize     Daemonize this application. [default: False]
+    --app_profile_output=FILENAME
+                        Dump the profiling output to a binary profiling
+                        format. [default: None]
+    --app_daemon_stderr=TWITTER_COMMON_APP_DAEMON_STDERR
+                        Direct this app's stderr to this file if daemonized.
+                        [default: /dev/null]
+    --app_debug         Print extra debugging information during application
+                        initialization. [default: False]
+    --app_rc_filename   Print the filename for the rc file and quit. [default:
+                        False]
+    --app_daemon_stdout=TWITTER_COMMON_APP_DAEMON_STDOUT
+                        Direct this app's stdout to this file if daemonized.
+                        [default: /dev/null]
+    --app_profiling     Run profiler on the code while it runs.  Note this can
+                        cause slowdowns. [default: False]
+    --app_ignore_rc_file
+                        Ignore default arguments from the rc file. [default:
+                        False]
+    --app_pidfile=TWITTER_COMMON_APP_PIDFILE
+                        The pidfile to use if --app_daemonize is specified.
+                        [default: None]
+
+  From module twitter.common.log.options:
+    --log_to_stdout=[scheme:]LEVEL
+                        OBSOLETE - legacy flag, use --log_to_stderr instead.
+                        [default: ERROR]
+    --log_to_stderr=[scheme:]LEVEL
+                        The level at which logging to stderr [default: ERROR].
+                        Takes either LEVEL or scheme:LEVEL, where LEVEL is one
+                        of ['INFO', 'NONE', 'WARN', 'ERROR', 'DEBUG', 'FATAL']
+                        and scheme is one of ['google', 'plain'].
+    --log_to_disk=[scheme:]LEVEL
+                        The level at which logging to disk [default: INFO].
+                        Takes either LEVEL or scheme:LEVEL, where LEVEL is one
+                        of ['INFO', 'NONE', 'WARN', 'ERROR', 'DEBUG', 'FATAL']
+                        and scheme is one of ['google', 'plain'].
+    --log_dir=DIR       The directory into which log files will be generated
+                        [default: /var/tmp].
+    --log_simple        Write a single log file rather than one log file per
+                        log level [default: False].
+    --log_to_scribe=[scheme:]LEVEL
+                        The level at which logging to scribe [default: NONE].
+                        Takes either LEVEL or scheme:LEVEL, where LEVEL is one
+                        of ['INFO', 'NONE', 'WARN', 'ERROR', 'DEBUG', 'FATAL']
+                        and scheme is one of ['google', 'plain'].
+    --scribe_category=CATEGORY
+                        The category used when logging to the scribe daemon.
+                        [default: python_default].
+    --scribe_buffer     Buffer messages when scribe is unavailable rather than
+                        dropping them. [default: False].
+    --scribe_host=HOST  The host running the scribe daemon. [default:
+                        localhost].
+    --scribe_port=PORT  The port used to connect to the scribe daemon.
+                        [default: 1463].
+```
diff --git a/source/documentation/0.19.0/reference/scheduler-configuration.md b/source/documentation/0.19.0/reference/scheduler-configuration.md
new file mode 100644
index 0000000..05f84d2
--- /dev/null
+++ b/source/documentation/0.19.0/reference/scheduler-configuration.md
@@ -0,0 +1,252 @@
+# Scheduler Configuration Reference
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+A list of the available options can be seen by running `aurora-scheduler -help`.
+
+Please refer to the [Operator Configuration Guide](../../operations/configuration/) for details on how
+to properly set the most important options.
+
+```
+$ aurora-scheduler -help
+-------------------------------------------------------------------------
+-h or -help to print this help message
+
+Required flags:
+-backup_dir [not null]
+	Directory to store backups under. Will be created if it does not exist.
+-cluster_name [not null]
+	Name to identify the cluster being served.
+-framework_authentication_file
+	Properties file which contains framework credentials to authenticate with Mesosmaster. Must contain the properties 'aurora_authentication_principal' and 'aurora_authentication_secret'.
+-ip
+	The ip address to listen. If not set, the scheduler will listen on all interfaces.
+-mesos_master_address [not null]
+	Address for the mesos master, can be a socket address or zookeeper path.
+-mesos_role
+	The Mesos role this framework will register as. The default is to left this empty, and the framework will register without any role and only receive unreserved resources in offer.
+-serverset_path [not null, must be non-empty]
+	ZooKeeper ServerSet path to register at.
+-shiro_after_auth_filter
+	Fully qualified class name of the servlet filter to be applied after the shiro auth filters are applied.
+-thermos_executor_path
+	Path to the thermos executor entry point.
+-tier_config [file must be readable]
+	Configuration file defining supported task tiers, task traits and behaviors.
+-webhook_config [file must exist, file must be readable]
+	Path to webhook configuration file.
+-zk_endpoints [must have at least 1 item]
+	Endpoint specification for the ZooKeeper servers.
+
+Optional flags:
+-allow_container_volumes (default false)
+	Allow passing in volumes in the job. Enabling this could pose a privilege escalation threat.
+-allow_docker_parameters (default false)
+	Allow to pass docker container parameters in the job.
+-allow_gpu_resource (default false)
+	Allow jobs to request Mesos GPU resource.
+-allowed_container_types (default [MESOS])
+	Container types that are allowed to be used by jobs.
+-allowed_job_environments (default ^(prod|devel|test|staging\d*)$)
+	Regular expression describing the environments that are allowed to be used by jobs.
+-async_slot_stat_update_interval (default (1, mins))
+	Interval on which to try to update open slot stats.
+-async_task_stat_update_interval (default (1, hrs))
+	Interval on which to try to update resource consumption stats.
+-async_worker_threads (default 8)
+	The number of worker threads to process async task operations with.
+-backup_interval (default (1, hrs))
+	Minimum interval on which to write a storage backup.
+-cron_scheduler_num_threads (default 10)
+	Number of threads to use for the cron scheduler thread pool.
+-cron_scheduling_max_batch_size (default 10) [must be > 0]
+	The maximum number of triggered cron jobs that can be processed in a batch.
+-cron_start_initial_backoff (default (5, secs))
+	Initial backoff delay while waiting for a previous cron run to be killed.
+-cron_start_max_backoff (default (1, mins))
+	Max backoff delay while waiting for a previous cron run to be killed.
+-cron_timezone (default GMT)
+	TimeZone to use for cron predictions.
+-custom_executor_config [file must exist, file must be readable]
+	Path to custom executor settings configuration file.
+-default_docker_parameters (default {})
+	Default docker parameters for any job that does not explicitly declare parameters.
+-dlog_max_entry_size (default (512, KB))
+	Specifies the maximum entry size to append to the log. Larger entries will be split across entry Frames.
+-dlog_shutdown_grace_period (default (2, secs))
+	Specifies the maximum time to wait for scheduled checkpoint and snapshot actions to complete before forcibly shutting down.
+-dlog_snapshot_interval (default (1, hrs))
+	Specifies the frequency at which snapshots of local storage are taken and written to the log.
+-enable_cors_for
+	List of domains for which CORS support should be enabled.
+-enable_mesos_fetcher (default false)
+	Allow jobs to pass URIs to the Mesos Fetcher. Note that enabling this feature could pose a privilege escalation threat.
+-enable_preemptor (default true)
+	Enable the preemptor and preemption
+-enable_revocable_cpus (default true)
+	Treat CPUs as a revocable resource.
+-enable_revocable_ram (default false)
+	Treat RAM as a revocable resource.
+-executor_user (default root)
+	User to start the executor. Defaults to "root". Set this to an unprivileged user if the mesos master was started with "--no-root_submissions". If set to anything other than "root", the executor will ignore the "role" setting for jobs since it can't use setuid() anymore. This means that all your jobs will run under the specified user and the user has to exist on the Mesos agents.
+-first_schedule_delay (default (1, ms))
+	Initial amount of time to wait before first attempting to schedule a PENDING task.
+-flapping_task_threshold (default (5, mins))
+	A task that repeatedly runs for less than this time is considered to be flapping.
+-framework_announce_principal (default false)
+	When 'framework_authentication_file' flag is set, the FrameworkInfo registered with the mesos master will also contain the principal. This is necessary if you intend to use mesos authorization via mesos ACLs. The default will change in a future release. Changing this value is backwards incompatible. For details, see MESOS-703.
+-framework_failover_timeout (default (21, days))
+	Time after which a framework is considered deleted.  SHOULD BE VERY HIGH.
+-framework_name (default Aurora)
+	Name used to register the Aurora framework with Mesos.
+-global_container_mounts (default [])
+	A comma separated list of mount points (in host:container form) to mount into all (non-mesos) containers.
+-history_max_per_job_threshold (default 100)
+	Maximum number of terminated tasks to retain in a job history.
+-history_min_retention_threshold (default (1, hrs))
+	Minimum guaranteed time for task history retention before any pruning is attempted.
+-history_prune_threshold (default (2, days))
+	Time after which the scheduler will prune terminated task history.
+-hostname
+	The hostname to advertise in ZooKeeper instead of the locally-resolved hostname.
+-http_authentication_mechanism (default NONE)
+	HTTP Authentication mechanism to use.
+-http_port (default 0)
+	The port to start an HTTP server on.  Default value will choose a random port.
+-initial_flapping_task_delay (default (30, secs))
+	Initial amount of time to wait before attempting to schedule a flapping task.
+-initial_schedule_penalty (default (1, secs))
+	Initial amount of time to wait before attempting to schedule a task that has failed to schedule.
+-initial_task_kill_retry_interval (default (5, secs))
+	When killing a task, retry after this delay if mesos has not responded, backing off up to transient_task_state_timeout
+-job_update_history_per_job_threshold (default 10)
+	Maximum number of completed job updates to retain in a job update history.
+-job_update_history_pruning_interval (default (15, mins))
+	Job update history pruning interval.
+-job_update_history_pruning_threshold (default (30, days))
+	Time after which the scheduler will prune completed job update history.
+-kerberos_debug (default false)
+	Produce additional Kerberos debugging output.
+-kerberos_server_keytab
+	Path to the server keytab.
+-kerberos_server_principal
+	Kerberos server principal to use, usually of the form HTTP/aurora.example.com@EXAMPLE.COM
+-max_flapping_task_delay (default (5, mins))
+	Maximum delay between attempts to schedule a flapping task.
+-max_leading_duration (default (1, days))
+	After leading for this duration, the scheduler should commit suicide.
+-max_registration_delay (default (1, mins))
+	Max allowable delay to allow the driver to register before aborting
+-max_reschedule_task_delay_on_startup (default (30, secs))
+	Upper bound of random delay for pending task rescheduling on scheduler startup.
+-max_saved_backups (default 48)
+	Maximum number of backups to retain before deleting the oldest backups.
+-max_schedule_attempts_per_sec (default 40.0)
+	Maximum number of scheduling attempts to make per second.
+-max_schedule_penalty (default (1, mins))
+	Maximum delay between attempts to schedule a PENDING tasks.
+-max_status_update_batch_size (default 1000) [must be > 0]
+	The maximum number of status updates that can be processed in a batch.
+-max_task_event_batch_size (default 300) [must be > 0]
+	The maximum number of task state change events that can be processed in a batch.
+-max_tasks_per_job (default 4000) [must be > 0]
+	Maximum number of allowed tasks in a single job.
+-max_tasks_per_schedule_attempt (default 5) [must be > 0]
+	The maximum number of tasks to pick in a single scheduling attempt.
+-max_update_instance_failures (default 20000) [must be > 0]
+	Upper limit on the number of failures allowed during a job update. This helps cap potentially unbounded entries into storage.
+-min_offer_hold_time (default (5, mins))
+	Minimum amount of time to hold a resource offer before declining.
+-native_log_election_retries (default 20)
+	The maximum number of attempts to obtain a new log writer.
+-native_log_election_timeout (default (15, secs))
+	The timeout for a single attempt to obtain a new log writer.
+-native_log_file_path
+	Path to a file to store the native log data in.  If the parent directory doesnot exist it will be created.
+-native_log_quorum_size (default 1)
+	The size of the quorum required for all log mutations.
+-native_log_read_timeout (default (5, secs))
+	The timeout for doing log reads.
+-native_log_write_timeout (default (3, secs))
+	The timeout for doing log appends and truncations.
+-native_log_zk_group_path
+	A zookeeper node for use by the native log to track the master coordinator.
+-offer_filter_duration (default (5, secs))
+	Duration after which we expect Mesos to re-offer unused resources. A short duration improves scheduling performance in smaller clusters, but might lead to resource starvation for other frameworks if you run many frameworks in your cluster.
+-offer_hold_jitter_window (default (1, mins))
+	Maximum amount of random jitter to add to the offer hold time window.
+-offer_reservation_duration (default (3, mins))
+	Time to reserve a agent's offers while trying to satisfy a task preempting another.
+-populate_discovery_info (default false)
+	If true, Aurora populates DiscoveryInfo field of Mesos TaskInfo.
+-preemption_delay (default (3, mins))
+	Time interval after which a pending task becomes eligible to preempt other tasks
+-preemption_slot_finder_modules (default [class org.apache.aurora.scheduler.preemptor.PendingTaskProcessorModule, class org.apache.aurora.scheduler.preemptor.PreemptionVictimFilterModule])
+  Guice modules for replacing preemption logic.
+-preemption_slot_hold_time (default (5, mins))
+	Time to hold a preemption slot found before it is discarded.
+-preemption_slot_search_interval (default (1, mins))
+	Time interval between pending task preemption slot searches.
+-receive_revocable_resources (default false)
+	Allows receiving revocable resource offers from Mesos.
+-reconciliation_explicit_batch_interval (default (5, secs))
+	Interval between explicit batch reconciliation requests.
+-reconciliation_explicit_batch_size (default 1000) [must be > 0]
+	Number of tasks in a single batch request sent to Mesos for explicit reconciliation.
+-reconciliation_explicit_interval (default (60, mins))
+	Interval on which scheduler will ask Mesos for status updates of all non-terminal tasks known to scheduler.
+-reconciliation_implicit_interval (default (60, mins))
+	Interval on which scheduler will ask Mesos for status updates of all non-terminal tasks known to Mesos.
+-reconciliation_initial_delay (default (1, mins))
+	Initial amount of time to delay task reconciliation after scheduler start up.
+-reconciliation_schedule_spread (default (30, mins))
+	Difference between explicit and implicit reconciliation intervals intended to create a non-overlapping task reconciliation schedule.
+-require_docker_use_executor (default true)
+	If false, Docker tasks may run without an executor (EXPERIMENTAL)
+-scheduling_max_batch_size (default 3) [must be > 0]
+	The maximum number of scheduling attempts that can be processed in a batch.
+-serverset_endpoint_name (default http)
+	Name of the scheduler endpoint published in ZooKeeper.
+-shiro_ini_path
+	Path to shiro.ini for authentication and authorization configuration.
+-shiro_realm_modules (default [class org.apache.aurora.scheduler.http.api.security.IniShiroRealmModule])
+	Guice modules for configuring Shiro Realms.
+-sla_non_prod_metrics (default [])
+	Metric categories collected for non production tasks.
+-sla_prod_metrics (default [JOB_UPTIMES, PLATFORM_UPTIME, MEDIANS])
+	Metric categories collected for production tasks.
+-sla_stat_refresh_interval (default (1, mins))
+	The SLA stat refresh interval.
+-stat_retention_period (default (1, hrs))
+	Time for a stat to be retained in memory before expiring.
+-stat_sampling_interval (default (1, secs))
+	Statistic value sampling interval.
+-task_assigner_modules (default [class org.apache.aurora.scheduler.state.FirstFitTaskAssignerModule])
+  Guice modules for replacing task assignment logic.
+-thermos_executor_cpu (default 0.25)
+	The number of CPU cores to allocate for each instance of the executor.
+-thermos_executor_flags
+	Extra arguments to be passed to the thermos executor
+-thermos_executor_ram (default (128, MB))
+	The amount of RAM to allocate for each instance of the executor.
+-thermos_executor_resources (default [])
+	A comma separated list of additional resources to copy into the sandbox.Note: if thermos_executor_path is not the thermos_executor.pex file itself, this must include it.
+-thermos_home_in_sandbox (default false)
+	If true, changes HOME to the sandbox before running the executor. This primarily has the effect of causing the executor and runner to extract themselves into the sandbox.
+-thrift_method_interceptor_modules (default [])
+	Additional Guice modules for intercepting Thrift method calls.
+-transient_task_state_timeout (default (5, mins))
+	The amount of time after which to treat a task stuck in a transient state as LOST.
+-viz_job_url_prefix (default )
+	URL prefix for job container stats.
+-zk_chroot_path
+	chroot path to use for the ZooKeeper connections
+-zk_digest_credentials
+	user:password to use when authenticating with ZooKeeper.
+-zk_in_proc (default false)
+	Launches an embedded zookeeper server for local testing causing -zk_endpoints to be ignored if specified.
+-zk_session_timeout (default (4, secs))
+	The ZooKeeper session timeout.
+-zk_use_curator (default true)
+	DEPRECATED: Uses Apache Curator as the zookeeper client; otherwise a copy of Twitter commons/zookeeper (the legacy library) is used.
+-------------------------------------------------------------------------
+```
diff --git a/source/documentation/0.19.0/reference/scheduler-endpoints.md b/source/documentation/0.19.0/reference/scheduler-endpoints.md
new file mode 100644
index 0000000..ddae76b
--- /dev/null
+++ b/source/documentation/0.19.0/reference/scheduler-endpoints.md
@@ -0,0 +1,19 @@
+# HTTP endpoints
+
+There are a number of HTTP endpoints that the Aurora scheduler exposes. These allow various
+operational tasks to be performed on the scheduler. Below is an (incomplete) list of such endpoints
+and a brief explanation of what they do.
+
+## Leader health
+The /leaderhealth endpoint enables performing health checks on the scheduler instances in order
+to forward requests to the leading scheduler. This is typically used by a load balancer such as
+HAProxy or AWS ELB.
+
+When an HTTP GET request is issued on this endpoint, it responds as follows:
+
+- If the instance that received the GET request is the leading scheduler, an HTTP status code of
+  `200 OK` is returned.
+- If the instance that received the GET request is not the leading scheduler but a leader does
+  exist, an HTTP status code of `503 SERVICE_UNAVAILABLE` is returned.
+- If no leader currently exists or the leader is unknown, an HTTP status code of `502 BAD_GATEWAY`
+  is returned.
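+
+For example, a probe from the command line could look like the following; the host and
+port are placeholders for your scheduler's HTTP endpoint:
+
+    curl -s -o /dev/null -w '%{http_code}\n' http://scheduler.example.com:8081/leaderhealth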
diff --git a/source/documentation/0.19.0/reference/task-lifecycle.md b/source/documentation/0.19.0/reference/task-lifecycle.md
new file mode 100644
index 0000000..8ec0077
--- /dev/null
+++ b/source/documentation/0.19.0/reference/task-lifecycle.md
@@ -0,0 +1,150 @@
+# Task Lifecycle
+
+When Aurora reads a configuration file and finds a `Job` definition, it:
+
+1.  Evaluates the `Job` definition.
+2.  Splits the `Job` into its constituent `Task`s.
+3.  Sends those `Task`s to the scheduler.
+4.  The scheduler puts the `Task`s into `PENDING` state, starting each
+    `Task`'s life cycle.
+
+
+![Life of a task](../images/lifeofatask.png)
+
+Please note, a couple of task states described below are missing from
+this state diagram.
+
+
+## PENDING to RUNNING states
+
+When a `Task` is in the `PENDING` state, the scheduler constantly
+searches for machines satisfying that `Task`'s resource request
+requirements (RAM, disk space, CPU time) while maintaining configuration
+constraints such as "a `Task` must run on machines  dedicated  to a
+particular role" or attribute limit constraints such as "at most 2
+`Task`s from the same `Job` may run on each rack". When the scheduler
+finds a suitable match, it assigns the `Task` to a machine and puts the
+`Task` into the `ASSIGNED` state.
+
+From the `ASSIGNED` state, the scheduler sends an RPC to the agent
+machine containing `Task` configuration, which the agent uses to spawn
+an executor responsible for the `Task`'s lifecycle. When the scheduler
+receives an acknowledgment that the machine has accepted the `Task`,
+the `Task` goes into `STARTING` state.
+
+`STARTING` state initializes a `Task` sandbox. When the sandbox is fully
+initialized, Thermos begins to invoke `Process`es. The agent
+machine sends an update to the scheduler that the `Task` is
+in `RUNNING` state only after the task satisfies its liveness requirements.
+See [Health Checking](../features/services#health-checking) for more details
+on how to configure health checks.
+
+
+
+## RUNNING to terminal states
+
+There are various ways that an active `Task` can transition into a terminal
+state. By definition, it can never leave this state. However, depending on
+the nature of the termination and the originating `Job` definition
+(e.g. `service`, `max_task_failures`), a replacement `Task` might be
+scheduled.
+
+### Natural Termination: FINISHED, FAILED
+
+A `RUNNING` `Task` can terminate without direct user interaction. For
+example, it may be a finite computation that finishes, even something as
+simple as `echo hello world.`, or it could be an exceptional condition in
+a long-lived service. If the `Task` is successful (its underlying
+processes have succeeded with exit status `0` or finished without
+reaching failure limits) it moves into `FINISHED` state. If it finished
+after reaching a set of failure limits, it goes into `FAILED` state.
+
+A terminated `Task` which is subject to rescheduling will be temporarily
+`THROTTLED` if it is considered to be flapping. A task is flapping if its
+previous invocation was terminated after less than 5 minutes (the scheduler
+default). The time penalty a task has to remain in the `THROTTLED` state,
+before it is eligible for rescheduling, increases with each consecutive
+failure.
+
+### Forceful Termination: KILLING, RESTARTING
+
+You can terminate a `Task` by issuing an `aurora job kill` command, which
+moves it into `KILLING` state. The scheduler then sends the agent a
+request to terminate the `Task`. If the scheduler receives a successful
+response, it moves the Task into `KILLED` state and never restarts it.
+
+If a `Task` is forced into the `RESTARTING` state via the `aurora job restart`
+command, the scheduler kills the underlying task but in parallel schedules
+an identical replacement for it.
+
+In any case, the responsible executor on the agent follows an escalation
+sequence when killing a running task:
+
+  1. If a `HttpLifecycleConfig` is not present, skip to (4).
+  2. Send a POST to the `graceful_shutdown_endpoint` and wait
+  `graceful_shutdown_wait_secs` seconds.
+  3. Send a POST to the `shutdown_endpoint` and wait
+  `shutdown_wait_secs` seconds.
+  4. Send SIGTERM (`kill`) and wait at most `finalization_wait` seconds.
+  5. Send SIGKILL (`kill -9`).
+
+If the executor notices that all `Process`es in a `Task` have aborted
+during this sequence, it will not proceed with subsequent steps.
+Note that graceful shutdown is best-effort, and due to the many
+inevitable realities of distributed systems, it may not be performed.
+
+### Unexpected Termination: LOST
+
+If a `Task` stays in a transient task state for too long (such as `ASSIGNED`
+or `STARTING`), the scheduler forces it into `LOST` state, creating a new
+`Task` in its place that's sent into `PENDING` state.
+
+In addition, if the Mesos core tells the scheduler that an agent has
+become unhealthy (or outright disappeared), the `Task`s assigned to that
+agent go into `LOST` state and new `Task`s are created in their place.
+From `PENDING` state, there is no guarantee a `Task` will be reassigned
+to the same machine unless job constraints explicitly force it there.
+
+### Giving Priority to Production Tasks: PREEMPTING
+
+Sometimes a Task needs to be interrupted, such as when a non-production
+Task's resources are needed by a higher priority production Task. This
+type of interruption is called a *pre-emption*. When this happens in
+Aurora, the non-production Task is killed and moved into
+the `PREEMPTING` state when both of the following are true:
+
+- The task being killed is a non-production task.
+- The other task is a `PENDING` production task that hasn't been
+  scheduled due to a lack of resources.
+
+The scheduler UI shows the non-production task was preempted in favor of
+the production task. At some point, tasks in `PREEMPTING` move to `KILLED`.
+
+Note that non-production tasks consuming many resources are likely to be
+preempted in favor of production tasks.
+
+### Making Room for Maintenance: DRAINING
+
+Cluster operators can set an agent into maintenance mode. This will transition
+all `Task`s running on this agent into `DRAINING` and eventually to `KILLED`.
+Drained `Task`s will be restarted on other agents for which no maintenance
+has been announced yet.
+
+
+
+## State Reconciliation
+
+Due to the many inevitable realities of distributed systems, there might
+be a mismatch of perceived and actual cluster state (e.g. a machine returns
+from a `netsplit` but the scheduler has already marked all its `Task`s as
+`LOST` and rescheduled them).
+
+Aurora regularly runs a state reconciliation process in order to detect
+and correct such issues (e.g. by killing the errant `RUNNING` tasks).
+By default, the proper detection of all failure scenarios and inconsistencies
+may take up to an hour.
+
+To emphasize this point: there is no uniqueness guarantee for a single
+instance of a job in the presence of network partitions. If the `Task`
+requires that, it should be baked in at the application level using a
+distributed coordination service such as Zookeeper.
diff --git a/source/documentation/0.19.1/additional-resources/presentations.md b/source/documentation/0.19.1/additional-resources/presentations.md
new file mode 100644
index 0000000..ef708cd
--- /dev/null
+++ b/source/documentation/0.19.1/additional-resources/presentations.md
@@ -0,0 +1,80 @@
+# Apache Aurora Presentations
+Video and slides from presentations and panel discussions about Apache Aurora.
+
+_(Listed in date descending order)_
+
+<table>
+
+	<tr>
+		<td><img src="/documentation/0.19.1/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png" alt="Mesos and Aurora on a Small Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=q5iIqhaCJ_o">Mesos &amp; Aurora on a Small Scale (Video)</a></strong>
+		<p>Presented by Florian Pfeiffer</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.19.1/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png" alt="SLA Aware Maintenance for Operators Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=tZ0-SISvCis">SLA Aware Maintenance for Operators (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.19.1/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png" alt="Shipping Code with Aurora Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=y1hi7K1lPkk">Shipping Code with Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.19.1/images/presentations/09_20_2015_twitter_production_scale_thumb.png" alt="Twitter Production Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=nNrh-gdu9m4">Twitter’s Production Scale: Mesos and Aurora Operations (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.19.1/images/presentations/04_30_2015_monolith_to_microservices_thumb.png" alt="From Monolith to Microservices with Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=yXkOgnyK4Hw">From Monolith to Microservices w/ Aurora (Video)</a></strong>
+		<p>Presented by Thanos Baskous, Tony Dong, Dobromir Montauk</p>
+		<p>April 30, 2015 at <a href="http://www.meetup.com/Bay-Area-Apache-Aurora-Users-Group/events/221219480/">Bay Area Apache Aurora Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.19.1/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png" alt="Aurora + Mesos in Practice at Twitter Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=1XYJGX_qZVU">Aurora + Mesos in Practice at Twitter (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 07, 2015 at <a href="http://www.bigeng.io/aurora-mesos-in-practice-at-twitter">Bigcommerce TechTalk</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.19.1/images/presentations/02_28_2015_apache_aurora_thumb.png" alt="Apache Auroraの始めかた Slideshow Thumbnail" /></td>
+		<td><strong><a href="http://www.slideshare.net/zembutsu/apache-aurora-introduction-and-tutorial-osc15tk">Apache Auroraの始めかた (Slides)</a></strong>
+		<p>Presented by Masahito Zembutsu</p>
+		<p>February 28, 2015 at <a href="http://www.ospn.jp/osc2015-spring/">Open Source Conference 2015 Tokyo Spring</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.19.1/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png" alt="Apache Aurora Adopters Panel Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=2Jsj0zFdRlg">Apache Aurora Adopters Panel (Video)</a></strong>
+		<p>Panelists Ben Staffin, Josh Adams, Bill Farner, Berk Demir</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.19.1/images/presentations/02_19_2015_aurora_at_twitter_thumb.png" alt="Operating Apache Aurora and Mesos at Twitter Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=E4lxX6epM_U">Operating Apache Aurora and Mesos at Twitter (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.19.1/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png" alt="Apache Aurora and Mesos at TellApart" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=ZZXtXLvTXAE">Apache Aurora and Mesos at TellApart (Video)</a></strong>
+		<p>Presented by Steve Niemitz</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.19.1/images/presentations/08_21_2014_past_present_future_thumb.png" alt="Past, Present, and Future of the Aurora Scheduler Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=Dsc5CPhKs4o">Past, Present, and Future of the Aurora Scheduler (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 21, 2014 at <a href="http://events.linuxfoundation.org/events/archive/2014/mesoscon">#MesosCon 2014</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.19.1/images/presentations/03_25_2014_introduction_to_aurora_thumb.png" alt="Introduction to Apache Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=asd_h6VzaJc">Introduction to Apache Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 25, 2014 at <a href="https://www.eventbrite.com/e/aurora-and-mesosframeworksmeetup-tickets-10850994617">Aurora and Mesos Frameworks Meetup</a></p></td>
+	</tr>
+</table>
diff --git a/source/documentation/0.19.1/additional-resources/tools.md b/source/documentation/0.19.1/additional-resources/tools.md
new file mode 100644
index 0000000..7f3613d
--- /dev/null
+++ b/source/documentation/0.19.1/additional-resources/tools.md
@@ -0,0 +1,24 @@
+# Tools
+
+Various tools integrate with Aurora. Is there a tool missing? Let us know, or submit a patch to add it!
+
+* Load-balancing technology used to direct traffic to services running on Aurora:
+  - [synapse](https://github.com/airbnb/synapse) based on HAProxy
+  - [aurproxy](https://github.com/tellapart/aurproxy) based on nginx
+  - [jobhopper](https://github.com/benley/aurora-jobhopper) performs HTTP redirects for easy developer and administrator access
+
+* RPC libraries that integrate with Aurora's [service discovery mechanism](../../features/service-discovery/):
+  - [linkerd](https://linkerd.io/) RPC proxy
+  - [finagle](https://twitter.github.io/finagle) (Scala)
+  - [scales](https://github.com/steveniemitz/scales) (Python)
+
+* Monitoring:
+  - [collectd-aurora](https://github.com/zircote/collectd-aurora) for cluster monitoring using collectd
+  - [Prometheus Aurora exporter](https://github.com/tommyulfsparre/aurora_exporter) for cluster monitoring using Prometheus
+  - [Prometheus service discovery integration](http://prometheus.io/docs/operating/configuration/#zookeeper-serverset-sd-configurations-serverset_sd_config) for discovering and monitoring services running on Aurora
+
+* Packaging and deployment:
+  - [aurora-packaging](https://github.com/apache/aurora-packaging), the source of the official Aurora packages
+
+* Thrift Clients:
+  - [gorealis](https://github.com/rdelval/gorealis) for communicating with the scheduler using Go
diff --git a/source/documentation/0.19.1/contributing.md b/source/documentation/0.19.1/contributing.md
new file mode 100644
index 0000000..d3d1a1f
--- /dev/null
+++ b/source/documentation/0.19.1/contributing.md
@@ -0,0 +1,93 @@
+## Get the Source Code
+
+First things first, you'll need the source! The Aurora source is available from Apache git:
+
+    git clone https://gitbox.apache.org/repos/asf/aurora
+
+Read the Style Guides
+---------------------
+Aurora's codebase is primarily Java and Python and conforms to the Twitter Commons styleguides for
+both languages.
+
+- [Java Style Guide](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/styleguide.md)
+- [Python Style Guide](https://github.com/twitter/commons/blob/master/src/python/twitter/common/styleguide.md)
+
+## Find Something to Do
+
+There are issues in [Jira](https://issues.apache.org/jira/browse/AURORA) with the
+["newbie" label](https://issues.apache.org/jira/issues/?jql=project%20%3D%20AURORA%20AND%20labels%20%3D%20newbie%20and%20resolution%3Dunresolved)
+that are good starting places for new Aurora contributors; pick one of these and dive in! To assign
+a task to yourself, first ask for your JIRA id to be whitelisted, either in IRC/Slack or by
+emailing dev@aurora.apache.org. Once your JIRA account has been whitelisted you can assign tickets
+to yourself. The next step is to prepare your patch and finally post it for review.
+
+## Getting your ReviewBoard Account
+
+Go to https://reviews.apache.org and create an account.
+
+## Setting up your ReviewBoard Environment
+
+Run `./rbt status`. The first time this runs it will bootstrap and you will be asked to log in.
+Subsequent runs will cache your login credentials.
+
+## Submitting a Patch for Review
+
+Post a review with `rbt`, fill out the fields in your browser and hit Publish.
+
+    ./rbt post -o
+
+If you're unsure about who to add as a reviewer, you can default to adding Zameer Manji (zmanji) and
+Joshua Cohen (jcohen). They will take care of finding an appropriate reviewer for the patch.
+
+Once you've done this, you probably want to mark the associated Jira issue as Reviewable.
+
+## Updating an Existing Review
+
+Incorporate review feedback, make some more commits, update your existing review, fill out the
+fields in your browser and hit Publish.
+
+    ./rbt post -o -r <RB_ID>
+
+## Getting Your Review Merged
+
+If you're not an Aurora committer, one of the committers will merge your change in as described
+below. Generally, the last reviewer to give the review a 'Ship It!' will be responsible.
+
+### Merging Your Own Review (Committers)
+
+Once you have shipits from the right committers, merge your changes in a single commit and mark
+the review as submitted. The typical workflow is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>  # Verify the automatically-generated commit message looks sane,
+                            # editing if necessary.
+    git show master         # Verify everything looks sane
+    git push origin master
+    ./rbt close <RB_ID>
+
+Note that even if you're developing using feature branches you will not use `git merge` - each
+commit will be an atomic change accompanied by a ReviewBoard entry.
+
+### Merging Someone Else's Review
+
+Sometimes you'll need to merge someone else's RB. The typical workflow for this is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>
+    git show master  # Verify everything looks sane, author is correct
+    git push origin master
+
+Note for committers: while we generally use the commit message generated by `./rbt patch`, some
+changes are often required:
+
+1. Ensure the commit message does not exceed 100 characters per line.
+2. Remove the "Testing Done" section. It's generally redundant (can be seen by checking the linked
+  review) or entirely irrelevant to the commit itself.
+
+## Cleaning Up
+
+Your patch has landed, congratulations! The last thing you'll want to do before moving on to your
+next fix is to clean up your Jira and ReviewBoard: the former should be marked as
+"Resolved" while the latter should be marked as "Submitted".
diff --git a/source/documentation/0.19.1/development/client.md b/source/documentation/0.19.1/development/client.md
new file mode 100644
index 0000000..c6bec5b
--- /dev/null
+++ b/source/documentation/0.19.1/development/client.md
@@ -0,0 +1,148 @@
+Developing the Aurora Client
+============================
+
+The client is written in Python, and uses the
+[Pants](http://pantsbuild.github.io/python-readme.html) build tool.
+
+
+Building and Testing
+--------------------
+
+Building and testing the client code are both done using Pants. The relevant targets to know about
+are:
+
+   * Build a client executable: `./pants binary src/main/python/apache/aurora/client:aurora`
+   * Test client code: `./pants test src/test/python/apache/aurora/client/cli:cli`
+
+If you want to build a source distribution of the client, you need to run `./build-support/release/make-python-sdists`.
+
+
+Creating Custom Builds
+----------------------
+
+There are situations where you may want to plug in custom logic to the Client that may not be
+applicable to the open source codebase. Rather than create a whole CLI from scratch, you can
+easily create your own custom, drop-in replacement aurora.pex using the pants build tool.
+
+First, create an AuroraCommandLine implementation as an entry-point for registering customizations:
+
+    from apache.aurora.client.cli.client import AuroraCommandLine
+
+    class CustomAuroraCommandLine(AuroraCommandLine):
+      """Custom AuroraCommandLine for your needs"""
+
+      @property
+      def name(self):
+        return "your-company-aurora"
+
+      @classmethod
+      def get_description(cls):
+        return 'Your Company internal Aurora client command line'
+
+      def __init__(self):
+        super(CustomAuroraCommandLine, self).__init__()
+        # Add custom plugins..
+        self.register_plugin(YourCustomPlugin())
+
+      def register_nouns(self):
+        super(CustomAuroraCommandLine, self).register_nouns()
+        # You can even add new commands / sub-commands!
+        self.register_noun(YourStartUpdateProxy())
+        self.register_noun(YourDeployWorkflowCommand())
+
+Secondly, create a main entry point:
+
+    import sys
+
+    def proxy_main():
+      client = CustomAuroraCommandLine()
+      if len(sys.argv) == 1:
+        sys.argv.append("-h")
+      sys.exit(client.execute(sys.argv[1:]))
+
+Finally, you can wire everything up with a pants BUILD file in your project directory:
+
+    python_binary(
+      name='aurora',
+      entry_point='your_company.aurora.client:proxy_main',
+      dependencies=[
+        ':client_lib'
+      ]
+    )
+
+    python_library(
+      name='client_lib',
+      sources = [
+        'client.py',
+        'custom_plugin.py',
+        'custom_command.py',
+      ],
+      dependencies = [
+        # The Apache Aurora client
+        # Any other dependencies for your custom code
+      ],
+    )
+
+Using the same commands to build the client as above (but obviously pointing to this BUILD file
+instead), you will have a drop-in replacement aurora.pex file with your customizations.
+
+Running/Debugging
+------------------
+
+For manually testing client changes against a cluster, we use [Vagrant](https://www.vagrantup.com/).
+To start a virtual cluster, you need to install Vagrant, and then run `vagrant up` from the root of
+the aurora workspace. This will create a vagrant host named "devcluster", with a Mesos master, a set
+of Mesos agents, and an Aurora scheduler.
+
+If you have a change you would like to test in your local cluster, you'll rebuild the client:
+
+    vagrant ssh -c 'aurorabuild client'
+
+Once this completes, the `aurora` command will reflect your changes.
+
+
+Running/Debugging in PyCharm
+-----------------------------
+
+It's possible to use PyCharm to run and debug both the client and client tests in an IDE. In order
+to do this, first run:
+
+    build-support/python/make-pycharm-virtualenv
+
+This script will configure a virtualenv with all of our Python requirements. Once the script
+completes it will emit instructions for configuring PyCharm:
+
+    Your PyCharm environment is now set up.  You can open the project root
+    directory with PyCharm.
+
+    Once the project is loaded:
+      - open project settings
+      - click 'Project Interpreter'
+      - click the cog in the upper-right corner
+      - click 'Add Local'
+      - select 'build-support/python/pycharm.venv/bin/python'
+      - click 'OK'
+
+### Running/Debugging Tests
+After following these instructions, you should now be able to run/debug tests directly from the IDE
+by right-clicking on a test (or test class) and choosing to run or debug:
+
+[![Debug Client Test](../images/debug-client-test.png)](../images/debug-client-test.png)
+
+If you've set a breakpoint, you can see the run will now stop and let you debug:
+
+[![Debugging Client Test](../images/debugging-client-test.png)](../images/debugging-client-test.png)
+
+### Running/Debugging the Client
+Actually running and debugging the client is unfortunately a bit more complex. You'll need to create
+a Run configuration:
+
+* Go to Run → Edit Configurations
+* Click the + icon to add a new configuration.
+* Choose python and name the configuration 'client'.
+* Set the script path to `/your/path/to/aurora/src/main/python/apache/aurora/client/cli/client.py`
+* Set the script parameters to the command you want to run (e.g. `job status <job key>`)
+* Expand the Environment section and click the ellipsis to add a new environment variable
+* Click the + at the bottom to add a new variable named AURORA_CONFIG_ROOT whose value is the
+  path where your cluster configuration can be found. For example, to talk to the scheduler
+  running in the vagrant image, it would be set to `/your/path/to/aurora/examples/vagrant` (this
+  is the directory where our example clusters.json is found).
+* You should now be able to run and debug this configuration!
diff --git a/source/documentation/0.19.1/development/committers-guide.md b/source/documentation/0.19.1/development/committers-guide.md
new file mode 100644
index 0000000..397d63c
--- /dev/null
+++ b/source/documentation/0.19.1/development/committers-guide.md
@@ -0,0 +1,105 @@
+Committer's Guide
+=================
+
+Information for official Apache Aurora committers.
+
+Setting up your email account
+-----------------------------
+Once your Apache ID has been set up you can configure your account, add ssh keys, and set up an
+email forwarding address at
+
+    http://id.apache.org
+
+Additional instructions for setting up your new committer email can be found at
+
+    http://www.apache.org/dev/user-email.html
+
+The recommended setup is to configure all services (mailing lists, JIRA, ReviewBoard) to send
+emails to your @apache.org email address.
+
+
+Creating a gpg key for releases
+-------------------------------
+In order to create a release candidate you will need a gpg key published to an external key server
+and that key will need to be added to our KEYS file as well.
+
+1. Create a key:
+
+               gpg --gen-key
+
+2. Add your gpg key to the Apache Aurora KEYS file:
+
+               git clone https://gitbox.apache.org/repos/asf/aurora.git
+               (gpg --list-sigs <KEY ID> && gpg --armor --export <KEY ID>) >> KEYS
+               git add KEYS && git commit -m "Adding gpg key for <APACHE ID>"
+               ./rbt post -o -g
+
+3. Publish the key to an external key server:
+
+               gpg --keyserver pgp.mit.edu --send-keys <KEY ID>
+
+4. Upload the updated KEYS file to the Apache Aurora svn dist locations listed below:
+
+               https://dist.apache.org/repos/dist/dev/aurora/KEYS
+               https://dist.apache.org/repos/dist/release/aurora/KEYS
+
+5. Add your key to git config for use with the release scripts:
+
+               git config --global user.signingkey <KEY ID>
+
+
+Creating a release
+------------------
+The following will guide you through the steps to create a release candidate, vote, and finally an
+official Apache Aurora release. Before starting your gpg key should be in the KEYS file and you
+must have access to commit to the dist.a.o repositories.
+
+1. Ensure that all issues resolved for this release candidate are tagged with the correct Fix
+Version in JIRA; the changelog script will use this to generate the CHANGELOG in step #3.
+To assign the fix version:
+
+    * Look up the [previous release date](https://issues.apache.org/jira/browse/aurora/?selectedTab=com.atlassian.jira.jira-projects-plugin:versions-panel).
+    * Query all issues resolved after that release date: `project = AURORA AND status in (resolved, Closed) and fixVersion is empty and resolutiondate >= "YYYY/MM/DD"`
+    * In the upper right corner of the query result, select Tools > Bulk Edit.
+    * Select all issues > edit issue > set 'Change Fix Version/s' to the release version.
+    * Make sure to uncheck 'Send mail for this update' at the bottom.
+
+2. Prepare RELEASE-NOTES.md for the release. This just boils down to removing the "(Not yet
+released)" suffix from the impending release.
+
+3. Create a release candidate. This will automatically update the CHANGELOG and commit it, create a
+branch and update the current version within the trunk. To create a minor version update and publish
+it run
+
+               ./build-support/release/release-candidate -l m -p
+
+4. Update, if necessary, the draft email created from the `release-candidate` script in step #3 and
+send the [VOTE] email to the dev@ mailing list. You can verify the release signature and checksums
+by running
+
+               ./build-support/release/verify-release-candidate
+
+5. Wait for the vote to complete. If the vote fails, close the vote by replying to the initial [VOTE]
+email sent in step #4, editing the subject to [RESULT][VOTE] ... and noting the failure reason
+(example [here](http://markmail.org/message/d4d6xtvj7vgwi76f)). You'll also need to manually revert
+the commits generated by the release candidate script that incremented the snapshot version and
+updated the changelog. Once that is done, address any issues and go back to step #1 and run
+again, this time using the `-r` flag to increment the release candidate version. This will
+automatically clean up the release candidate rc0 branch and source distribution.
+
+               ./build-support/release/release-candidate -l m -r 1 -p
+
+6. Once the vote has successfully passed, create the release.
+
+**IMPORTANT: make sure to use the correct release at this final step (e.g.: `-r 1` if the rc1 candidate
+has been voted for). Once the release tag is pushed it will be very hard to undo, because a remote
+git pre-receive hook explicitly forbids release tag manipulations.**
+
+               ./build-support/release/release
+
+7. Update the draft email created from the `release` script in step #6 to include the Apache IDs for
+all binding votes and send the [RESULT][VOTE] email to the dev@ mailing list.
+
+8. Update the [Aurora Website](http://aurora.apache.org/) by following the
+[instructions](https://svn.apache.org/repos/asf/aurora/site/README.md) on the ASF Aurora SVN repo.
+Remember to add a blog post under source/blog and regenerate the site before committing.
diff --git a/source/documentation/0.19.1/development/db-migration.md b/source/documentation/0.19.1/development/db-migration.md
new file mode 100644
index 0000000..ed672a5
--- /dev/null
+++ b/source/documentation/0.19.1/development/db-migration.md
@@ -0,0 +1,34 @@
+DB Migrations
+=============
+
+Changes to the DB schema should be made in the form of migrations. This ensures that all changes
+are applied correctly after a DB dump from a previous version is restored.
+
+DB migrations are managed through a system built on top of
+[MyBatis Migrations](http://www.mybatis.org/migrations/). The migrations are run automatically when
+a snapshot is restored; no manual interaction is required by cluster operators.
+
+Upgrades
+--------
+When adding or altering tables or changing data, in addition to making the change in
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql), a new
+migration class should be created under the org.apache.aurora.scheduler.storage.db.migration
+package. The class should implement the [MigrationScript](https://github.com/mybatis/migrations/blob/master/src/main/java/org/apache/ibatis/migration/MigrationScript.java)
+interface (see [V001_TestMigration](https://github.com/apache/aurora/blob/rel/0.19.1/src/test/java/org/apache/aurora/scheduler/storage/db/testmigration/V001_TestMigration.java)
+as an example). The upgrade and downgrade scripts are defined in this class. When restoring a
+snapshot the list of migrations on the classpath is compared to the list of applied changes in the
+DB. Any changes that have not yet been applied are executed and their downgrade script is stored
+alongside the changelog entry in the database to facilitate downgrades in the event of a rollback.
+
+Downgrades
+----------
+If, while running migrations, a rollback is detected, i.e. a change exists in the DB changelog that
+does not exist on the classpath, the downgrade script associated with each affected change is
+applied.
+
+Baselines
+---------
+After enough time has passed (at least 1 official release), it should be safe to baseline migrations
+if desired. This can be accomplished by ensuring the changes from migrations have been applied to
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql) and then
+removing the corresponding migration classes and adding a migration to remove the changelog entries.
\ No newline at end of file
diff --git a/source/documentation/0.19.1/development/design-documents.md b/source/documentation/0.19.1/development/design-documents.md
new file mode 100644
index 0000000..ee92a60
--- /dev/null
+++ b/source/documentation/0.19.1/development/design-documents.md
@@ -0,0 +1,23 @@
+Design Documents
+================
+
+Since its inception as an Apache project, larger feature additions to the
+Aurora code base are discussed in the form of design documents. Design documents
+are living documents until a consensus has been reached to implement a feature
+in the proposed form.
+
+Current and past documents:
+
+* [Command Hooks for the Aurora Client](../design/command-hooks/)
+* [Dynamic Reservations](https://docs.google.com/document/d/19gV8Po6DIHO14tOC7Qouk8RnboY8UCfRTninwn_5-7c/edit)
+* [GPU Resources in Aurora](https://docs.google.com/document/d/1J9SIswRMpVKQpnlvJAMAJtKfPP7ZARFknuyXl-2aZ-M/edit)
+* [Health Checks for Updates](https://docs.google.com/document/d/1KOO0LC046k75TqQqJ4c0FQcVGbxvrn71E10wAjMorVY/edit)
+* [JobUpdateDiff thrift API](https://docs.google.com/document/d/1Fc_YhhV7fc4D9Xv6gJzpfooxbK4YWZcvzw6Bd3qVTL8/edit)
+* [REST API RFC](https://docs.google.com/document/d/11_lAsYIRlD5ETRzF2eSd3oa8LXAHYFD8rSetspYXaf4/edit)
+* [Revocable Mesos offers in Aurora](https://docs.google.com/document/d/1r1WCHgmPJp5wbrqSZLsgtxPNj3sULfHrSFmxp2GyPTo/edit)
+* [Supporting the Mesos Universal Containerizer](https://docs.google.com/document/d/111T09NBF2zjjl7HE95xglsDpRdKoZqhCRM5hHmOfTLA/edit?usp=sharing)
+* [Tier Management In Apache Aurora](https://docs.google.com/document/d/1erszT-HsWf1zCIfhbqHlsotHxWUvDyI2xUwNQQQxLgs/edit?usp=sharing)
+* [Ubiquitous Jobs](https://docs.google.com/document/d/12hr6GnUZU3mc7xsWRzMi3nQILGB-3vyUxvbG-6YmvdE/edit)
+* [Pluggable Scheduling](https://docs.google.com/document/d/1fVHLt9AF-YbOCVCDMQmi5DATVusn-tqY8DldKbjVEm0/edit)
+
+Design documents can be found in the Aurora issue tracker via the query [`project = AURORA AND text ~ "docs.google.com" ORDER BY created`](https://issues.apache.org/jira/browse/AURORA-1528?jql=project%20%3D%20AURORA%20AND%20text%20~%20%22docs.google.com%22%20ORDER%20BY%20created).
diff --git a/source/documentation/0.19.1/development/design/command-hooks.md b/source/documentation/0.19.1/development/design/command-hooks.md
new file mode 100644
index 0000000..3f3f70f
--- /dev/null
+++ b/source/documentation/0.19.1/development/design/command-hooks.md
@@ -0,0 +1,102 @@
+# Command Hooks for the Aurora Client
+
+## Introduction/Motivation
+
+We've got hooks in the client that surround API calls. These are
+pretty awkward, because they don't correlate with user actions. For
+example, suppose we wanted a policy that said users weren't allowed to
+kill all instances of a production job at once.
+
+Right now, all that we could hook would be the "killJob" api call. But
+kill (at least in newer versions of the client) normally runs in
+batches. If a user called killall, what we would see on the API level
+is a series of "killJob" calls, each of which specified a batch of
+instances. We wouldn't be able to distinguish between really killing
+all instances of a job (which is forbidden under this policy), and
+carefully killing in batches (which is permitted). In each case, the
+hook would just see a series of API calls, and couldn't find out what
+the actual command being executed was!
+
+For most policy enforcement, what we really want to be able to do is
+look at and vet the commands that a user is performing, not the API
+calls that the client uses to implement those commands.
+
+So I propose that we add a new kind of hook, which surrounds noun/verb
+commands. A hook will register itself to handle a collection of (noun,
+verb) pairs. Whenever any of those noun/verb commands are invoked, the
+hook's methods will be called around the execution of the verb. A
+pre-hook will have the ability to reject a command, preventing the
+verb from being executed.
+
+## Registering Hooks
+
+These hooks will be registered via configuration plugins. A configuration plugin
+can register hooks using an API. Hooks registered this way are, effectively,
+hardwired into the client executable.
+
+The order of execution of hooks is unspecified: they may be called in
+any order. There is no way to guarantee that one hook will execute
+before some other hook.
+
+
+### Global Hooks
+
+Hooks registered by the python call are called _global_ hooks,
+because they will run for all configurations, whether or not they
+specify any hooks in the configuration file.
+
+In the implementation, hooks are registered in the module
+`apache.aurora.client.cli.command_hooks`, using the class
+`GlobalCommandHookRegistry`. A global hook can be registered by calling
+`GlobalCommandHookRegistry.register_command_hook` in a configuration plugin.
+
+### The API
+
+    class CommandHook(object):
+      @property
+      def name(self):
+        """Returns a name for the hook."""
+
+      def get_nouns(self):
+        """Return the nouns that have verbs that should invoke this hook."""
+
+      def get_verbs(self, noun):
+        """Return the verbs for a particular noun that should invoke this hook."""
+
+      @abstractmethod
+      def pre_command(self, noun, verb, context, commandline):
+        """Execute a hook before invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        Returns: True if the command should be allowed to proceed; False if the command
+        should be rejected.
+        """
+
+      def post_command(self, noun, verb, context, commandline, result):
+        """Execute a hook after invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        * result: the result code returned by the verb.
+        Returns: nothing
+        """
+
+    class GlobalCommandHookRegistry(object):
+      @classmethod
+      def register_command_hook(cls, hook):
+        pass
+
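+As an illustrative sketch (not part of the current codebase), a hook implementing this interface
+to veto `job killall` against production job keys, registered from a configuration plugin, could
+look like the following. The class name and the rejection rule are hypothetical:
+
+    from apache.aurora.client.cli.command_hooks import GlobalCommandHookRegistry
+
+    class ProdKillallHook(object):
+      """Hypothetical hook that rejects 'job killall' for job keys containing '/prod/'."""
+
+      @property
+      def name(self):
+        return "prod-killall"
+
+      def get_nouns(self):
+        return ["job"]
+
+      def get_verbs(self, noun):
+        return ["killall"] if noun == "job" else []
+
+      def pre_command(self, noun, verb, context, commandline):
+        # Returning False rejects the command before the verb executes.
+        return not any("/prod/" in arg for arg in commandline)
+
+      def post_command(self, noun, verb, context, commandline, result):
+        pass
+
+    # Typically invoked from a configuration plugin at client startup.
+    GlobalCommandHookRegistry.register_command_hook(ProdKillallHook())
+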
+### Skipping Hooks
+
+To skip a hook, a user uses a command-line option, `--skip-hooks`. The option can either
+specify specific hooks to skip, or "all":
+
+* `aurora --skip-hooks=all job create east/bozo/devel/myjob` will create a job
+  without running any hooks.
+* `aurora --skip-hooks=test,iq job create east/bozo/devel/myjob` will create a job,
+  and will skip only the hooks named "test" and "iq".
diff --git a/source/documentation/0.19.1/development/scheduler.md b/source/documentation/0.19.1/development/scheduler.md
new file mode 100644
index 0000000..fc0836a
--- /dev/null
+++ b/source/documentation/0.19.1/development/scheduler.md
@@ -0,0 +1,118 @@
+Developing the Aurora Scheduler
+===============================
+
+The Aurora scheduler is written in Java code and built with [Gradle](http://gradle.org).
+
+
+Prerequisite
+============
+
+When using Apache Aurora checked out from the source repository or the binary
+distribution, the Gradle wrapper and JavaScript dependencies are provided.
+However, you need to manually install them when using the source release
+downloads:
+
+1. Install Gradle following the instructions on the [Gradle web site](http://gradle.org)
+2. From the root directory of the Apache Aurora project generate the Gradle
+wrapper by running:
+
+    gradle wrapper
+
+
+Getting Started
+===============
+
+You will need Java 8 installed and on your `PATH` or unzipped somewhere with `JAVA_HOME` set. Then
+
+    ./gradlew tasks
+
+will bootstrap the build system and show available tasks. This can take a while the first time you
+run it but subsequent runs will be much faster due to cached artifacts.
+
+Running the Tests
+-----------------
+Aurora has a comprehensive unit test suite. To run the tests use
+
+    ./gradlew build
+
+Gradle will only re-run tests when their dependencies have changed. To force a re-run of all
+tests use
+
+    ./gradlew clean build
+
+Running the build with code quality checks
+------------------------------------------
+To speed up development iteration, the plain gradle commands will not run static analysis tools.
+However, you should run these before posting a review diff, and **always** run this before pushing a
+commit to origin/master.
+
+    ./gradlew build -Pq
+
+Running integration tests
+-------------------------
+To run the same tests that are run in the Apache Aurora continuous integration
+environment:
+
+    ./build-support/jenkins/build.sh
+
+In addition, there is an end-to-end test that runs a suite of aurora commands
+using a virtual cluster:
+
+    ./src/test/sh/org/apache/aurora/e2e/test_end_to_end.sh
+
+Creating a bundle for deployment
+--------------------------------
+Gradle can create a zip file containing Aurora, all of its dependencies, and a launch script with
+
+    ./gradlew distZip
+
+or a tar file containing the same files with
+
+    ./gradlew distTar
+
+The output file will be written to `dist/distributions/aurora-scheduler.zip` or
+`dist/distributions/aurora-scheduler.tar`.
+
+
+
+Developing Aurora Java code
+===========================
+
+Setting up an IDE
+-----------------
+Gradle can generate project files for your IDE. To generate an IntelliJ IDEA project run
+
+    ./gradlew idea
+
+and import the generated `aurora.ipr` file.
+
+Adding or Upgrading a Dependency
+--------------------------------
+New dependencies can be added from Maven central by adding a `compile` dependency to `build.gradle`.
+For example, to add a dependency on `com.example`'s `example-lib` 1.0 add this block:
+
+    compile 'com.example:example-lib:1.0'
+
+NOTE: Anyone thinking about adding a new dependency should first familiarize themselves with the
+Apache Foundation's third-party licensing
+[policy](http://www.apache.org/legal/resolved.html#category-x).
+
+
+
+Developing the Aurora Build System
+==================================
+
+Bootstrapping Gradle
+--------------------
+The following files were autogenerated by `gradle wrapper` using gradle's
+[Wrapper](http://www.gradle.org/docs/current/dsl/org.gradle.api.tasks.wrapper.Wrapper.html) plugin and
+should not be modified directly:
+
+    ./gradlew
+    ./gradlew.bat
+    ./gradle/wrapper/gradle-wrapper.jar
+    ./gradle/wrapper/gradle-wrapper.properties
+
+To upgrade Gradle unpack the new version somewhere, run `/path/to/new/gradle wrapper` in the
+repository root and commit the changed files.
+
diff --git a/source/documentation/0.19.1/development/thermos.md b/source/documentation/0.19.1/development/thermos.md
new file mode 100644
index 0000000..0efe130
--- /dev/null
+++ b/source/documentation/0.19.1/development/thermos.md
@@ -0,0 +1,126 @@
+The Python components of Aurora are built using [Pants](https://pantsbuild.github.io).
+
+
+Python Build Conventions
+========================
+The Python code is laid out according to the following conventions:
+
+1. 1 `BUILD` per 3rd level directory. For a list of current top-level packages run:
+
+        % find src/main/python -maxdepth 3 -mindepth 3 -type d |\
+        while read dname; do echo $dname |\
+            sed 's@src/main/python/\(.*\)/\(.*\)/\(.*\).*@\1.\2.\3@'; done
+
+2.  Each `BUILD` file exports 1
+    [`python_library`](https://pantsbuild.github.io/build_dictionary.html#bdict_python_library)
+    that provides a
+    [`setup_py`](https://pantsbuild.github.io/build_dictionary.html#setup_py)
+    containing each
+    [`python_binary`](https://pantsbuild.github.io/build_dictionary.html#python_binary)
+    in the `BUILD` file, named the same as the directory it's in so that it can be referenced
+    without a ':' character. The `sources` field in the `python_library` will almost always be
+    `rglobs('*.py')`.
+
+3.  Other BUILD files may only depend on this single public `python_library`
+    target. Any other target is considered a private implementation detail and
+    should be prefixed with an `_`.
+
+4.  `python_binary` targets are always named the same as the exported console script.
+
+5.  `python_binary` targets must have identical `dependencies` to the `python_library` exported
+    by the package and must use `entry_point`.
+
+    This means a PEX file generated by pants will contain exactly the same files that will be
+    available on the `PYTHONPATH` in the case of `pip install` of the corresponding library
+    target. This will help our migration off of Pants in the future.
+
+Annotated example - apache.thermos.runner
+-----------------------------------------
+
+    % find src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner/__init__.py
+    src/main/python/apache/thermos/runner/thermos_runner.py
+    src/main/python/apache/thermos/runner/BUILD
+    % cat src/main/python/apache/thermos/runner/BUILD
+    # License boilerplate omitted
+    import os
+
+
+    # Private target so that a setup_py can exist without a circular dependency. Only targets within
+    # this file should depend on this.
+    python_library(
+      name = '_runner',
+      # The target covers every python file under this directory and subdirectories.
+      sources = rglobs('*.py'),
+      dependencies = [
+        '3rdparty/python:twitter.common.app',
+        '3rdparty/python:twitter.common.log',
+        # Source dependencies are always referenced without a ':'.
+        'src/main/python/apache/thermos/common',
+        'src/main/python/apache/thermos/config',
+        'src/main/python/apache/thermos/core',
+      ],
+    )
+
+    # Binary target for thermos_runner.pex. Nothing should depend on this - it's only used as an
+    # argument to ./pants binary.
+    python_binary(
+      name = 'thermos_runner',
+      # Use entry_point, not source so the files used here are the same ones tests see.
+      entry_point = 'apache.thermos.bin.thermos_runner',
+      dependencies = [
+        # Notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+    )
+
+    # The public library that everyone importing the runner symbols uses.
+    # The test targets and any other dependent source code should depend on this.
+    python_library(
+      name = 'runner',
+      dependencies = [
+        # Again, notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+      # We always provide a setup_py. This will cause any dependee libraries to automatically
+      # reference this library in their requirements.txt rather than copy the source files into their
+      # sdist.
+      provides = setup_py(
+        # Conventionally named and versioned.
+        name = 'apache.thermos.runner',
+        version = open(os.path.join(get_buildroot(), '.auroraversion')).read().strip().upper(),
+      ).with_binaries({
+        # Every binary in this file should also be repeated here.
+        # Always use the dict-form of .with_binaries so that commands with dashes in their names are
+        # supported.
+        # The console script name is always the same as the PEX with .pex stripped.
+        'thermos_runner': ':thermos_runner',
+      }),
+    )
+
+
+
+Thermos Test resources
+======================
+
+The Aurora source repository and distributions contain several
+[binary files](../../src/test/resources/org/apache/thermos/root/checkpoints) to
+qualify the backwards-compatibility of thermos with checkpoint data. Since
+thermos persists state to disk (to be read by the thermos observer), it is important that we have
+tests that prevent regressions affecting the ability to parse previously-written data.
+
+The files included represent persisted checkpoints that exercise different
+features of thermos. The existing files should not be modified unless
+we are accepting backwards incompatibility, such as with a major release.
+
+It is not practical to write source code to generate these files on the fly,
+as source would be vulnerable to drift (e.g. due to refactoring) in ways
+that would undermine the goal of ensuring backwards compatibility.
+
+The most common reason to add a new checkpoint file would be to provide
+coverage for new thermos features that alter the data format. This is
+accomplished by writing and running a
+[job configuration](../../reference/configuration/) that exercises the feature, and
+copying the checkpoint file from the sandbox directory, which by default is
+`/var/run/thermos/checkpoints/<aurora task id>`.
diff --git a/source/documentation/0.19.1/development/thrift.md b/source/documentation/0.19.1/development/thrift.md
new file mode 100644
index 0000000..6b1045e
--- /dev/null
+++ b/source/documentation/0.19.1/development/thrift.md
@@ -0,0 +1,54 @@
+Thrift
+======
+
+Aurora uses [Apache Thrift](https://thrift.apache.org/) for representing structured data in
+the client/server RPC protocol as well as for internal data storage. While Thrift is capable of
+correctly handling additions and renames of the existing members, field removals must be done
+carefully to ensure backwards compatibility and provide a predictable deprecation cycle. This
+document describes general guidelines for making Thrift schema changes to the existing fields in
+[api.thrift](https://github.com/apache/aurora/blob/rel/0.19.1/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+It is highly recommended to go through the
+[Thrift: The Missing Guide](http://diwakergupta.github.io/thrift-missing-guide/) first to refresh on
+basic Thrift schema concepts.
+
+Checklist
+---------
+Every existing Thrift schema modification is unique in its requirements and must be analyzed
+carefully to identify its scope and expected consequences. The following checklist may help in that
+analysis:
+* Is this a new field/struct? If yes, go ahead
+* Is this a pure field/struct rename without any type/structure change? If yes, go ahead and rename
+* Anything else, read further to make sure your change is properly planned
+
+Deprecation cycle
+-----------------
+Any time a breaking change (e.g.: field replacement or removal) is required, the following cycle
+must be followed:
+
+### vCurrent
+Change is applied in a way that does not break scheduler/client with this version to
+communicate with scheduler/client from vCurrent-1.
+* Do not remove or rename the old field
+* Add a new field as an eventual replacement of the old one and implement a dual read/write
+anywhere the old field is used. If a thrift struct is mapped in the DB store make sure both columns
+are marked as `NOT NULL`
+* Check [storage.thrift](https://github.com/apache/aurora/blob/rel/0.19.1/api/src/main/thrift/org/apache/aurora/gen/storage.thrift) to see if
+the affected struct is stored in Aurora scheduler storage. If so, it's almost certainly also
+necessary to perform a [DB migration](../db-migration/).
+* Add a deprecation jira ticket into the vCurrent+1 release candidate
+* Add a TODO for the deprecated field mentioning the jira ticket
+
+### vCurrent+1
+Finalize the change by removing the deprecated fields from the Thrift schema.
+* Drop any dual read/write routines added in the previous version
+* Remove thrift backfilling in scheduler
+* Remove the deprecated Thrift field
+
+Testing
+-------
+It's always advisable to test your changes in the local vagrant environment to build more
+confidence that your change is backwards compatible. It's easy to simulate different
+client/scheduler versions by playing with `aurorabuild` command. See [this document](../../getting-started/vagrant/)
+for more.
+
diff --git a/source/documentation/0.19.1/development/ui.md b/source/documentation/0.19.1/development/ui.md
new file mode 100644
index 0000000..f41614e
--- /dev/null
+++ b/source/documentation/0.19.1/development/ui.md
@@ -0,0 +1,46 @@
+Developing the Aurora Scheduler UI
+==================================
+
+Installing bower (optional)
+----------------------------
+Third party JS libraries used in Aurora (located at 3rdparty/javascript/bower_components) are
+managed by bower, a JS dependency manager. Bower is only required if you plan to add, remove or
+update JS libraries. Bower can be installed using the following command:
+
+    npm install -g bower
+
+Bower depends on node.js and npm. The easiest way to install node on a mac is via brew:
+
+    brew install node
+
+For more node.js installation options refer to https://github.com/joyent/node/wiki/Installation.
+
+More info on installing and using bower can be found at: http://bower.io/. Once installed, you can
+use the following commands to view and modify the bower repo at
+3rdparty/javascript/bower_components
+
+    bower list
+    bower install <library name>
+    bower remove <library name>
+    bower update <library name>
+    bower help
+
+
+Faster Iteration in Vagrant
+---------------------------
+The scheduler serves UI assets from the classpath. For production deployments this means the assets
+are served from within a jar. However, for faster development iteration, the vagrant image is
+configured to add the `scheduler` subtree of `/vagrant/dist/resources/main` to the head of
+`CLASSPATH`. This path is configured as a shared filesystem to the path on the host system where
+your Aurora repository lives. This means that any updates under `dist/resources/main/scheduler` in
+your checkout will be reflected immediately in the UI served from within the vagrant image.
+
+The one caveat to this is that this path is under `dist` not `src`. This is because the assets must
+be processed by gradle before they can be served. So, unfortunately, you cannot just save your local
+changes and see them reflected in the UI, you must first run `./gradlew processResources`. This is
+less than ideal, but better than having to restart the scheduler after every change. Additionally,
+gradle makes this process somewhat easier with the use of the `--continuous` flag. If you run:
+`./gradlew processResources --continuous` gradle will monitor the filesystem for changes and run the
+task automatically as necessary. This doesn't quite provide hot-reload capabilities, but it does
+allow for <5s from save to changes being visible in the UI with no further action required on the
+part of the developer.
diff --git a/source/documentation/0.19.1/features/constraints.md b/source/documentation/0.19.1/features/constraints.md
new file mode 100644
index 0000000..4ade848
--- /dev/null
+++ b/source/documentation/0.19.1/features/constraints.md
@@ -0,0 +1,126 @@
+Scheduling Constraints
+======================
+
+By default, Aurora will pick any random agent with sufficient resources
+in order to schedule a task. This scheduling choice can be further
+restricted with the help of constraints.
+
+
+Mesos Attributes
+----------------
+
+Data centers are often organized with hierarchical failure domains.  Common failure domains
+include hosts, racks, rows, and PDUs.  If you have this information available, it is wise to tag
+the Mesos agent with them as
+[attributes](https://mesos.apache.org/documentation/attributes-resources/).
+
+The Mesos agent `--attributes` command line argument can be used to mark agents with
+static key/value pairs, so called attributes (not to be confused with `--resources`, which are
+dynamic and accounted).
+
+For example, consider the host `cluster1-aaa-03-sr2` and its following attributes (given in
+key:value format): `host:cluster1-aaa-03-sr2` and `rack:aaa`.
+
+Aurora makes these attributes available for matching with scheduling constraints.
+
+
+Limit Constraints
+-----------------
+
+Limit constraints allow you to control machine diversity. The below
+constraint ensures that no more than two instances of your job may run on a single host.
+Think of this as a "group by" limit.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'host': 'limit:2',
+      }
+      ...
+    )
+
+
+Likewise, you can use constraints to control rack diversity, e.g. at
+most one task per rack:
+
+    constraints = {
+      'rack': 'limit:1',
+    }
+
+Use these constraints sparingly as they can dramatically reduce Tasks' schedulability.
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+
+Value Constraints
+-----------------
+
+Value constraints can be used to express that a certain attribute with a certain value
+should be present on a Mesos agent. For example, the following job would only be
+scheduled on nodes that claim to have an `SSD` as their disk.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'disk': 'SSD',
+      }
+      ...
+    )
+
+
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+Running stateful services
+-------------------------
+
+Aurora is best suited to run stateless applications, but it also accommodates stateful services
+like databases, or services that otherwise need to always run on the same machines.
+
+### Dedicated attribute
+
+Most Mesos attributes are arbitrary and available for custom use.  There is one exception,
+though: the `dedicated` attribute.  Aurora treats this attribute specially: only jobs with a
+matching constraint are allowed to run on these machines, and such jobs will be scheduled only
+on these machines.
+
+
+#### Syntax
+The dedicated attribute has semantic meaning. The format is `$role(/.*)?`. When a job is created,
+the scheduler requires that the `$role` component matches the `role` field in the job
+configuration, and will reject the job creation otherwise.  The remainder of the attribute is
+free-form. We've developed the idiom of formatting this attribute as `$role/$job`, but do not
+enforce this. For example: a job `devcluster/www-data/prod/hello` with a dedicated constraint set as
+`www-data/web.multi` will have its tasks scheduled only on Mesos agents configured with:
+`--attributes=dedicated:www-data/web.multi`.
+
+A wildcard (`*`) may be used for the role portion of the dedicated attribute, which will allow any
+owner to elect for a job to run on the host(s). For example: tasks from both
+`devcluster/www-data/prod/hello` and `devcluster/vagrant/test/hello` with a dedicated constraint
+formatted as `*/web.multi` will be scheduled only on Mesos agents configured with
+`--attributes=dedicated:*/web.multi`. This may be useful when assembling a virtual cluster of
+machines sharing the same set of traits or requirements.
+
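+As a sketch (following the `Service` examples above), the `devcluster/vagrant/test/hello` job
+would simply carry the wildcard value in its constraints:
+
+    Service(
+      name = 'hello',
+      role = 'vagrant',
+      constraints = {
+        'dedicated': '*/web.multi'
+      }
+      ...
+    )
+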
+##### Example
+Consider the following agent command line:
+
+    mesos-slave --attributes="dedicated:db_team/redis" ...
+
+And this job configuration:
+
+    Service(
+      name = 'redis',
+      role = 'db_team',
+      constraints = {
+        'dedicated': 'db_team/redis'
+      }
+      ...
+    )
+
+The job configuration is indicating that it should only be scheduled on agents with the attribute
+`dedicated:db_team/redis`.  Additionally, Aurora will prevent any tasks that do _not_ have that
+constraint from running on those agents.
+
diff --git a/source/documentation/0.19.1/features/containers.md b/source/documentation/0.19.1/features/containers.md
new file mode 100644
index 0000000..5bf1601
--- /dev/null
+++ b/source/documentation/0.19.1/features/containers.md
@@ -0,0 +1,130 @@
+Containers
+==========
+
+Aurora supports several containerizers, notably the Mesos containerizer and the Docker
+containerizer. The Mesos containerizer uses native OS features directly to provide isolation between
+containers, while the Docker containerizer delegates container management to the Docker engine.
+
+Support for launching container images via both containerizers has to be
+[enabled by a cluster operator](../../operations/configuration/#containers).
+
+Mesos Containerizer
+-------------------
+
+The Mesos containerizer is the native Mesos containerizer solution. It allows tasks to be
+run with an array of [pluggable isolators](../resource-isolation/) and can launch tasks using
+[Docker](https://github.com/docker/docker/blob/master/image/spec/v1.md) images,
+[AppC](https://github.com/appc/spec/blob/master/SPEC.md) images, or directly on the agent host
+filesystem.
+
+The following example (available in our [Vagrant environment](../../getting-started/vagrant/))
+launches a hello world example within a `debian/jessie` Docker image:
+
+    $ cat /vagrant/examples/jobs/hello_docker_image.aurora
+    hello_loop = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = Task(
+      processes = [hello_loop],
+      resources = Resources(cpu=1, ram=1*MB, disk=8*MB)
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker_image',
+        task = task,
+        container = Mesos(image=DockerImage(name='debian', tag='jessie'))
+      )
+    ]
+
+Docker and Appc images are designated using an appropriate `image` property of the `Mesos`
+configuration object. If either `container` or `image` is left unspecified, the host filesystem
+will be used. Further details of how to specify images can be found in the
+[Reference Documentation](../../reference/configuration/#mesos-object).
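+
+For instance, the job above could reference an AppC image instead of a Docker image. This is only
+a sketch: the image name and id below are placeholders, and the `AppcImage` schema is described in
+the configuration reference.
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_appc_image',
+        task = task,
+        # Placeholder name and image id; see the AppcImage schema in the configuration reference.
+        container = Mesos(image=AppcImage(name='example/hello', image_id='sha512-...'))
+      )
+    ]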
+
+By default, Aurora launches processes as the Linux user with the same name as the job's role
+(e.g. `www-data` in the example above). This user has to exist on the host filesystem. If it does
+not exist within the container image, it will be created automatically. Otherwise, this user and
+its primary group have to exist in the image with matching uid/gid.
+
+For more information on the Mesos containerizer filesystem, namespace, and isolator features, visit
+[Mesos Containerizer](http://mesos.apache.org/documentation/latest/mesos-containerizer/) and
+[Mesos Container Images](http://mesos.apache.org/documentation/latest/container-image/).
+
+
+Docker Containerizer
+--------------------
+
+The Docker containerizer launches container images using the Docker engine. It may often provide
+more advanced features than the native Mesos containerizer, but has to be installed separately to
+Mesos on each agent host.
+
+Starting with the 0.17.0 release, `image` can be specified with a `{{docker.image[name][tag]}}` binder so that
+the tag can be resolved to a concrete image digest. This ensures that the job always uses the same image
+across restarts, even if the version identified by the tag has been updated, guaranteeing that only job
+updates can mutate configuration.
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/hello_docker_engine.aurora
+    hello_loop = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = Task(
+      processes = [hello_loop],
+      resources = Resources(cpu=1, ram=1*MB, disk=8*MB)
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker',
+        task = task,
+        container = Docker(image = 'python:2.7')
+      ), Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker_engine_binding',
+        task = task,
+        container = Docker(image = '{{docker.image[library/python][2.7]}}')
+      )
+    ]
+
+Note, this feature requires a v2 Docker registry. If using a private Docker registry its url
+must be specified in the `clusters.json` configuration file under the key `docker_registry`.
+If not specified `docker_registry` defaults to `https://registry-1.docker.io` (Docker Hub).
+
+Example:
+
+    # clusters.json
+    [{
+      "name": "devcluster",
+      ...
+      "docker_registry": "https://registry.example.com"
+    }]
+
+Details of how to use Docker via the Docker engine can be found in the
+[Reference Documentation](../../reference/configuration/#docker-object). Please note that in order to
+correctly execute processes inside a job, the Docker container must have Python 2.7 and potentially
+further Mesos dependencies installed. This limitation does not hold for Docker containers used via
+the Mesos containerizer.
+
+For more information on launching Docker containers through the Docker containerizer, visit
+[Docker Containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/).
diff --git a/source/documentation/0.19.1/features/cron-jobs.md b/source/documentation/0.19.1/features/cron-jobs.md
new file mode 100644
index 0000000..bcc94dc
--- /dev/null
+++ b/source/documentation/0.19.1/features/cron-jobs.md
@@ -0,0 +1,124 @@
+# Cron Jobs
+
+Aurora supports execution of scheduled jobs on a Mesos cluster using cron-style syntax.
+
+- [Overview](#overview)
+- [Collision Policies](#collision-policies)
+- [Failure recovery](#failure-recovery)
+- [Interacting with cron jobs via the Aurora CLI](#interacting-with-cron-jobs-via-the-aurora-cli)
+	- [cron schedule](#cron-schedule)
+	- [cron deschedule](#cron-deschedule)
+	- [cron start](#cron-start)
+	- [job killall, job restart, job kill](#job-killall-job-restart-job-kill)
+- [Technical Note About Syntax](#technical-note-about-syntax)
+- [Caveats](#caveats)
+	- [Failovers](#failovers)
+	- [Collision policy is best-effort](#collision-policy-is-best-effort)
+	- [Timezone Configuration](#timezone-configuration)
+
+## Overview
+
+A job is identified as a cron job by the presence of a
+`cron_schedule` attribute containing a cron-style schedule in the
+[`Job`](../../reference/configuration/#job-objects) object. Examples of cron schedules
+include "every 5 minutes" (`*/5 * * * *`), "Fridays at 17:00" (`* 17 * * FRI`), and
+"the 1st and 15th day of the month at 03:00" (`0 3 1,15 *`).
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/cron_hello_world.aurora
+    # A cron job that runs every 5 minutes.
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]
+
+## Collision Policies
+
+The `cron_collision_policy` field specifies the scheduler's behavior when a new cron job is
+triggered while an older run hasn't finished. The scheduler has two policies available:
+
+* `KILL_EXISTING`: The default policy - on a collision the old instances are killed and instances with the current
+configuration are started.
+* `CANCEL_NEW`: On a collision the new run is cancelled.
+
+Note that the use of `CANCEL_NEW` is likely a code smell - interrupted cron jobs should be able
+to recover their progress on a subsequent invocation, otherwise they risk having their work queue
+grow faster than they can process it.
+
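+As a sketch, the example job above would opt into the non-default policy by adding a single field:
+
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        cron_collision_policy = 'CANCEL_NEW',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]
+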
+## Failure recovery
+
+Unlike with services, which Aurora will always re-execute regardless of exit status, instances of
+cron jobs retry according to the `max_task_failures` attribute of the
+[Task](../../reference/configuration/#task-object) object. To get "run-until-success" semantics,
+set `max_task_failures` to `-1`.
+
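+A sketch of a cron task with run-until-success semantics (the process name and command line are
+placeholders; only `max_task_failures` differs from the defaults):
+
+    ingest = Process(name = 'ingest', cmdline = 'run_nightly_ingest.sh')
+
+    task = Task(
+      processes = [ingest],
+      resources = Resources(cpu = 1, ram = 128*MB, disk = 128*MB),
+      max_task_failures = -1,  # retry failed runs until one succeeds
+    )
+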
+## Interacting with cron jobs via the Aurora CLI
+
+Most interaction with cron jobs takes place using the `cron` subcommand. See `aurora cron -h`
+for up-to-date usage instructions.
+
+### cron schedule
+Schedules a new cron job on the Aurora cluster for later runs or replaces the existing cron template
+with a new one. Only future runs will be affected, any existing active tasks are left intact.
+
+    $ aurora cron schedule devcluster/www-data/test/cron_hello_world /vagrant/examples/jobs/cron_hello_world.aurora
+
+### cron deschedule
+Deschedules a cron job, preventing future runs but allowing current runs to complete.
+
+    $ aurora cron deschedule devcluster/www-data/test/cron_hello_world
+
+### cron start
+Start a cron job immediately, outside of its normal cron schedule.
+
+    $ aurora cron start devcluster/www-data/test/cron_hello_world
+
+### job killall, job restart, job kill
+Cron jobs create instances running on the cluster that you can interact with, like normal Aurora
+tasks, using `job killall`, `job restart`, and `job kill`.
+
+
+## Technical Note About Syntax
+
+`cron_schedule` uses a restricted subset of BSD crontab syntax. While the
+execution engine currently uses Quartz, the schedule parsing is custom and supports a subset of FreeBSD
+[crontab(5)](http://www.freebsd.org/cgi/man.cgi?crontab(5)) syntax. See
+[the source](https://github.com/apache/aurora/blob/master/src/main/java/org/apache/aurora/scheduler/cron/CrontabEntry.java#L106-L124)
+for details.
+
+
+## Caveats
+
+### Failovers
+No failover recovery. Aurora does not record the latest minute it fired
+triggers for across failovers. Therefore it's possible to miss triggers
+on failover. Note that this behavior may change in the future.
+
+It's necessary to sync time between schedulers with something like `ntpd`.
+Clock skew could cause double or missed triggers in the case of a failover.
+
+### Collision policy is best-effort
+Aurora aims to always have *at least one copy* of a given instance running at a time - it's
+an AP system, meaning it chooses Availability and Partition Tolerance at the expense of
+Consistency.
+
+If your collision policy is `CANCEL_NEW` and a task has terminated but
+Aurora has not noticed this, Aurora will go ahead and create your new
+task.
+
+If your collision policy is `KILL_EXISTING` and a task was marked `LOST`
+but not yet GCed, Aurora will go ahead and create your new task without
+attempting to kill the old one (outside the GC interval).
+
+### Timezone Configuration
+Cron timezone is configured independently of the JVM timezone with the `-cron_timezone` flag and
+defaults to UTC.
diff --git a/source/documentation/0.19.1/features/custom-executors.md b/source/documentation/0.19.1/features/custom-executors.md
new file mode 100644
index 0000000..1357c1e
--- /dev/null
+++ b/source/documentation/0.19.1/features/custom-executors.md
@@ -0,0 +1,153 @@
+Custom Executors
+================
+
+If the need arises to use a Mesos executor other than the Thermos executor, the scheduler can be
+configured to utilize a custom executor by specifying the `-custom_executor_config` flag.
+The flag must be set to the path of a valid executor configuration file.
+
+The configuration file must be a valid **JSON array** and contain, at minimum,
+one executor configuration including the `name`, `command`, and `resources` fields. It
+must be pointed to by the `-custom_executor_config` flag when the scheduler is
+started.
+
+### Array Entry
+
+Property                 | Description
+-----------------------  | ---------------------------------
+executor (required)      | Description of executor.
+task_prefix (required)   | Prefix given to tasks launched with this executor's configuration.
+volume_mounts (optional) | Volumes to be mounted in container running executor.
+
+#### executor
+
+Property                 | Description
+-----------------------  | ---------------------------------
+name (required)          | Name of the executor.
+command (required)       | How to run the executor.
+resources (required)     | Overhead to use for each executor instance.
+
+#### command
+
+Property                 | Description
+-----------------------  | ---------------------------------
+value (required)         | The command to execute.
+arguments (optional)     | A list of arguments to pass to the command.
+uris (optional)          | List of resources to download into the task sandbox.
+shell (optional)         | Run executor via shell.
+
+A note on the command property (from [mesos.proto](https://github.com/apache/mesos/blob/master/include/mesos/mesos.proto)):
+
+```
+1) If 'shell == true', the command will be launched via shell
+   (i.e., /bin/sh -c 'value'). The 'value' specified will be
+   treated as the shell command. The 'arguments' will be ignored.
+2) If 'shell == false', the command will be launched by passing
+   arguments to an executable. The 'value' specified will be
+   treated as the filename of the executable. The 'arguments'
+   will be treated as the arguments to the executable. This is
+   similar to how POSIX exec families launch processes (i.e.,
+   execlp(value, arguments(0), arguments(1), ...)).
+```
+
+##### uris (list)
+* Follows the [Mesos Fetcher schema](http://mesos.apache.org/documentation/latest/fetcher/)
+
+Property                 | Description
+-----------------------  | ---------------------------------
+value (required)         | Path to the resource needed in the sandbox.
+executable (optional)    | Change resource to be executable via chmod.
+extract (optional)       | Extract files from packed or compressed archives into the sandbox.
+cache (optional)         | Use caching mechanism provided by Mesos for resources.
+
+#### resources (list)
+
+Property             | Description
+-------------------  | ---------------------------------
+name (required)      | Name of the resource: cpus or mem.
+type (required)      | Type of resource. Should always be SCALAR.
+scalar (required)    | Value in float for cpus or int for mem (in MBs)
+
+### volume_mounts (list)
+
+Property                     | Description
+---------------------------  | ---------------------------------
+host_path (required)         | Host path to mount inside the container.
+container_path (required)    | Path inside the container where `host_path` will be mounted.
+mode (required)              | Mode in which to mount the volume, Read-Write (RW) or Read-Only (RO).
+
+A sample configuration is as follows:
+
+```json
+[
+    {
+      "executor": {
+        "name": "myExecutor",
+        "command": {
+          "value": "myExecutor.a",
+          "shell": "false",
+          "arguments": [
+            "localhost:2181",
+            "-verbose",
+            "-config myConfiguration.config"
+          ],
+          "uris": [
+            {
+              "value": "/dist/myExecutor.a",
+              "executable": true,
+              "extract": false,
+              "cache": true
+            },
+            {
+              "value": "/home/user/myConfiguration.config",
+              "executable": false,
+              "extract": false,
+              "cache": false
+            }
+          ]
+        },
+        "resources": [
+          {
+            "name": "cpus",
+            "type": "SCALAR",
+            "scalar": {
+              "value": 1.00
+            }
+          },
+          {
+            "name": "mem",
+            "type": "SCALAR",
+            "scalar": {
+              "value": 512
+            }
+          }
+        ]
+      },
+      "volume_mounts": [
+        {
+          "mode": "RO",
+          "container_path": "/path/on/container",
+          "host_path": "/path/to/host/directory"
+        },
+        {
+          "mode": "RW",
+          "container_path": "/container",
+          "host_path": "/host"
+        }
+      ],
+      "task_prefix": "my-executor-"
+    }
+]
+```
+
+Note that if you do not use Thermos or a Thermos-based executor, task links in the scheduler's
+web UI will not work (at least for the time being).
+Some information about launched tasks can still be accessed via the Mesos web UI or via the Aurora client.
+
+### Using a custom executor
+
+At this time, it is not possible to create a job that runs on a custom executor using the default
+Aurora client. To allow the scheduler to pick the correct executor, the `JobConfiguration.TaskConfig.ExecutorConfig.name`
+field must be set to match the name used in the custom executor configuration blob (e.g. to run a job using myExecutor,
+`JobConfiguration.TaskConfig.ExecutorConfig.name` must be set to `myExecutor`). Until support for modifying
+this field is added to Pystachio, the easiest way to launch jobs with custom executors is to use
+an existing custom client such as [gorealis](https://github.com/rdelval/gorealis).
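+
+For illustration, the relevant portion of a task's configuration corresponds to the `ExecutorConfig`
+Thrift struct and, rendered as JSON, might look roughly like the following (a hedged sketch; `data`
+is whatever opaque payload your executor expects):
+
+```json
+"executorConfig": {
+  "name": "myExecutor",
+  "data": "executor-specific payload"
+}
+```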
diff --git a/source/documentation/0.19.1/features/job-updates.md b/source/documentation/0.19.1/features/job-updates.md
new file mode 100644
index 0000000..8a37343
--- /dev/null
+++ b/source/documentation/0.19.1/features/job-updates.md
@@ -0,0 +1,110 @@
+Aurora Job Updates
+==================
+
+`Job` configurations can be updated at any point in their lifecycle.
+Usually updates are done incrementally using a process called a *rolling
+upgrade*, in which Tasks are upgraded in small groups, one group at a
+time.  Updates are done using various Aurora Client commands.
+
+
+Rolling Job Updates
+-------------------
+
+There are several sub-commands to manage job updates:
+
+    aurora update start <job key> <configuration file>
+    aurora update info <job key>
+    aurora update pause <job key>
+    aurora update resume <job key>
+    aurora update abort <job key>
+    aurora update list <cluster>
+
+When you `start` a job update, the command will return once it has sent the
+instructions to the scheduler.  At that point, you may view detailed
+progress for the update with the `info` subcommand, in addition to viewing
+graphical progress in the web browser.  You may also get a full listing of
+in-progress updates in a cluster with `list`.
+
+Once an update has been started, you can `pause` to keep the update but halt
+progress.  This can be useful for doing things like debugging a partially-updated
+job to determine whether you would like to proceed.  You can `resume` to
+proceed.
+
+You may `abort` a job update regardless of the state it is in. This will
+instruct the scheduler to completely abandon the job update and leave the job
+in the current (possibly partially-updated) state.
+
+For a configuration update, the Aurora Scheduler calculates required changes
+by examining the current job config state and the new desired job config.
+It then starts a *rolling batched update process* by going through every batch
+and performing these operations, in order:
+
+- If an instance is not present in the scheduler but is present in
+  the new config, then the instance is created.
+- If an instance is present in both the scheduler and the new config, then
+  the scheduler diffs both task configs. If it detects any changes, it
+  performs an instance update by killing the old config instance and adding
+  the new config instance.
+- If an instance is present in the scheduler but isn't in the new config,
+  then that instance is killed.
+
+The Aurora Scheduler continues through the instance list until all tasks are
+updated and in `RUNNING`. If the scheduler determines the update is not going
+well (based on the criteria specified in the UpdateConfig), it cancels the update.
+
+Update cancellation runs a procedure similar to the update sequence
+described above, but in reverse order. New instance configs are swapped
+with old instance configs and batch updates proceed backwards
+from the point where the update failed. E.g., (0,1,2) (3,4,5) (6,7,
+8-FAIL) results in a rollback in the order (8,7,6) (5,4,3) (2,1,0).
+
+For details on how to control a job update, please see the
+[UpdateConfig](../../reference/configuration/#updateconfig-objects) configuration object.
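+
+For illustration, a job might constrain its rolling update along these lines (a sketch only; field
+names and defaults are defined by the UpdateConfig reference linked above):
+
+    update_config = UpdateConfig(
+      batch_size = 3,               # update three instances at a time
+      watch_secs = 45,              # watch an updated instance for 45s before declaring it healthy
+      max_per_shard_failures = 1,   # allowed failures per instance before it counts as failed
+      max_total_failures = 0        # overall failure budget; exceeding it fails the update
+    )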
+
+
+Coordinated Job Updates
+------------------------
+
+Some Aurora services may benefit from having more control over updates by explicitly
+acknowledging ("heartbeating") job update progress. This may be helpful for mission-critical
+service updates where explicit job health monitoring is vital during the entire job update
+lifecycle. Such job updates would rely on an external service (or a custom client) periodically
+pulsing an active coordinated job update via a
+[pulseJobUpdate RPC](https://github.com/apache/aurora/blob/rel/0.19.1/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+A coordinated update is defined by setting a positive
+[pulse_interval_secs](../../reference/configuration/#updateconfig-objects) value in the job configuration
+file. If no pulses are received within the specified interval, the update will be blocked. A blocked
+update is unable to continue rolling forward (or rolling back) but retains its active status.
+It may only be unblocked by a fresh `pulseJobUpdate` call.
+
+NOTE: A coordinated update starts in `ROLL_FORWARD_AWAITING_PULSE` state and will not make any
+progress until the first pulse arrives. However, a paused update (`ROLL_FORWARD_PAUSED` or
+`ROLL_BACK_PAUSED`) is still considered active and upon resuming will immediately make progress
+provided the pulse interval has not expired.
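+
+A minimal sketch of opting a job into coordinated updates (the external coordinator is then
+responsible for calling `pulseJobUpdate` at least this often):
+
+    update_config = UpdateConfig(
+      pulse_interval_secs = 60   # the update blocks if no pulse arrives within 60 seconds
+    )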
+
+
+Canary Deployments
+------------------
+
+Canary deployments are a pattern for rolling out updates to a subset of job instances,
+in order to test different code versions alongside the actual production job.
+It is a risk-mitigation strategy for job owners and commonly used in a form where
+job instance 0 runs with a different configuration than the instances 1-N.
+
+For example, consider a job with 4 instances that each
+request 1 core of cpu, 1 GB of RAM, and 1 GB of disk space as specified
+in the configuration file `hello_world.aurora`. If you want to
+update it so it requests 2 GB of RAM instead of 1, you can create a new
+configuration file called `new_hello_world.aurora` and
+issue
+
+    aurora update start <job_key_value>/0-1 new_hello_world.aurora
+
+This results in instances 0 and 1 having 1 cpu, 2 GB of RAM, and 1 GB of disk space,
+while instances 2 and 3 have 1 cpu, 1 GB of RAM, and 1 GB of disk space. If instance 3
+dies and restarts, it restarts with 1 cpu, 1 GB RAM, and 1 GB disk space.
+
+This means the same job has two task configurations simultaneously, each valid
+for a different range of instances. While this isn't a recommended
+pattern, it is valid and supported by the Aurora scheduler.
diff --git a/source/documentation/0.19.1/features/mesos-fetcher.md b/source/documentation/0.19.1/features/mesos-fetcher.md
new file mode 100644
index 0000000..967a905
--- /dev/null
+++ b/source/documentation/0.19.1/features/mesos-fetcher.md
@@ -0,0 +1,46 @@
+Mesos Fetcher
+=============
+
+Mesos has support for downloading resources into the sandbox through the
+use of the [Mesos Fetcher](http://mesos.apache.org/documentation/latest/fetcher/).
+
+Aurora supports passing URIs to the Mesos Fetcher dynamically by including
+a list of URIs in job submissions.
+
+How to use
+----------
+The scheduler flag `-enable_mesos_fetcher` must be set to true.
+
+Currently, only the scheduler side of this feature has been implemented,
+so a modification to the existing client or a custom Thrift client is required
+to make use of it.
+
+If using a custom Thrift client, the list of URIs must be included in TaskConfig
+as the `mesosFetcherUris` field.
+
+Each Mesos Fetcher URI has the following data members:
+
+|Property | Description|
+|---------|------|
+|value (required)  |Path to the resource needed in the sandbox.|
+|extract (optional)|Extract files from packed or compressed archives into the sandbox.|
+|cache (optional) | Use caching mechanism provided by Mesos for resources.|
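+
+For illustration, a custom Thrift client might populate the field with entries shaped roughly like
+the following (a hedged sketch of an equivalent JSON rendering; consult api.thrift for the
+authoritative definition of `mesosFetcherUris`, and note the URL is just a placeholder):
+
+    "mesosFetcherUris": [
+      {"value": "https://example.com/my-app/config.tar.gz", "extract": true, "cache": false}
+    ]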
+
+Note that this structure is very similar to the one provided for downloading
+resources needed for a [custom executor](../../operations/configuration/).
+
+This is because both features use the Mesos Fetcher to retrieve resources into
+the sandbox. However, the custom executor feature uses a static set of URIs
+configured on the server side, while the Mesos Fetcher feature uses a dynamic set
+of URIs supplied at job submission time.
+
+Security Implications
+---------------------
+There are security implications that must be taken into account when enabling this feature.
+**Enabling this feature may potentially enable any job-submitting user to perform a privilege escalation.**
+
+Until a more thorough solution is created, one step that has been taken to mitigate this issue
+is to statically mark every user-submitted URI as non-executable. This is in contrast to the set of URIs
+set in the custom executor feature, which may mark any URI as executable.
+
+If the need arises to mark a downloaded URI as executable, please consider using the custom executor feature.
\ No newline at end of file
diff --git a/source/documentation/0.19.1/features/multitenancy.md b/source/documentation/0.19.1/features/multitenancy.md
new file mode 100644
index 0000000..08aedd5
--- /dev/null
+++ b/source/documentation/0.19.1/features/multitenancy.md
@@ -0,0 +1,82 @@
+Multitenancy
+============
+
+Aurora is a multi-tenant system that can run jobs of multiple clients/tenants.
+Going beyond the [resource isolation on an individual host](../resource-isolation/), it is
+crucial to prevent those jobs from stepping on each other's toes.
+
+
+Job Namespaces
+--------------
+
+The namespace for jobs in Aurora follows a hierarchical structure. This is meant to make it easier
+to differentiate between different jobs. A job key consists of four parts. The four parts are
+`<cluster>/<role>/<environment>/<jobname>` in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts.
+* Environment names are namespaces.
+* Jobname is the custom name of your job.
+
+Role names correspond to user accounts. They are used for
+[authentication](../../operations/security/), as the linux user used to run jobs, and for the
+assignment of [quota](#preemption). If you don't know what accounts are available, contact your
+sysadmin.
+
+The environment component in the job key serves as a namespace. The values for
+environment are validated in the scheduler; by default, any of `devel`, `test`,
+`production`, and any value matching the regular expression `staging[0-9]*` is allowed. This
+validation can be changed to allow any arbitrary regular expression by setting the scheduler
+option `allowed_job_environments`.
+
+None of the values imply any difference in the scheduling behavior. Conventionally, the
+"environment" is set so as to indicate a certain level of stability in the behavior of the job
+by ensuring that an appropriate level of testing has been performed on the application code. e.g.
+in the case of a typical Job, releases may progress through the following phases in order of
+increasing level of stability: `devel`, `test`, `staging`, `production`.
+
+
+Configuration Tiers
+-------------------
+
+Tier is a predefined bundle of task configuration options. Aurora schedules tasks and assigns them
+resources based on their tier assignment. The default scheduler tier configuration allows for
+3 tiers:
+
+ - `revocable`: The `revocable` tier requires the task to run with [revocable](../resource-isolation/#oversubscription)
+ resources.
+ - `preemptible`: Setting the task’s tier to `preemptible` allows for the possibility of that task
+ being [preempted](#preemption) by other tasks when the cluster is running low on resources.
+ - `preferred`: The `preferred` tier prevents the task from using [revocable](../resource-isolation/#oversubscription)
+ resources and from being [preempted](#preemption).
+
+Since it is possible that a cluster is configured with a custom tier configuration, users should
+consult their cluster administrator to be informed of the tiers supported by the cluster. Attempts
+to schedule jobs with an unsupported tier will be rejected by the scheduler.
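+
+For example, a job opts into a tier through its configuration (a minimal sketch; `tier` is a `Job`
+field documented in the [configuration reference](../../reference/configuration/#job-objects), and
+the task shown is purely illustrative):
+
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'prod',
+        name = 'batch_crawler',
+        tier = 'preemptible',
+        task = SimpleTask('batch_crawler', './run_crawler.sh')
+      )
+    ]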
+
+
+Preemption
+----------
+
+In order to guarantee that important production jobs are always running, Aurora supports
+preemption.
+
+Consider a pending job that is a candidate for scheduling but cannot be scheduled because of a
+resource shortage. Active tasks can become victims of preemption if:
+
+ - both candidate and victim are owned by the same role and the
+   [priority](../../reference/configuration/#job-objects) of a victim is lower than the
+   [priority](../../reference/configuration/#job-objects) of the candidate.
+ - OR a victim is a `preemptible` or `revocable` [tier](#configuration-tiers) task and the candidate
+   is a `preferred` [tier](#configuration-tiers) task.
+
+In other words, tasks from `preferred` [tier](../../reference/configuration/#job-objects) jobs may
+preempt tasks from any `preemptible` or `revocable` job. However, a `preferred` task may only be
+preempted by tasks from `preferred` jobs in the same role with higher [priority](../../reference/configuration/#job-objects).
+
+Aurora requires resource quotas for [production non-dedicated jobs](../../reference/configuration/#job-objects).
+Quota is enforced at the job role level and, when set, defines a non-preemptible pool of compute resources within
+that role. All job types (service, ad hoc, or cron) require role resource quota unless a job has a
+[dedicated constraint](../constraints/#dedicated-attribute) set.
+
+To grant quota to a particular role in production, an operator can use the command
+`aurora_admin set_quota`.
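+
+A typical invocation takes the cluster, role, and CPU/RAM/disk amounts (a sketch only; check
+`aurora_admin set_quota -h` for the authoritative usage and argument format):
+
+    $ aurora_admin set_quota devcluster www-data 40.0 100GB 400GB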
diff --git a/source/documentation/0.19.1/features/resource-isolation.md b/source/documentation/0.19.1/features/resource-isolation.md
new file mode 100644
index 0000000..a5294f6
--- /dev/null
+++ b/source/documentation/0.19.1/features/resource-isolation.md
@@ -0,0 +1,181 @@
+Resources Isolation and Sizing
+==============================
+
+This document assumes Aurora and Mesos have been configured
+using our [recommended resource isolation settings](../../operations/configuration/#resource-isolation).
+
+- [Isolation](#isolation)
+- [Sizing](#sizing)
+- [Oversubscription](#oversubscription)
+
+
+Isolation
+---------
+
+Aurora is a multi-tenant system; a single software instance runs on a
+server, serving multiple clients/tenants. To share resources among
+tenants, it leverages Mesos for isolation of:
+
+* CPU
+* GPU
+* memory
+* disk space
+* ports
+
+CPU is a soft limit, and handled differently from memory and disk space.
+Too low a CPU value results in throttling your application and
+slowing it down. Memory and disk space are both hard limits; when your
+application goes over these values, it's killed.
+
+### CPU Isolation
+
+Mesos can be configured to use a quota-based CPU scheduler (the *Completely Fair Scheduler*)
+to provide consistent and predictable performance.
+This is effectively a guarantee of resources -- you receive at least what
+you requested, but also no more than you've requested.
+
+The scheduler gives applications a CPU quota for every 100 ms interval.
+When an application uses its quota for an interval, it is throttled for
+the rest of the 100 ms. Usage resets for each interval and unused
+quota does not carry over.
+
+For example, an application specifying 4.0 CPU has access to 400 ms of
+CPU time every 100 ms. This CPU quota can be used in different ways,
+depending on the application and available resources. Consider the
+scenarios shown in this diagram.
+
+![CPU Availability](../images/CPUavailability.png)
+
+* *Scenario A*: the application can use up to 4 cores continuously for
+every 100 ms interval. It is never throttled and starts processing
+new requests immediately.
+
+* *Scenario B* : the application uses up to 8 cores (depending on
+availability) but is throttled after 50 ms. The CPU quota resets at the
+start of each new 100 ms interval.
+
+* *Scenario C* : is like Scenario A, but there is a garbage collection
+event in the second interval that consumes all CPU quota. The
+application throttles for the remaining 75 ms of that interval and
+cannot service requests until the next interval. In this example, the
+garbage collection finished in one interval but, depending on how much
+garbage needs collecting, it may take more than one interval and further
+delay service of requests.
+
+*Technical Note*: Mesos considers logical cores, also known as
+hyperthreading or SMT cores, as the unit of CPU.
+
+### Memory Isolation
+
+Mesos uses dedicated memory allocation. Your application always has
+access to the amount of memory specified in your configuration. The
+application's memory use is defined as the sum of the resident set size
+(RSS) of all processes in a shard. Each shard is considered
+independently.
+
+In other words, say you specified a memory size of 10GB. Each shard
+would receive 10GB of memory. If an individual shard's memory demands
+exceed 10GB, that shard is killed, but the other shards continue
+working.
+
+*Technical note*: Total memory size is not enforced at allocation time,
+so your application can request more than its allocation without getting
+an ENOMEM. However, it will be killed shortly after.
+
+### Disk Space
+
+Disk space used by your application is defined as the sum of the files'
+disk space in your application's directory, including the `stdout` and
+`stderr` logged from your application. Each shard is considered
+independently. You should use off-node storage for your application's
+data whenever possible.
+
+In other words, say you specified disk space size of 100MB. Each shard
+would receive 100MB of disk space. If an individual shard's disk space
+demands exceed 100MB, that shard is killed, but the other shards
+continue working.
+
+After your application finishes running, its allocated disk space is
+reclaimed. Thus, your job's final action should move any disk content
+that you want to keep, such as logs, to your home file system or other
+less transitory storage. Disk reclamation takes place an undefined
+period after the application finish time; until then, the disk contents
+are still available but you shouldn't count on them being so.
+
+*Technical note*: Disk space is not enforced at write time, so your
+application can write above its quota without getting an ENOSPC, but it
+will be killed shortly after. This is subject to change.
+
+### GPU Isolation
+
+GPU isolation will be supported for Nvidia devices starting from Mesos 1.0.
+Access to the allocated units will be exclusive with no sharing between tasks
+allowed (e.g. no fractional GPU allocation). For more details, see the
+[Mesos design document](https://docs.google.com/document/d/10GJ1A80x4nIEo8kfdeo9B11PIbS1xJrrB4Z373Ifkpo/edit#heading=h.w84lz7p4eexl)
+and the [Mesos agent configuration](http://mesos.apache.org/documentation/latest/configuration/).
+
+### Other Resources
+
+Other resources, such as network bandwidth, do not have any performance
+guarantees. For some resources, such as memory bandwidth, there are no
+practical sharing methods so some application combinations collocated on
+the same host may cause contention.
+
+
+Sizing
+-------
+
+### CPU Sizing
+
+To correctly size Aurora-run Mesos tasks, specify a per-shard CPU value
+that lets the task run at its desired performance when at peak load
+distributed across all shards. Include reserve capacity of at least 50%,
+possibly more, depending on how critical your service is (or how
+confident you are about your original estimate :-)), ideally by
+increasing the number of shards to also improve resiliency. When running
+your application, observe its CPU stats over time. If consistently at or
+near your quota during peak load, you should consider increasing either
+per-shard CPU or the number of shards.
+
+### Memory Sizing
+
+Size for your application's peak requirement. Observe the per-instance
+memory statistics over time, as memory requirements can vary over
+different periods. Remember that if your application exceeds its memory
+value, it will be killed, so you should also add a safety margin of
+around 10-20%. If you have the ability to do so, you may also want to
+put alerts on the per-instance memory.
+
+### Disk Space Sizing
+
+Size for your application's peak requirement. Rotate and discard log
+files as needed to stay within your quota. When running a Java process,
+add the maximum size of the Java heap to your disk space requirement, in
+order to account for an out of memory error dumping the heap
+into the application's sandbox space.
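+
+For example, a JVM service with a 2 GB heap might size its sandbox with room for a full heap dump
+(a sketch using the `Resources` object from the configuration DSL; the numbers are illustrative):
+
+    resources = Resources(
+      cpu = 2.0,
+      ram = 4*GB,         # peak RSS plus a 10-20% safety margin
+      disk = 4*GB         # logs and artifacts, plus room for a 2 GB heap dump
+    )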
+
+### GPU Sizing
+
+GPU is highly dependent on your application requirements and is only limited
+by the number of physical GPU units available on a target box.
+
+
+Oversubscription
+----------------
+
+Mesos supports [oversubscription of machine resources](http://mesos.apache.org/documentation/latest/oversubscription/)
+via the concept of revocable tasks. In contrast to non-revocable tasks, revocable tasks are best-effort.
+Mesos reserves the right to throttle or even kill them if they might affect existing high-priority
+user-facing services.
+
+As of today, the only revocable resources supported by Aurora are CPU and RAM. A job can
+opt in to using them by specifying the `revocable` [Configuration Tier](../../features/multitenancy/#configuration-tiers).
+A revocable job will only be scheduled using revocable resources, even if there are plenty of
+non-revocable resources available.
+
+The Aurora scheduler must be [configured to receive revocable offers](../../operations/configuration/#resource-isolation)
+from Mesos and accept revocable jobs. If not configured properly, revocable tasks will never get
+assigned to hosts and will stay in `PENDING`.
+
+For details on how to mark a job as being revocable, see the
+[Configuration Reference](../../reference/configuration/).
diff --git a/source/documentation/0.19.1/features/service-discovery.md b/source/documentation/0.19.1/features/service-discovery.md
new file mode 100644
index 0000000..5a97eaf
--- /dev/null
+++ b/source/documentation/0.19.1/features/service-discovery.md
@@ -0,0 +1,42 @@
+Service Discovery
+=================
+
+It is possible for the Aurora executor to announce tasks into ServerSets for
+the purpose of service discovery.  ServerSets use the Zookeeper [group membership pattern](http://zookeeper.apache.org/doc/trunk/recipes.html#sc_outOfTheBox)
+of which there are several reference implementations:
+
+  - [C++](https://github.com/apache/mesos/blob/master/src/zookeeper/group.cpp)
+  - [Java](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/zookeeper/ServerSetImpl.java#L221)
+  - [Python](https://github.com/twitter/commons/blob/master/src/python/twitter/common/zookeeper/serverset/serverset.py#L51)
+
+These can also be used natively in Finagle using the [ZookeeperServerSetCluster](https://github.com/twitter/finagle/blob/master/finagle-serversets/src/main/scala/com/twitter/finagle/zookeeper/ZookeeperServerSetCluster.scala).
+
+For more information about how to configure announcing, see the [Configuration Reference](../../reference/configuration/).
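+
+As a brief illustration, announcing is enabled per job via the `announce` property (a sketch only;
+the `Announcer` object and its `primary_port`/`portmap` fields are detailed in the Configuration
+Reference):
+
+    announce = Announcer(
+      primary_port = 'http',
+      portmap = {'alias': 'http'}
+    )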
+
+Using Mesos DiscoveryInfo
+-------------------------
+Aurora includes experimental support for populating DiscoveryInfo in Mesos. This can be used to build a
+custom service discovery system that does not rely on ZooKeeper. Please see the `Service Discovery` section in the
+[Mesos Framework Development guide](http://mesos.apache.org/documentation/latest/app-framework-development-guide/) for
+an explanation of the protobuf message in Mesos.
+
+To use this feature, enable the `--populate_discovery_info` flag on the scheduler. All jobs started by the scheduler
+afterwards will have their portmap populated in Mesos and will be discoverable via the `/state` endpoint of the Mesos master and agent.
+
+### Using Mesos DNS
+An example is using [Mesos-DNS](https://github.com/mesosphere/mesos-dns), which is able to generate multiple DNS
+records. With the current implementation, the example job with key `devcluster/vagrant/test/http-example` generates at
+least the following:
+
+1. An A record for `http_example.test.vagrant.aurora.mesos` (which only includes IP address);
+2. A [SRV record](https://en.wikipedia.org/wiki/SRV_record) for
+ `_http_example.test.vagrant._tcp.aurora.mesos`, which includes IP address and every port. This should only
+  be used if the service has one port.
+3. A SRV record `_{port-name}._http_example.test.vagrant._tcp.aurora.mesos` for each port name
+  defined. This should be used when the service has multiple ports.
+
+Things to note:
+
+1. The domain part (".mesos" in above example) can be configured in [Mesos DNS](http://mesosphere.github.io/mesos-dns/docs/configuration-parameters.html);
+2. Right now, the portmap and port aliases in the announcer object are not reflected in DiscoveryInfo, and are therefore not visible in
+   Mesos DNS records either. This is because they are only resolved by the Thermos executor.
diff --git a/source/documentation/0.19.1/features/services.md b/source/documentation/0.19.1/features/services.md
new file mode 100644
index 0000000..91889d2
--- /dev/null
+++ b/source/documentation/0.19.1/features/services.md
@@ -0,0 +1,116 @@
+Long-running Services
+=====================
+
+Jobs that are always restarted on completion, whether successful or unsuccessful,
+are called services. This is useful for long-running processes
+such as webservices that should always be running, unless stopped explicitly.
+
+
+Service Specification
+---------------------
+
+A job is identified as a service by the presence of the flag
+`service=True` in the [`Job`](../../reference/configuration/#job-objects) object.
+The `Service` alias can be used as shorthand for `Job` with `service=True`.
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/hello_world.aurora
+    hello = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = SequentialTask(
+      processes = [hello],
+      resources = Resources(cpu = 1.0, ram = 128*MB, disk = 128*MB)
+    )
+
+    jobs = [
+      Service(
+        task = task,
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'prod',
+        name = 'hello'
+      )
+    ]
+
+
+Jobs without the service bit set only restart up to `max_task_failures` times and only if they
+terminated unsuccessfully either due to human error or machine failure (see the
+[`Job`](../../reference/configuration/#job-objects) object for details).
+
+
+Ports
+-----
+
+In order to be useful, most services have to bind to one or more ports. Aurora enables this
+use case via the [`thermos.ports` namespace](../../reference/configuration/#thermos-namespace), which
+allows you to request arbitrarily named ports:
+
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[http]}}'
+    )
+
+
+When this process is included in a job, the job will be allocated a port, and the command line
+will be replaced with something like:
+
+    ./run_nginx.sh -port 42816
+
+Where 42816 happens to be the allocated port.
+
+For details on how to enable clients to discover this dynamically assigned port, see our
+[Service Discovery](../service-discovery/) documentation.
+
+
+Health Checking
+---------------
+
+Typically, the Thermos executor monitors processes within a task only by liveness of the forked
+process. In addition to that, Aurora has support for rudimentary health checking: either via HTTP
+or via custom shell scripts.
+
+For example, simply by requesting a `health` port, a process can request to be health checked
+via repeated calls to the `/health` endpoint:
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[health]}}'
+    )
+
+Please see the
+[configuration reference](../../reference/configuration/#healthcheckconfig-objects)
+for configuration options for this feature.
+
+Starting with the 0.17.0 release, job updates rely only on task health-checks by introducing
+a `min_consecutive_successes` parameter on the HealthCheckConfig object. This parameter represents
+the number of successful health checks needed before a task is moved into the `RUNNING` state. Tasks
+that do not have enough successful health checks within the first `n` attempts, are moved to the
+`FAILED` state, where `n = ceil(initial_interval_secs/interval_secs) + max_consecutive_failures +
+min_consecutive_successes`. In order to accommodate variability during task warm up, `initial_interval_secs`
+will act as a grace period. Any health-check failures during the first `m` attempts are ignored and
+do not count towards `max_consecutive_failures`, where `m = ceil(initial_interval_secs/interval_secs)`.
+
+As [job updates](../job-updates/) are based only on health-checks, it is not necessary to set
+`watch_secs` to the worst-case update time; it can instead be set to 0. The scheduler considers a
+task that is in the `RUNNING` state to be healthy and proceeds to update the next batch of instances.
+For details on how to control health checks, please see the
+[HealthCheckConfig](../../reference/configuration/#healthcheckconfig-objects) configuration object.
+Existing jobs that do not configure a health-check can fall back to using `watch_secs` to
+monitor a task before considering it healthy.
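+
+A sketch combining the parameters discussed above (values are illustrative only):
+
+    health_check_config = HealthCheckConfig(
+      initial_interval_secs = 10,      # grace period while the task warms up
+      interval_secs = 5,               # check every 5 seconds
+      min_consecutive_successes = 2,   # passing checks required before RUNNING
+      max_consecutive_failures = 3     # consecutive failures before the task is FAILED
+    )
+
+    update_config = UpdateConfig(
+      watch_secs = 0                   # rely on health checks rather than a watch window
+    )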
+
+You can pause health checking by touching a file inside of your sandbox, named `.healthchecksnooze`.
+As long as that file is present, health checks will be disabled, enabling users to gather core
+dumps or other performance measurements without worrying about Aurora's health check killing
+their process.
+
+WARNING: Remember to remove this when you are done; otherwise, your instance will have permanently
+disabled health checks.
diff --git a/source/documentation/0.19.1/features/sla-metrics.md b/source/documentation/0.19.1/features/sla-metrics.md
new file mode 100644
index 0000000..8f27721
--- /dev/null
+++ b/source/documentation/0.19.1/features/sla-metrics.md
@@ -0,0 +1,215 @@
+Aurora SLA Measurement
+======================
+
+- [Overview](#overview)
+- [Metric Details](#metric-details)
+  - [Platform Uptime](#platform-uptime)
+  - [Job Uptime](#job-uptime)
+  - [Median Time To Assigned (MTTA)](#median-time-to-assigned-\(mtta\))
+  - [Median Time To Starting (MTTS)](#median-time-to-starting-\(mtts\))
+  - [Median Time To Running (MTTR)](#median-time-to-running-\(mttr\))
+- [Limitations](#limitations)
+
+## Overview
+
+The primary goal of the feature is collection and monitoring of Aurora job SLA (Service Level
+Agreements) metrics that define a contractual relationship between the Aurora/Mesos platform
+and hosted services.
+
+The Aurora SLA feature is by default only enabled for service (non-cron)
+production jobs (`"production=True"` in your `.aurora` config). It can be enabled for
+non-production services by an operator via the scheduler command line flag `-sla_non_prod_metrics`.
+
+Counters that track SLA measurements are computed periodically within the scheduler.
+The individual instance metrics are refreshed every minute (configurable via
+`sla_stat_refresh_interval`). The instance counters are subsequently aggregated by
+relevant grouping types before being exported to the scheduler's `/vars` endpoint (when using `vagrant`
+that would be `http://192.168.33.7:8081/vars`).
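+
+For example, in the Vagrant environment the exported counters can be inspected with any HTTP client:
+
+    $ curl -s http://192.168.33.7:8081/vars | grep sla_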
+
+
+## Metric Details
+
+### Platform Uptime
+
+*Aggregate amount of time a job spends in a non-runnable state due to platform unavailability
+or scheduling delays. This metric tracks Aurora/Mesos uptime performance and reflects on any
+system-caused downtime events (tasks LOST or DRAINED). Any user-initiated task kills/restarts
+will not degrade this metric.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_platform_uptime_percent`
+* Per cluster - `sla_cluster_platform_uptime_percent`
+
+**Units:** percent
+
+A fault in the task environment may cause Aurora/Mesos to have different views of the task state
+or to lose track of the task's existence. In such cases, the service task is marked as LOST and
+rescheduled by Aurora. For example, this may happen when the task stays in ASSIGNED or STARTING
+for too long or the Mesos agent becomes unhealthy (or disappears completely). The time between
+task entering LOST and its replacement reaching RUNNING state is counted towards platform downtime.
+
+Another example of a platform downtime event is the administrator-requested task rescheduling. This
+happens during planned Mesos agent maintenance when all agent tasks are marked as DRAINED and
+rescheduled elsewhere.
+
+To accurately calculate Platform Uptime, we must separate platform incurred downtime from user
+actions that put a service instance in a non-operational state. It is simpler to isolate
+user-incurred downtime and treat all other downtime as platform incurred.
+
+Currently, a user can cause a healthy service (task) downtime in only two ways: via `killTasks`
+or `restartShards` RPCs. For both, their affected tasks leave an audit state transition trail
+relevant to uptime calculations. By applying a special "SLA meaning" to exposed task state
+transition records, we can build a deterministic downtime trace for every given service instance.
+
+A task going through a state transition carries one of three possible SLA meanings
+(see [SlaAlgorithm.java](https://github.com/apache/aurora/blob/rel/0.19.1/src/main/java/org/apache/aurora/scheduler/sla/SlaAlgorithm.java) for
+sla-to-task-state mapping):
+
+* Task is UP: starts a period where the task is considered to be up and running from the Aurora
+  platform standpoint.
+
+* Task is DOWN: starts a period where the task cannot reach the UP state for some
+  non-user-related reason. Counts towards instance downtime.
+
+* Task is REMOVED from SLA: starts a period where the task is not expected to be UP due to
+  user initiated action or failure. We ignore this period for the uptime calculation purposes.
+
+This metric is recalculated over the last sampling period (last minute) to account for
+any UP/DOWN/REMOVED events. It ignores any UP/DOWN events not immediately adjacent to the
+sampling interval as well as adjacent REMOVED events.
+
+### Job Uptime
+
+*Percentage of the job instances considered to be in RUNNING state for the specified duration
+relative to request time. This is a purely application-side metric that considers the aggregate
+uptime of all RUNNING instances. Any user- or platform-initiated restarts directly affect
+this metric.*
+
+**Collection scope:** We currently expose job uptime values at 5 pre-defined
+percentiles (50th, 75th, 90th, 95th and 99th):
+
+* `sla_<job_key>_job_uptime_50_00_sec`
+* `sla_<job_key>_job_uptime_75_00_sec`
+* `sla_<job_key>_job_uptime_90_00_sec`
+* `sla_<job_key>_job_uptime_95_00_sec`
+* `sla_<job_key>_job_uptime_99_00_sec`
+
+**Units:** seconds
+
+You can also get customized real-time stats from the Aurora client. See `aurora sla -h` for
+more details.
+
+### Median Time To Assigned (MTTA)
+
+*Median time a job spends waiting for its tasks to be assigned to a host. This is a combined
+metric that helps track the dependency of scheduling performance on the requested resources
+(user scope) as well as the internal scheduler bin-packing algorithm efficiency (platform scope).*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtta_ms`
+* Per cluster - `sla_cluster_mtta_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.19.1/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mtta_ms`
+    * `sla_cpu_medium_mtta_ms`
+    * `sla_cpu_large_mtta_ms`
+    * `sla_cpu_xlarge_mtta_ms`
+    * `sla_cpu_xxlarge_mtta_ms`
+  * By RAM:
+    * `sla_ram_small_mtta_ms`
+    * `sla_ram_medium_mtta_ms`
+    * `sla_ram_large_mtta_ms`
+    * `sla_ram_xlarge_mtta_ms`
+    * `sla_ram_xxlarge_mtta_ms`
+  * By DISK:
+    * `sla_disk_small_mtta_ms`
+    * `sla_disk_medium_mtta_ms`
+    * `sla_disk_large_mtta_ms`
+    * `sla_disk_xlarge_mtta_ms`
+    * `sla_disk_xxlarge_mtta_ms`
+
+**Units:** milliseconds
+
+MTTA only considers instances that have already reached ASSIGNED state and ignores those
+that are still PENDING. This ensures straggler instances (e.g. with unreasonable resource
+constraints) do not affect metric curves.
+
+### Median Time To Starting (MTTS)
+
+*Median time a job waits for its tasks to reach STARTING state. This is a comprehensive metric
+reflecting on the overall time it takes for the Aurora/Mesos to start initializing the sandbox
+for a task.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtts_ms`
+* Per cluster - `sla_cluster_mtts_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.19.1/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mtts_ms`
+    * `sla_cpu_medium_mtts_ms`
+    * `sla_cpu_large_mtts_ms`
+    * `sla_cpu_xlarge_mtts_ms`
+    * `sla_cpu_xxlarge_mtts_ms`
+  * By RAM:
+    * `sla_ram_small_mtts_ms`
+    * `sla_ram_medium_mtts_ms`
+    * `sla_ram_large_mtts_ms`
+    * `sla_ram_xlarge_mtts_ms`
+    * `sla_ram_xxlarge_mtts_ms`
+  * By DISK:
+    * `sla_disk_small_mtts_ms`
+    * `sla_disk_medium_mtts_ms`
+    * `sla_disk_large_mtts_ms`
+    * `sla_disk_xlarge_mtts_ms`
+    * `sla_disk_xxlarge_mtts_ms`
+
+**Units:** milliseconds
+
+MTTS only considers instances in STARTING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+### Median Time To Running (MTTR)
+
+*Median time a job waits for its tasks to reach RUNNING state. This is a comprehensive metric
+reflecting on the overall time it takes for the Aurora/Mesos to start executing user content.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mttr_ms`
+* Per cluster - `sla_cluster_mttr_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.19.1/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mttr_ms`
+    * `sla_cpu_medium_mttr_ms`
+    * `sla_cpu_large_mttr_ms`
+    * `sla_cpu_xlarge_mttr_ms`
+    * `sla_cpu_xxlarge_mttr_ms`
+  * By RAM:
+    * `sla_ram_small_mttr_ms`
+    * `sla_ram_medium_mttr_ms`
+    * `sla_ram_large_mttr_ms`
+    * `sla_ram_xlarge_mttr_ms`
+    * `sla_ram_xxlarge_mttr_ms`
+  * By DISK:
+    * `sla_disk_small_mttr_ms`
+    * `sla_disk_medium_mttr_ms`
+    * `sla_disk_large_mttr_ms`
+    * `sla_disk_xlarge_mttr_ms`
+    * `sla_disk_xxlarge_mttr_ms`
+
+**Units:** milliseconds
+
+MTTR only considers instances in RUNNING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+## Limitations
+
+* The availability of Aurora SLA metrics is bound by the scheduler availability.
+
+* All metrics are calculated at a pre-defined interval (currently set at 1 minute).
+  Scheduler restarts may result in missed collections.
diff --git a/source/documentation/0.19.1/features/webhooks.md b/source/documentation/0.19.1/features/webhooks.md
new file mode 100644
index 0000000..286746e
--- /dev/null
+++ b/source/documentation/0.19.1/features/webhooks.md
@@ -0,0 +1,112 @@
+Webhooks
+========
+
+Aurora has an optional feature which allows an operator to specify a file to configure an HTTP webhook
+to receive task state change events. It can be enabled with a scheduler flag, e.g.
+`-webhook_config=/path/to/webhook.json`. At this point, webhooks are still considered *experimental*.
+
+Below is a sample configuration:
+
+```json
+{
+  "headers": {
+    "Content-Type": "application/vnd.kafka.json.v1+json",
+    "Producer-Type": "reliable"
+  },
+  "targetURL": "http://localhost:5000/",
+  "timeoutMsec": 5
+}
+```
+
+And an example of a response that you will get back:
+
+```json
+{
+    "task":
+    {
+        "cachedHashCode":0,
+        "assignedTask": {
+            "cachedHashCode":0,
+            "taskId":"vagrant-test-http_example-8-a6cf7ec5-d793-49c7-b10f-0e14ab80bfff",
+            "task": {
+                "cachedHashCode":-1819348376,
+                "job": {
+                    "cachedHashCode":803049425,
+                    "role":"vagrant",
+                    "environment":"test",
+                    "name":"http_example"
+                    },
+                "owner": {
+                    "cachedHashCode":226895216,
+                    "user":"vagrant"
+                    },
+                "isService":true,
+                "numCpus":0.1,
+                "ramMb":16,
+                "diskMb":8,
+                "priority":0,
+                "maxTaskFailures":1,
+                "production":false,
+                "resources":[
+                    {"cachedHashCode":729800451,"setField":"NUM_CPUS","value":0.1},
+                    {"cachedHashCode":552899914,"setField":"RAM_MB","value":16},
+                    {"cachedHashCode":-1547868317,"setField":"DISK_MB","value":8},
+                    {"cachedHashCode":1957328227,"setField":"NAMED_PORT","value":"http"},
+                    {"cachedHashCode":1954229436,"setField":"NAMED_PORT","value":"tcp"}
+                    ],
+                "constraints":[],
+                "requestedPorts":["http","tcp"],
+                "taskLinks":{"http":"http://%host%:%port:http%"},
+                "contactEmail":"vagrant@localhost",
+                "executorConfig": {
+                    "cachedHashCode":-1194797325,
+                    "name":"AuroraExecutor",
+                    "data": "{\"environment\": \"test\", \"health_check_config\": {\"initial_interval_secs\": 5.0, \"health_checker\": { \"http\": {\"expected_response_code\": 0, \"endpoint\": \"/health\", \"expected_response\": \"ok\"}}, \"max_consecutive_failures\": 0, \"timeout_secs\": 1.0, \"interval_secs\": 1.0}, \"name\": \"http_example\", \"service\": true, \"max_task_failures\": 1, \"cron_collision_policy\": \"KILL_EXISTING\", \"enable_hooks\": false, \"cluster\": \"devcluster\", \"task\": {\"processes\": [{\"daemon\": false, \"name\": \"echo_ports\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"echo \\\"tcp port: {{thermos.ports[tcp]}}; http port: {{thermos.ports[http]}}; alias: {{thermos.ports[alias]}}\\\"\", \"final\": false}, {\"daemon\": false, \"name\": \"stage_server\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"cp /vagrant/src/test/sh/org/apache/aurora/e2e/http_example.py .\", \"final\": false}, {\"daemon\": false, \"name\": \"run_server\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"python http_example.py {{thermos.ports[http]}}\", \"final\": false}], \"name\": \"http_example\", \"finalization_wait\": 30, \"max_failures\": 1, \"max_concurrency\": 0, \"resources\": {\"disk\": 8388608, \"ram\": 16777216, \"cpu\": 0.1}, \"constraints\": [{\"order\": [\"echo_ports\", \"stage_server\", \"run_server\"]}]}, \"production\": false, \"role\": \"vagrant\", \"contact\": \"vagrant@localhost\", \"announce\": {\"primary_port\": \"http\", \"portmap\": {\"alias\": \"http\"}}, \"lifecycle\": {\"http\": {\"graceful_shutdown_endpoint\": \"/quitquitquit\", \"port\": \"health\", \"shutdown_endpoint\": \"/abortabortabort\"}}, \"priority\": 0}"},
+                    "metadata":[],
+                    "container":{
+                        "cachedHashCode":-1955376216,
+                        "setField":"MESOS",
+                        "value":{"cachedHashCode":31}}
+                    },
+                    "assignedPorts":{},
+                    "instanceId":8
+        },
+        "status":"PENDING",
+        "failureCount":0,
+        "taskEvents":[
+            {"cachedHashCode":0,"timestamp":1464992060258,"status":"PENDING","scheduler":"aurora"}]
+        },
+        "oldState":{}}
+```
+
+By default, the webhook watches all TaskStateChanges and sends events to the configured endpoint. If you
+are only interested in certain types of TaskStateChange (e.g. transitions to the `LOST` or `FAILED` statuses),
+you can specify a whitelist of the desired task statuses in webhook.json. The webhook will then only send
+events for the whitelisted statuses to the configured endpoint.
+
+```json
+{
+  "headers": {
+    "Content-Type": "application/vnd.kafka.json.v1+json",
+    "Producer-Type": "reliable"
+  },
+  "targetURL": "http://localhost:5000/",
+  "timeoutMsec": 50,
+  "statuses": ["LOST", "FAILED"]
+}
+```
+
+If you want to whitelist all TaskStateChanges, you can add a wildcard character `*` to your whitelist
+like below, or simply leave out the `statuses` field in webhook.json.
+
+```json
+{
+  "headers": {
+    "Content-Type": "application/vnd.kafka.json.v1+json",
+    "Producer-Type": "reliable"
+  },
+  "targetURL": "http://localhost:5000/",
+  "timeoutMsec": 50,
+  "statuses": ["*"]
+}
+```
diff --git a/source/documentation/0.19.1/getting-started/overview.md b/source/documentation/0.19.1/getting-started/overview.md
new file mode 100644
index 0000000..84c5916
--- /dev/null
+++ b/source/documentation/0.19.1/getting-started/overview.md
@@ -0,0 +1,112 @@
+Aurora System Overview
+======================
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+
+Components
+----------
+
+It is important to have an understanding of the components that make up
+a functioning Aurora cluster.
+
+![Aurora Components](../images/components.png)
+
+* **Aurora scheduler**
+  The scheduler is your primary interface to the work you run in your cluster.  You will
+  instruct it to run jobs, and it will manage them in Mesos for you.  You will also frequently use
+  the scheduler's read-only web interface as a heads-up display for what's running in your cluster.
+
+* **Aurora client**
+  The client (`aurora` command) is a command line tool that exposes primitives that you can use to
+  interact with the scheduler. The client operates on jobs rather than on individual tasks.
+
+  Aurora also provides an admin client (`aurora_admin` command) that contains commands built for
+  cluster administrators.  You can use this tool to do things like manage user quotas and manage
+  graceful maintenance on machines in the cluster.
+
+* **Aurora executor**
+  The executor (a.k.a. Thermos executor) is responsible for carrying out the workloads described in
+  the Aurora DSL (`.aurora` files).  The executor is what actually executes user processes.  It will
+  also perform health checking of tasks and register tasks in ZooKeeper for the purposes of dynamic
+  service discovery.
+
+* **Aurora observer**
+  The observer provides browser-based access to the status of individual tasks executing on worker
+  machines.  It gives insight into the processes executing, and facilitates browsing of task sandbox
+  directories.
+
+* **ZooKeeper**
+  [ZooKeeper](http://zookeeper.apache.org) is a distributed consensus system.  In an Aurora cluster
+  it is used for reliable election of the leading Aurora scheduler and Mesos master.  It is also
+  used as a vehicle for service discovery, see [Service Discovery](../../features/service-discovery/)
+
+* **Mesos master**
+  The master is responsible for tracking worker machines and performing accounting of their
+  resources.  The scheduler interfaces with the master to control the cluster.
+
+* **Mesos agent**
+  The agent receives work assigned by the scheduler and executes it.  It interfaces with Linux
+  isolation systems like cgroups, namespaces and Docker to manage the resource consumption of tasks.
+  When a user task is launched, the agent will launch the executor (in the context of a Linux cgroup
+  or Docker container depending upon the environment), which will in turn fork user processes.
+
+  In earlier versions of Mesos and Aurora, the Mesos agent was known as the Mesos slave.
+
+
+Jobs, Tasks and Processes
+--------------------------
+
+Aurora is a Mesos framework used to schedule *jobs* onto Mesos. Mesos
+cares about individual *tasks*, but typical jobs consist of dozens or
+hundreds of task replicas. Aurora provides a layer on top of Mesos with
+its `Job` abstraction. An Aurora `Job` consists of a task template and
+instructions for creating near-identical replicas of that task (modulo
+things like "instance id" or specific port numbers which may differ from
+machine to machine).
+
+How many tasks make up a Job is complicated. On a basic level, a Job consists of
+one task template and instructions for creating near-identical replicas of that task
+(otherwise referred to as "instances" or "shards").
+
+A task can merely be a single *process* corresponding to a single
+command line, such as `python2.7 my_script.py`. However, a task can also
+consist of many separate processes, which all run within a single
+sandbox. For example, a task might run several cooperating processes together,
+such as `logrotate`, an installer, or master and agent processes. This is
+where Thermos comes in. While Aurora provides a `Job` abstraction on
+top of Mesos `Task`s, Thermos provides a `Process` abstraction
+underneath Mesos `Task`s and serves as part of the Aurora framework's
+executor.
+
+You define `Job`s, `Task`s, and `Process`es in a configuration file.
+Configuration files are written in Python, and make use of the
+[Pystachio](https://github.com/wickman/pystachio) templating language,
+along with specific Aurora, Mesos, and Thermos commands and methods.
+The configuration files typically end with a `.aurora` extension.
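+
+As a hedged illustration of that hierarchy (the names, command line, and resource
+values here are hypothetical; the `Process`, `Task`, `Resources`, and `Job`
+constructs are the ones described in the Configuration Reference), a minimal
+configuration might look like:
+
+```python
+# One Process: a single command line.
+hello = Process(name = 'hello', cmdline = 'echo hello world')
+
+# One Task: a set of Processes plus the resources they may consume.
+hello_task = Task(
+  processes = [hello],
+  resources = Resources(cpu = 0.1, ram = 16*MB, disk = 16*MB))
+
+# One Job: a task template plus instructions on where and how many replicas to run.
+jobs = [
+  Job(cluster = 'devcluster',
+      role = 'www-data',
+      environment = 'devel',
+      name = 'hello',
+      task = hello_task,
+      instances = 2)
+]
+```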
+
+Summary:
+
+* Aurora manages jobs made of tasks.
+* Mesos manages tasks made of processes.
+* Thermos manages processes.
+* All of this is defined in `.aurora` configuration files.
+
+![Aurora hierarchy](../images/aurora_hierarchy.png)
+
+Each `Task` has a *sandbox* created when the `Task` starts and garbage
+collected when it finishes. All of a `Task`'s processes run in its
+sandbox, so processes can share state by using a shared current working
+directory.
+
+The sandbox garbage collection policy considers many factors, most
+importantly age and size. It makes a best-effort attempt to keep
+sandboxes around as long as possible post-task in order for service
+owners to inspect data and logs, should the `Task` have completed
+abnormally. However, you should not design your applications assuming sandboxes
+will be around forever; instead, build log saving or other
+checkpointing mechanisms directly into your application or into your
+`Job` description.
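+
+For example, a process that needs data to outlive its sandbox might periodically
+copy it to durable storage itself. A minimal sketch, assuming a hypothetical
+durable mount at `/mnt/durable` reachable from the task:
+
+```python
+import os
+import shutil
+import time
+
+CHECKPOINT_DIR = '/mnt/durable/checkpoints/hello_world'  # hypothetical durable location
+
+def checkpoint(state_file):
+  # Copy state out of the sandbox so it survives sandbox garbage collection.
+  if not os.path.isdir(CHECKPOINT_DIR):
+    os.makedirs(CHECKPOINT_DIR)
+  shutil.copy(state_file, CHECKPOINT_DIR)
+
+def main():
+  for i in range(100):
+    with open('state.json', 'w') as f:  # state lives in the sandbox working directory
+      f.write('{"iteration": %d}' % i)
+    checkpoint('state.json')
+    time.sleep(60)
+
+if __name__ == '__main__':
+  main()
+```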
+
diff --git a/source/documentation/0.19.1/getting-started/tutorial.md b/source/documentation/0.19.1/getting-started/tutorial.md
new file mode 100644
index 0000000..1f037fc
--- /dev/null
+++ b/source/documentation/0.19.1/getting-started/tutorial.md
@@ -0,0 +1,258 @@
+# Aurora Tutorial
+
+This tutorial shows how to use the Aurora scheduler to run (and "`printf-debug`")
+a hello world program on Mesos. This is the recommended document for new Aurora users
+to start getting up to speed on the system.
+
+- [Prerequisite](#setup-install-aurora)
+- [The Script](#the-script)
+- [Aurora Configuration](#aurora-configuration)
+- [Creating the Job](#creating-the-job)
+- [Watching the Job Run](#watching-the-job-run)
+- [Cleanup](#cleanup)
+- [Next Steps](#next-steps)
+
+
+## Prerequisite
+
+This tutorial assumes you are running [Aurora locally using Vagrant](../vagrant/).
+However, in general the instructions are also applicable to any other
+[Aurora installation](../../operations/installation/).
+
+Unless otherwise stated, all commands are to be run from the root of the aurora
+repository clone.
+
+
+## The Script
+
+Our "hello world" application is a simple Python script that loops
+forever, displaying the time every few seconds. Copy the code below and
+put it in a file named `hello_world.py` in the root of your Aurora repository clone
+(Note: this directory is the same as `/vagrant` inside the Vagrant VMs).
+
+The script has an intentional bug, which we will explain later on.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+import time
+
+def main():
+  SLEEP_DELAY = 10
+  # Python experts - ignore this blatant bug.
+  for i in xrang(100):
+    print("Hello world! The time is now: %s. Sleeping for %d secs" % (
+      time.asctime(), SLEEP_DELAY))
+    time.sleep(SLEEP_DELAY)
+
+if __name__ == "__main__":
+  main()
+```
+
+## Aurora Configuration
+
+Once we have our script/program, we need to create a *configuration
+file* that tells Aurora how to manage and launch our Job. Save the below
+code in the file `hello_world.aurora`.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+pkg_path = '/vagrant/hello_world.py'
+
+# we use a trick here to make the configuration change with
+# the contents of the file, for simplicity.  in a normal setting, packages would be
+# versioned, and the version number would be changed in the configuration.
+import hashlib
+with open(pkg_path, 'rb') as f:
+  pkg_checksum = hashlib.md5(f.read()).hexdigest()
+
+# copy hello_world.py into the local sandbox
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s . && echo %s && chmod +x hello_world.py' % (pkg_path, pkg_checksum))
+
+# run the script
+hello_world = Process(
+  name = 'hello_world',
+  cmdline = 'python -u hello_world.py')
+
+# describe the task
+hello_world_task = SequentialTask(
+  processes = [install, hello_world],
+  resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB))
+
+jobs = [
+  Service(cluster = 'devcluster',
+          environment = 'devel',
+          role = 'www-data',
+          name = 'hello_world',
+          task = hello_world_task)
+]
+```
+
+There is a lot going on in that configuration file:
+
+1. From a "big picture" viewpoint, it first defines two
+Processes. Then it defines a Task that runs the two Processes in the
+order specified in the Task definition, as well as specifying what
+computational and memory resources are available for them.  Finally,
+it defines a Job that will schedule the Task on available and suitable
+machines. This Job is the sole member of a list of Jobs; you can
+specify more than one Job in a config file.
+
+2. At the Process level, it specifies how to get your code into the
+local sandbox in which it will run. It then specifies how the code is
+actually run once the second Process starts.
+
+For more about Aurora configuration files, see the [Configuration
+Tutorial](../../reference/configuration-tutorial/) and the [Configuration
+Reference](../../reference/configuration/) (preferably after finishing this
+tutorial).
+
+
+## Creating the Job
+
+We're ready to launch our job! To do so, we use the Aurora Client to
+issue a Job creation request to the Aurora scheduler.
+
+Many Aurora Client commands take a *job key* argument, which uniquely
+identifies a Job. A job key consists of four parts, each separated by a
+"/". The four parts are  `<cluster>/<role>/<environment>/<jobname>`
+in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts existing on the agent machines. If you
+don't know what accounts are available, contact your sysadmin.
+* Environment names are namespaces; you can count on `test`, `devel`,
+`staging` and `prod` existing.
+* Jobname is the custom name of your job.
+
+When comparing two job keys, if any of the four parts is different from
+its counterpart in the other key, then the two job keys identify two separate
+jobs. If all four values are identical, the job keys identify the same job.
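+
+As an illustration (this helper is not part of the Aurora client; it is only a
+sketch of how a job key decomposes), the key used later in this tutorial splits
+into its four parts like so:
+
+```python
+def parse_job_key(key):
+  # A job key is exactly four "/"-separated parts: cluster/role/environment/jobname.
+  cluster, role, environment, jobname = key.split('/')
+  return {'cluster': cluster, 'role': role, 'environment': environment, 'job': jobname}
+
+print(parse_job_key('devcluster/www-data/devel/hello_world'))
+# {'cluster': 'devcluster', 'role': 'www-data', 'environment': 'devel', 'job': 'hello_world'}
+```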
+
+The `clusters.json` [client configuration](../../reference/client-cluster-configuration/)
+for the Aurora scheduler defines the available cluster names.
+For Vagrant, from the top-level of your Aurora repository clone, do:
+
+    $ vagrant ssh
+
+Followed by:
+
+    vagrant@aurora:~$ cat /etc/aurora/clusters.json
+
+You'll see something like the following. The `name` value shown here corresponds to a job key's cluster value.
+
+```javascript
+[{
+  "name": "devcluster",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler",
+  "auth_mechanism": "UNAUTHENTICATED",
+  "slave_run_directory": "latest",
+  "slave_root": "/var/lib/mesos"
+}]
+```
+
+The Aurora Client command that actually runs our Job is `aurora job create`. It creates a Job as
+specified by its job key and configuration file arguments and runs it.
+
+    aurora job create <cluster>/<role>/<environment>/<jobname> <config_file>
+
+Or for our example:
+
+    aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+After entering our virtual machine using `vagrant ssh`, this returns:
+
+    vagrant@aurora:~$ aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Creating job hello_world
+     INFO] Checking status of devcluster/www-data/devel/hello_world
+    Job create succeeded: job url=http://aurora.local:8081/scheduler/www-data/devel/hello_world
+
+
+## Watching the Job Run
+
+Now that our job is running, let's see what it's doing. Access the
+scheduler web interface at `http://$scheduler_hostname:$scheduler_port/scheduler`,
+or, when using Vagrant, at `http://192.168.33.7:8081/scheduler`.
+First we see what Jobs are scheduled:
+
+![Scheduled Jobs](../images/ScheduledJobs.png)
+
+Click on your user name, `www-data` in this case, to see the Jobs associated
+with that role:
+
+![Role Jobs](../images/RoleJobs.png)
+
+If you click on your `hello_world` Job, you'll see:
+
+![hello_world Job](../images/HelloWorldJob.png)
+
+Oops, looks like our first job didn't quite work! The task is temporarily throttled for
+having failed on every attempt of the Aurora scheduler to run it. We have to figure out
+what is going wrong.
+
+On the Completed tasks tab, we see all past attempts of the Aurora scheduler to run our job.
+
+![Completed tasks tab](../images/CompletedTasks.png)
+
+We can navigate to the Task page of a failed run by clicking on the host link.
+
+![Task page](../images/TaskBreakdown.png)
+
+Once there, we see that the `hello_world` process failed. The Task page
+captures the standard error and standard output streams and makes them available.
+Clicking through to `stderr` on the failed `hello_world` process, we see what happened.
+
+![stderr page](../images/stderr.png)
+
+It looks like we made a typo in our Python script. We wanted `xrange`,
+not `xrang`. Edit the `hello_world.py` script to use the correct function
+and save it as `hello_world_v2.py`. Then update the `hello_world.aurora`
+configuration to use the new script.
+
+In order to try again, we can now instruct the scheduler to update our job:
+
+    vagrant@aurora:~$ aurora update start devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Starting update for: hello_world
+    Job update has started. View your update progress at http://aurora.local:8081/scheduler/www-data/devel/hello_world/update/8ef38017-e60f-400d-a2f2-b5a8b724e95b
+
+This time, the task comes up.
+
+![Running Job](../images/RunningJob.png)
+
+By again clicking on the host, we inspect the Task page, and see that the
+`hello_world` process is running.
+
+![Running Task page](../images/runningtask.png)
+
+We then inspect the output by clicking on `stdout` and see our process'
+output:
+
+![stdout page](../images/stdout.png)
+
+## Cleanup
+
+Now that we're done, we kill the job using the Aurora client:
+
+    vagrant@aurora:~$ aurora job killall devcluster/www-data/devel/hello_world
+     INFO] Killing tasks for job: devcluster/www-data/devel/hello_world
+     INFO] Instances to be killed: [0]
+    Successfully killed instances [0]
+    Job killall succeeded
+
+The job page now shows the `hello_world` tasks as completed.
+
+![Killed Task page](../images/killedtask.png)
+
+## Next Steps
+
+Now that you've finished this Tutorial, you should read or do the following:
+
+- [The Aurora Configuration Tutorial](../../reference/configuration-tutorial/), which provides more examples
+  and best practices for writing Aurora configurations. You should also look at
+  the [Aurora Configuration Reference](../../reference/configuration/).
+- Explore the Aurora Client - use `aurora -h`, and read the
+  [Aurora Client Commands](../../reference/client-commands/) document.
diff --git a/source/documentation/0.19.1/getting-started/vagrant.md b/source/documentation/0.19.1/getting-started/vagrant.md
new file mode 100644
index 0000000..26b9a80
--- /dev/null
+++ b/source/documentation/0.19.1/getting-started/vagrant.md
@@ -0,0 +1,154 @@
+A local Cluster with Vagrant
+============================
+
+This document shows you how to configure a complete cluster using a virtual machine. This setup
+replicates a real cluster in your development machine as closely as possible. After you complete
+the steps outlined here, you will be ready to create and run your first Aurora job.
+
+The following sections describe these steps in detail:
+
+1. [Overview](#overview)
+1. [Install VirtualBox and Vagrant](#install-virtualbox-and-vagrant)
+1. [Clone the Aurora repository](#clone-the-aurora-repository)
+1. [Start the local cluster](#start-the-local-cluster)
+1. [Log onto the VM](#log-onto-the-vm)
+1. [Run your first job](#run-your-first-job)
+1. [Rebuild components](#rebuild-components)
+1. [Shut down or delete your local cluster](#shut-down-or-delete-your-local-cluster)
+1. [Troubleshooting](#troubleshooting)
+
+
+Overview
+--------
+
+The Aurora distribution includes a set of scripts that enable you to create a local cluster in
+your development machine. These scripts use [Vagrant](https://www.vagrantup.com/) and
+[VirtualBox](https://www.virtualbox.org/) to run and configure a virtual machine. Once the
+virtual machine is running, the scripts install and initialize Aurora and any required components
+to create the local cluster.
+
+
+Install VirtualBox and Vagrant
+------------------------------
+
+First, download and install [VirtualBox](https://www.virtualbox.org/) on your development machine.
+
+Then download and install [Vagrant](https://www.vagrantup.com/). To verify that the installation
+was successful, open a terminal window and type the `vagrant` command. You should see a list of
+common commands for this tool.
+
+
+Clone the Aurora repository
+---------------------------
+
+To obtain the Aurora source distribution, clone its Git repository using the following command:
+
+     git clone git://git.apache.org/aurora.git
+
+
+Start the local cluster
+-----------------------
+
+Now change into the `aurora/` directory, which contains the Aurora source code and
+other scripts and tools:
+
+     cd aurora/
+
+To start the local cluster, type the following command:
+
+     vagrant up
+
+This command uses the configuration scripts in the Aurora distribution to:
+
+* Download a Linux system image.
+* Start a virtual machine (VM) and configure it.
+* Install the required build tools on the VM.
+* Install Aurora's requirements (like [Mesos](http://mesos.apache.org/) and
+[Zookeeper](http://zookeeper.apache.org/)) on the VM.
+* Build and install Aurora from source on the VM.
+* Start Aurora's services on the VM.
+
+This process takes several minutes to complete.
+
+You may notice a warning that guest additions in the VM don't match your version of VirtualBox.
+This should generally be harmless, but you may wish to install a vagrant plugin to take care of
+mismatches like this for you:
+
+     vagrant plugin install vagrant-vbguest
+
+With this plugin installed, whenever you `vagrant up` the plugin will upgrade the guest additions
+for you when a version mismatch is detected. You can read more about the plugin
+[here](https://github.com/dotless-de/vagrant-vbguest).
+
+To verify that Aurora is running on the cluster, visit the following URLs:
+
+* Scheduler - http://192.168.33.7:8081
+* Observer - http://192.168.33.7:1338
+* Mesos Master - http://192.168.33.7:5050
+* Mesos Agent - http://192.168.33.7:5051
+
+
+Log onto the VM
+---------------
+
+To SSH into the VM, run the following command in your development machine:
+
+     vagrant ssh
+
+To verify that Aurora is installed in the VM, type the `aurora` command. You should see a list
+of arguments and possible commands.
+
+The `/vagrant` directory on the VM is mapped to the `aurora/` local directory
+from which you started the cluster. You can edit files inside this directory in your development
+machine and access them from the VM under `/vagrant`.
+
+A pre-installed `clusters.json` file refers to your local cluster as `devcluster`, which you
+will use in client commands.
+
+
+Run your first job
+------------------
+
+Now that your cluster is up and running, you are ready to define and run your first job in Aurora.
+For more information, see the [Aurora Tutorial](../tutorial/).
+
+
+Rebuild components
+------------------
+
+If you are changing Aurora code and would like to rebuild a component, you can use the `aurorabuild`
+command on the VM to build and restart a component.  This is considerably faster than destroying
+and rebuilding your VM.
+
+`aurorabuild` accepts a list of components to build and update; invoking it with no
+arguments prints the list of supported components. For example, to rebuild and restart
+the client:
+
+     vagrant ssh -c 'aurorabuild client'
+
+
+Shut down or delete your local cluster
+--------------------------------------
+
+To shut down your local cluster, run the `vagrant halt` command in your development machine. To
+start it again, run the `vagrant up` command.
+
+Once you are finished with your local cluster, or if you would otherwise like to start from scratch,
+you can use the command `vagrant destroy` to turn off and delete the virtual file system.
+
+
+Troubleshooting
+---------------
+
+Most Vagrant-related problems can be fixed with the following steps:
+
+* Destroying the vagrant environment with `vagrant destroy`
+* Killing any orphaned VMs (see AURORA-499) with the VirtualBox UI or the `VBoxManage` command line tool
+* Cleaning the repository of build artifacts and other intermediate output with `git clean -fdx`
+* Bringing up the vagrant environment with `vagrant up`
+
+If that still doesn't solve your problem, make sure to inspect the log files:
+
+* Scheduler: `/var/log/upstart/aurora-scheduler.log`
+* Observer: `/var/log/upstart/aurora-thermos-observer.log`
+* Mesos Master: `/var/log/mesos/mesos-master.INFO` (also see `.WARNING` and `.ERROR`)
+* Mesos Agent: `/var/log/mesos/mesos-slave.INFO` (also see `.WARNING` and `.ERROR`)
diff --git a/publish/documentation/latest/images/CPUavailability.png b/source/documentation/0.19.1/images/CPUavailability.png
similarity index 100%
copy from publish/documentation/latest/images/CPUavailability.png
copy to source/documentation/0.19.1/images/CPUavailability.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/CompletedTasks.png b/source/documentation/0.19.1/images/CompletedTasks.png
new file mode 100644
index 0000000..343e111
--- /dev/null
+++ b/source/documentation/0.19.1/images/CompletedTasks.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/HelloWorldJob.png b/source/documentation/0.19.1/images/HelloWorldJob.png
new file mode 100644
index 0000000..4977227
--- /dev/null
+++ b/source/documentation/0.19.1/images/HelloWorldJob.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/RoleJobs.png b/source/documentation/0.19.1/images/RoleJobs.png
new file mode 100644
index 0000000..95ffa49
--- /dev/null
+++ b/source/documentation/0.19.1/images/RoleJobs.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/RunningJob.png b/source/documentation/0.19.1/images/RunningJob.png
new file mode 100644
index 0000000..4e7a045
--- /dev/null
+++ b/source/documentation/0.19.1/images/RunningJob.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/ScheduledJobs.png b/source/documentation/0.19.1/images/ScheduledJobs.png
new file mode 100644
index 0000000..0be4731
--- /dev/null
+++ b/source/documentation/0.19.1/images/ScheduledJobs.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/TaskBreakdown.png b/source/documentation/0.19.1/images/TaskBreakdown.png
new file mode 100644
index 0000000..fab702c
--- /dev/null
+++ b/source/documentation/0.19.1/images/TaskBreakdown.png
Binary files differ
diff --git a/publish/documentation/latest/images/aurora_hierarchy.png b/source/documentation/0.19.1/images/aurora_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/aurora_hierarchy.png
copy to source/documentation/0.19.1/images/aurora_hierarchy.png
Binary files differ
diff --git a/publish/assets/img/aurora_logo.png b/source/documentation/0.19.1/images/aurora_logo.png
similarity index 100%
copy from publish/assets/img/aurora_logo.png
copy to source/documentation/0.19.1/images/aurora_logo.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/components.odg b/source/documentation/0.19.1/images/components.odg
new file mode 100644
index 0000000..a0f3b28
--- /dev/null
+++ b/source/documentation/0.19.1/images/components.odg
Binary files differ
diff --git a/source/documentation/0.19.1/images/components.png b/source/documentation/0.19.1/images/components.png
new file mode 100644
index 0000000..e8228c0
--- /dev/null
+++ b/source/documentation/0.19.1/images/components.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/debug-client-test.png b/source/documentation/0.19.1/images/debug-client-test.png
new file mode 100644
index 0000000..8b67a10
--- /dev/null
+++ b/source/documentation/0.19.1/images/debug-client-test.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/debugging-client-test.png b/source/documentation/0.19.1/images/debugging-client-test.png
new file mode 100644
index 0000000..df7b7e3
--- /dev/null
+++ b/source/documentation/0.19.1/images/debugging-client-test.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/killedtask.png b/source/documentation/0.19.1/images/killedtask.png
new file mode 100644
index 0000000..bb1bc92
--- /dev/null
+++ b/source/documentation/0.19.1/images/killedtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/lifeofatask.png b/source/documentation/0.19.1/images/lifeofatask.png
similarity index 100%
copy from publish/documentation/latest/images/lifeofatask.png
copy to source/documentation/0.19.1/images/lifeofatask.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png b/source/documentation/0.19.1/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
new file mode 100644
index 0000000..ea9866e
--- /dev/null
+++ b/source/documentation/0.19.1/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png b/source/documentation/0.19.1/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
new file mode 100644
index 0000000..c9539df
--- /dev/null
+++ b/source/documentation/0.19.1/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/presentations/02_19_2015_aurora_at_twitter_thumb.png b/source/documentation/0.19.1/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
new file mode 100644
index 0000000..99674c7
--- /dev/null
+++ b/source/documentation/0.19.1/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/presentations/02_28_2015_apache_aurora_thumb.png b/source/documentation/0.19.1/images/presentations/02_28_2015_apache_aurora_thumb.png
new file mode 100644
index 0000000..6e0e1fb
--- /dev/null
+++ b/source/documentation/0.19.1/images/presentations/02_28_2015_apache_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png b/source/documentation/0.19.1/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
new file mode 100644
index 0000000..f81a128
--- /dev/null
+++ b/source/documentation/0.19.1/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/presentations/03_25_2014_introduction_to_aurora_thumb.png b/source/documentation/0.19.1/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
new file mode 100644
index 0000000..6ebce80
--- /dev/null
+++ b/source/documentation/0.19.1/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/presentations/04_30_2015_monolith_to_microservices_thumb.png b/source/documentation/0.19.1/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
new file mode 100644
index 0000000..f8a3db1
--- /dev/null
+++ b/source/documentation/0.19.1/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/presentations/08_21_2014_past_present_future_thumb.png b/source/documentation/0.19.1/images/presentations/08_21_2014_past_present_future_thumb.png
new file mode 100644
index 0000000..deaebe7
--- /dev/null
+++ b/source/documentation/0.19.1/images/presentations/08_21_2014_past_present_future_thumb.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png b/source/documentation/0.19.1/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
new file mode 100644
index 0000000..61ea6d4
--- /dev/null
+++ b/source/documentation/0.19.1/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/presentations/09_20_2015_twitter_production_scale_thumb.png b/source/documentation/0.19.1/images/presentations/09_20_2015_twitter_production_scale_thumb.png
new file mode 100644
index 0000000..9f1f4af
--- /dev/null
+++ b/source/documentation/0.19.1/images/presentations/09_20_2015_twitter_production_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png b/source/documentation/0.19.1/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
new file mode 100644
index 0000000..a8bb199
--- /dev/null
+++ b/source/documentation/0.19.1/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png b/source/documentation/0.19.1/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
new file mode 100644
index 0000000..ac5c6dc
--- /dev/null
+++ b/source/documentation/0.19.1/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/runningtask.png b/source/documentation/0.19.1/images/runningtask.png
new file mode 100644
index 0000000..9e7e31a
--- /dev/null
+++ b/source/documentation/0.19.1/images/runningtask.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/stderr.png b/source/documentation/0.19.1/images/stderr.png
new file mode 100644
index 0000000..6076a84
--- /dev/null
+++ b/source/documentation/0.19.1/images/stderr.png
Binary files differ
diff --git a/source/documentation/0.19.1/images/stdout.png b/source/documentation/0.19.1/images/stdout.png
new file mode 100644
index 0000000..d614097
--- /dev/null
+++ b/source/documentation/0.19.1/images/stdout.png
Binary files differ
diff --git a/publish/documentation/latest/images/storage_hierarchy.png b/source/documentation/0.19.1/images/storage_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/storage_hierarchy.png
copy to source/documentation/0.19.1/images/storage_hierarchy.png
Binary files differ
diff --git a/source/documentation/0.19.1/index.html.md b/source/documentation/0.19.1/index.html.md
new file mode 100644
index 0000000..fc414c4
--- /dev/null
+++ b/source/documentation/0.19.1/index.html.md
@@ -0,0 +1,79 @@
+## Introduction
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+We encourage you to ask questions on the [Aurora user list](http://aurora.apache.org/community/) or
+the `#aurora` IRC channel on `irc.freenode.net`.
+
+
+## Getting Started
+Information for everyone new to Apache Aurora.
+
+ * [Aurora System Overview](getting-started/overview/)
+ * [Hello World Tutorial](getting-started/tutorial/)
+ * [Local cluster with Vagrant](getting-started/vagrant/)
+
+## Features
+Description of important Aurora features.
+
+ * [Containers](features/containers/)
+ * [Cron Jobs](features/cron-jobs/)
+ * [Custom Executors](features/custom-executors/)
+ * [Job Updates](features/job-updates/)
+ * [Multitenancy](features/multitenancy/)
+ * [Resource Isolation](features/resource-isolation/)
+ * [Scheduling Constraints](features/constraints/)
+ * [Services](features/services/)
+ * [Service Discovery](features/service-discovery/)
+ * [SLA Metrics](features/sla-metrics/)
+ * [Webhooks](features/webhooks/)
+
+## Operators
+For those that wish to manage and fine-tune an Aurora cluster.
+
+ * [Installation](operations/installation/)
+ * [Configuration](operations/configuration/)
+ * [Upgrades](operations/upgrades/)
+ * [Troubleshooting](operations/troubleshooting/)
+ * [Monitoring](operations/monitoring/)
+ * [Security](operations/security/)
+ * [Storage](operations/storage/)
+ * [Backup](operations/backup-restore/)
+
+## Reference
+The complete reference of commands, configuration options, and scheduler internals.
+
+ * [Task lifecycle](reference/task-lifecycle/)
+ * Configuration (`.aurora` files)
+    - [Configuration Reference](reference/configuration/)
+    - [Configuration Tutorial](reference/configuration-tutorial/)
+    - [Configuration Best Practices](reference/configuration-best-practices/)
+    - [Configuration Templating](reference/configuration-templating/)
+ * Aurora Client
+    - [Client Commands](reference/client-commands/)
+    - [Client Hooks](reference/client-hooks/)
+    - [Client Cluster Configuration](reference/client-cluster-configuration/)
+ * [Scheduler Configuration](reference/scheduler-configuration/)
+ * [Observer Configuration](reference/observer-configuration/)
+ * [Endpoints](reference/scheduler-endpoints/)
+
+## Additional Resources
+ * [Tools integrating with Aurora](additional-resources/tools/)
+ * [Presentation videos and slides](additional-resources/presentations/)
+
+## Developers
+All the information you need to start modifying Aurora and contributing back to the project.
+
+ * [Contributing to the project](contributing/)
+ * [Committer's Guide](development/committers-guide/)
+ * [Design Documents](development/design-documents/)
+ * Developing the Aurora components:
+     - [Client](development/client/)
+     - [Scheduler](development/scheduler/)
+     - [Scheduler UI](development/ui/)
+     - [Thermos](development/thermos/)
+     - [Thrift structures](development/thrift/)
+
+
diff --git a/source/documentation/0.19.1/operations/backup-restore.md b/source/documentation/0.19.1/operations/backup-restore.md
new file mode 100644
index 0000000..b9e7d3a
--- /dev/null
+++ b/source/documentation/0.19.1/operations/backup-restore.md
@@ -0,0 +1,91 @@
+# Recovering from a Scheduler Backup
+
+**Be sure to read the entire page before attempting to restore from a backup, as it may have
+unintended consequences.**
+
+## Summary
+
+The restoration procedure replaces the existing (possibly corrupted) Mesos replicated log with an
+earlier, backed up, version and requires all schedulers to be taken down temporarily while
+restoring. Once completed, the scheduler state resets to what it was when the backup was created.
+This means any jobs/tasks created or updated after the backup are unknown to the scheduler and will
+be killed shortly after the cluster restarts. All other tasks continue operating as normal.
+
+Usually, it is a bad idea to restore a backup that is not extremely recent (i.e. older than a few
+hours). This is because the scheduler will expect the cluster to look exactly as the backup does,
+so any tasks that have been rescheduled since the backup was taken will be killed.
+
+Instructions below have been verified in [Vagrant environment](../../getting-started/vagrant/) and with minor
+syntax/path changes should be applicable to any Aurora cluster.
+
+## Preparation
+
+Follow these steps to prepare the cluster for restoring from a backup:
+
+* Stop all scheduler instances
+
+* Consider blocking external traffic on the port defined in `-http_port` for all schedulers to
+prevent users from interacting with the scheduler during the restoration process. This will help
+troubleshooting by reducing the scheduler log noise and prevent users from making changes that will
+be erased after the backup snapshot is restored.
+
+* Configure `aurora_admin` access to run all commands listed in
+  the [Restore from backup](#restore-from-backup) section locally on the leading scheduler:
+  * Make sure the [clusters.json](../../reference/client-cluster-configuration/) file is configured to
+    access the scheduler directly: set the `scheduler_uri` setting and remove `zk`. Since the leader can get
+    re-elected during the restore steps, consider doing this on all scheduler replicas.
+  * Depending on your particular security approach, you will need to either turn off scheduler
+    authorization by removing the scheduler `-http_authentication_mechanism` flag or make sure that
+    direct scheduler access is properly authorized. E.g.: in the case of Kerberos you will need to
+    edit `/etc/hosts` to map your local IP to the scheduler URL configured in the keytabs:
+
+        <local_ip> <scheduler_domain_in_keytabs>
+
+* The next steps are required to put the scheduler into a partially disabled state where it can still
+accept storage recovery requests but cannot schedule tasks or change task states. This may be
+accomplished by updating the following scheduler configuration options:
+  * Set `-mesos_master_address` to a non-existent ZooKeeper address. This will prevent the scheduler
+    from registering with Mesos. E.g.: `-mesos_master_address=zk://localhost:1111/mesos/master`
+  * Set `-max_registration_delay` to a sufficiently long interval to prevent a registration timeout
+    and, as a result, scheduler suicide. E.g.: `-max_registration_delay=360mins`
+  * Make sure the `-reconciliation_initial_delay` option is set high enough (e.g.: `365days`) to
+    prevent accidental task GC. This is important as the scheduler will attempt to reconcile the cluster
+    state and will kill all tasks when restarted with an empty Mesos replicated log.
+
+* Restart all schedulers
+
+## Cleanup and re-initialize Mesos replicated log
+
+Get rid of the corrupted files and re-initialize Mesos replicated log:
+
+* Stop schedulers
+* Delete all files under `-native_log_file_path` on all schedulers
+* Initialize Mesos replica's log file: `sudo mesos-log initialize --path=<-native_log_file_path>`
+* Start schedulers
+
+## Restore from backup
+
+At this point the scheduler is ready to rehydrate from the backup:
+
+* Identify the leading scheduler by:
+  * examining the `scheduler_lifecycle_LEADER_AWAITING_REGISTRATION` metric at the scheduler
+    `/vars` endpoint. Leader will have 1. All other replicas - 0.
+  * examining scheduler logs
+  * or examining Zookeeper registration under the path defined by `-zk_endpoints`
+    and `-serverset_path`
+
+* Locate the desired backup file, copy it to the leading scheduler's `-backup_dir` folder and stage
+recovery by running the following command on the leader:
+`aurora_admin scheduler_stage_recovery --bypass-leader-redirect <cluster> scheduler-backup-<yyyy-MM-dd-HH-mm>`
+
+* At this point, the recovery snapshot is staged and available for manual verification/modification
+via `aurora_admin scheduler_print_recovery_tasks --bypass-leader-redirect` and
+`scheduler_delete_recovery_tasks --bypass-leader-redirect` commands.
+See `aurora_admin help <command>` for usage details.
+
+* Commit recovery. This instructs the scheduler to overwrite the existing Mesos replicated log with
+the provided backup snapshot and initiate a mandatory failover:
+`aurora_admin scheduler_commit_recovery --bypass-leader-redirect <cluster>`
+
+## Cleanup
+Undo any modification done during [Preparation](#preparation) sequence.
diff --git a/source/documentation/0.19.1/operations/configuration.md b/source/documentation/0.19.1/operations/configuration.md
new file mode 100644
index 0000000..9604b31
--- /dev/null
+++ b/source/documentation/0.19.1/operations/configuration.md
@@ -0,0 +1,339 @@
+# Scheduler Configuration
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+Examples are available under `examples/scheduler/`. For a list of available Aurora flags and their
+documentation, see [Scheduler Configuration Reference](../../reference/scheduler-configuration/).
+
+
+## A Note on Configuration
+Like Mesos, Aurora uses command-line flags for runtime configuration. As such, the Aurora
+"configuration file" is typically a `scheduler.sh` shell script of the following form:
+
+    #!/bin/bash
+    AURORA_HOME=/usr/local/aurora-scheduler
+
+    # Flags controlling the JVM.
+    JAVA_OPTS=(
+      -Xmx2g
+      -Xms2g
+      # GC tuning, etc.
+    )
+
+    # Flags controlling the scheduler.
+    AURORA_FLAGS=(
+      # Port for client RPCs and the web UI
+      -http_port=8081
+      # Log configuration, etc.
+    )
+
+    # Environment variables controlling libmesos
+    export JAVA_HOME=...
+    export GLOG_v=1
+    export LIBPROCESS_PORT=8083
+    export LIBPROCESS_IP=192.168.33.7
+
+    JAVA_OPTS="${JAVA_OPTS[*]}" exec "$AURORA_HOME/bin/aurora-scheduler" "${AURORA_FLAGS[@]}"
+
+That way Aurora's current flags are visible in `ps` and in the `/vars` admin endpoint.
+
+
+## JVM Configuration
+
+JVM settings are dependent on your environment and cluster size. They might require
+custom tuning. As a starting point, we recommend:
+
+* Ensure the initial (`-Xms`) and maximum (`-Xmx`) heap size are identical to prevent heap resizing
+  at runtime.
+* Either `-XX:+UseConcMarkSweepGC` or `-XX:+UseG1GC -XX:+UseStringDeduplication` are
+  sane defaults for the garbage collector.
+* `-Djava.net.preferIPv4Stack=true` makes sense in most cases as well.
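+
+Putting these recommendations together in the `scheduler.sh` style shown above might look
+like the following (heap sizes are illustrative and should be tuned for your cluster):
+
+    JAVA_OPTS=(
+      -Xms2g
+      -Xmx2g
+      -XX:+UseG1GC
+      -XX:+UseStringDeduplication
+      -Djava.net.preferIPv4Stack=true
+    )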
+
+
+## Network Configuration
+
+By default, Aurora binds to all interfaces and auto-discovers its hostname. To reduce ambiguity,
+however, it helps to hardcode them:
+
+    -http_port=8081
+    -ip=192.168.33.7
+    -hostname="aurora1.us-east1.example.org"
+
+Two environment variables control the ip and port for the communication with the Mesos master
+and for the replicated log used by Aurora:
+
+    export LIBPROCESS_PORT=8083
+    export LIBPROCESS_IP=192.168.33.7
+
+It is important that those can be reached from all Mesos master and Aurora scheduler instances.
+
+
+## Replicated Log Configuration
+
+Aurora schedulers use ZooKeeper to discover log replicas and elect a leader. Only one scheduler is
+leader at a given time - the other schedulers follow log writes and prepare to take over as leader
+but do not communicate with the Mesos master. Either 3 or 5 schedulers are recommended in a
+production deployment depending on failure tolerance, and they must have persistent storage.
+
+Below is a summary of scheduler storage configuration flags that either don't have default values
+or require attention before deploying in a production environment.
+
+### `-native_log_quorum_size`
+Defines the Mesos replicated log quorum size. In a cluster with `N` schedulers, the flag
+`-native_log_quorum_size` should be set to `floor(N/2) + 1`. So in a cluster with 1 scheduler
+it should be set to `1`, in a cluster with 3 it should be set to `2`, and in a cluster of 5 it
+should be set to `3`.
+
+  Number of schedulers (N) | ```-native_log_quorum_size``` setting (```floor(N/2) + 1```)
+  ------------------------ | -------------------------------------------------------------
+  1                        | 1
+  3                        | 2
+  5                        | 3
+  7                        | 4
+
+*Incorrectly setting this flag will cause data corruption to occur!*
+
+### `-native_log_file_path`
+Location of the Mesos replicated log files. For optimal and consistent performance, consider
+allocating a dedicated disk (preferably SSD) for the replicated log. Ensure that this disk is not
+used by anything else (e.g. no process logging) and in particular that it is a real disk
+and not just a partition.
+
+Even when a dedicated disk is used, switching the Linux kernel I/O scheduler from `CFQ` to `deadline`
+can furthermore help with storage performance in Aurora ([see this ticket for details](https://issues.apache.org/jira/browse/AURORA-1211)).
+
+### `-native_log_zk_group_path`
+ZooKeeper path used for Mesos replicated log quorum discovery.
+
+See [code](https://github.com/apache/aurora/blob/rel/0.19.1/src/main/java/org/apache/aurora/scheduler/log/mesos/MesosLogStreamModule.java) for
+other available Mesos replicated log configuration options and default values.
+
+### Changing the Quorum Size
+Special care needs to be taken when changing the size of the Aurora scheduler quorum.
+Since Aurora uses a Mesos replicated log, similar steps need to be followed as when
+[changing the Mesos quorum size](http://mesos.apache.org/documentation/latest/operational-guide).
+
+As a preparation, increase `-native_log_quorum_size` on each existing scheduler and restart them.
+When updating from 3 to 5 schedulers, the quorum size would grow from 2 to 3.
+
+When starting the new schedulers, set `-native_log_quorum_size` to the new value. Failing to
+first increase the quorum size on running schedulers can in some cases result in corruption
+or truncation of the replicated log used by Aurora. In that case, see the documentation on
+[recovering from backup](../backup-restore/).
+
+
+## Backup Configuration
+
+Configuration options for the Aurora scheduler backup manager.
+
+* `-backup_interval`: The interval on which the scheduler writes local storage backups.
+   The default is every hour.
+* `-backup_dir`: Directory to write backups to. As stated above, this should not be co-located on the
+   same disk as the replicated log.
+* `-max_saved_backups`: Maximum number of backups to retain before deleting the oldest backup(s).
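+
+For example, a scheduler might be started with backup settings along these lines (the
+directory and retention count are illustrative; only the flag names come from above):
+
+    -backup_interval=1hrs
+    -backup_dir=/var/lib/aurora/backups
+    -max_saved_backups=48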
+
+
+## Resource Isolation
+
+For proper CPU, memory, and disk isolation as mentioned in our [enduser documentation](../../features/resource-isolation/),
+we recommend adding the following isolators to the `--isolation` flag of the Mesos agent:
+
+* `cgroups/cpu`
+* `cgroups/mem`
+* `disk/du`
+
+In addition, we recommend setting the following [agent flags](http://mesos.apache.org/documentation/latest/configuration/):
+
+* `--cgroups_limit_swap` to enable memory limits on both memory and swap instead of just memory.
+  Alternatively, you could disable swap on your agent hosts.
+* `--cgroups_enable_cfs` to enable hard limits on CPU resources via the CFS bandwidth limiting
+  feature.
+* `--enforce_container_disk_quota` to enable disk quota enforcement for containers.
+
+To enable the optional GPU support in Mesos, please see the GPU related flags in the
+[Mesos configuration](http://mesos.apache.org/documentation/latest/configuration/).
+To enable the corresponding feature in Aurora, you have to start the scheduler with the
+flag
+
+    -allow_gpu_resource=true
+
+If you want to use revocable resources, first follow the
+[Mesos oversubscription documentation](http://mesos.apache.org/documentation/latest/oversubscription/)
+and then set this Aurora scheduler flag to allow receiving revocable Mesos offers:
+
+    -receive_revocable_resources=true
+
+Both CPUs and RAM are supported as revocable resources. The former is enabled by default,
+while the latter needs to be enabled via:
+
+    -enable_revocable_ram=true
+
+Unless you want to use the [default](https://github.com/apache/aurora/blob/rel/0.19.1/src/main/resources/org/apache/aurora/scheduler/tiers.json)
+tier configuration, you will also have to specify a file path:
+
+    -tier_config=path/to/tiers/config.json
+
+
+## Multi-Framework Setup
+
+Aurora holds onto Mesos offers in order to provide efficient scheduling and
+[preemption](../../features/multitenancy/#preemption). This is problematic in multi-framework
+environments as Aurora might starve other frameworks.
+
+With a downside of increased scheduling latency, Aurora can be configured to be more cooperative:
+
+* Lowering `-min_offer_hold_time` (e.g. to `1mins`) can ensure unused offers are returned back to
+  Mesos more frequently.
+* Increasing `-offer_filter_duration` (e.g. to `30secs`) will instruct Mesos
+  not to re-offer rejected resources for the given duration.
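+
+Together, a more cooperative configuration using the illustrative values above would be:
+
+    -min_offer_hold_time=1mins
+    -offer_filter_duration=30secs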
+
+Setting a [minimum amount of resources](http://mesos.apache.org/documentation/latest/quota/) for
+each Mesos role can furthermore help to ensure no framework is starved entirely.
+
+
+## Containers
+
+Both the Mesos and Docker containerizers require configuration of the Mesos agent.
+
+### Mesos Containerizer
+
+The minimal agent configuration requires enabling Docker and Appc image support for the Mesos
+containerizer:
+
+    --containerizers=mesos
+    --image_providers=appc,docker
+    --isolation=filesystem/linux,docker/runtime  # as an addition to your other isolators
+
+Further details can be found in the corresponding [Mesos documentation](http://mesos.apache.org/documentation/latest/container-image/).
+
+### Docker Containerizer
+
+The [Docker containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/)
+requires that the Docker engine is installed on each agent host. In addition, it must be enabled on the
+Mesos agents by launching them with the option:
+
+    --containerizers=mesos,docker
+
+If you would like to run a container with a read-only filesystem, it may also be necessary to use
+the scheduler flag `-thermos_home_in_sandbox` in order to set HOME to the sandbox
+before the executor runs. This will make sure that the executor/runner PEX extraction happens
+inside of the sandbox instead of the container filesystem root.
+
+If you would like to supply your own parameters to `docker run` when launching jobs in docker
+containers, you may use the following flags:
+
+    -allow_docker_parameters
+    -default_docker_parameters
+
+`-allow_docker_parameters` controls whether or not users may pass their own configuration parameters
+through the job configuration files. If set to `false` (the default), the scheduler will reject
+jobs with custom parameters. *NOTE*: this setting should be used with caution as it allows any job
+owner to specify any parameters they wish, including those that may introduce security concerns
+(`privileged=true`, for example).
+
+`-default_docker_parameters` allows a cluster operator to specify a universal set of parameters that
+should be used for every container that does not have parameters explicitly configured at the job
+level. The argument accepts a multimap format:
+
+    -default_docker_parameters="read-only=true,tmpfs=/tmp,tmpfs=/run"
+
+### Common Options
+
+The following Aurora options work for both containerizers.
+
+A scheduler flag, `-global_container_mounts`, allows mounting paths from the host (i.e. the agent machine)
+into all containers on that host. The format is a comma-separated list of `host_path:container_path[:mode]`
+tuples. For example `-global_container_mounts=/opt/secret_keys_dir:/mnt/secret_keys_dir:ro` mounts
+`/opt/secret_keys_dir` from the agents into all launched containers. Valid modes are `ro` and `rw`.
+
+
+## Thermos Process Logs
+
+### Log destination
+By default, Thermos will write process stdout/stderr to log files in the sandbox. Process object
+configuration allows specifying alternate log file destinations like streamed stdout/stderr or
+suppression of all log output. Default behavior can be configured for the entire cluster with the
+following flag (through the `-thermos_executor_flags` argument to the Aurora scheduler):
+
+    --runner-logger-destination=both
+
+The `both` setting sends logs to files and also streams them to the parent stdout/stderr.
+
+See [Configuration Reference](../../reference/configuration/#logger) for all destination options.
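+
+Passed through the scheduler, this might look like the following (a sketch only):
+
+    -thermos_executor_flags="--runner-logger-destination=both"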
+
+### Log rotation
+By default, Thermos will not rotate the stdout/stderr logs from child processes and they will grow
+without bound. An individual user may change this behavior via configuration on the Process object,
+but it may also be desirable to change the default configuration for the entire cluster.
+In order to enable rotation by default, the following flags can be applied to Thermos (through the
+`-thermos_executor_flags` argument to the Aurora scheduler):
+
+    --runner-logger-mode=rotate
+    --runner-rotate-log-size-mb=100
+    --runner-rotate-log-backups=10
+
+In the above example, each instance of the Thermos runner will rotate stderr/stdout logs once they
+reach 100 MiB in size and keep a maximum of 10 backups. If a user has provided a custom setting for
+their process, it will override these default settings.
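+
+Putting this together, the scheduler could be started with something along these lines (a
+sketch; the size and backup count are the illustrative values above):
+
+    -thermos_executor_flags="--runner-logger-mode=rotate --runner-rotate-log-size-mb=100 --runner-rotate-log-backups=10"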
+
+
+## Thermos Executor Wrapper
+
+If you need to do computation before starting the Thermos executor (for example, setting a different
+`--announcer-hostname` parameter for every executor), then the Thermos executor should be invoked
+inside a wrapper script. In such a case, the aurora scheduler should be started with
+`-thermos_executor_path` pointing to the wrapper script and `-thermos_executor_resources` set to a
+comma separated string of all the resources that should be copied into the sandbox (including the
+original Thermos executor). Ensure the wrapper script does not access resources outside of the
+sandbox, as when the script is run from within a Docker container those resources may not exist.
+
+For example, to wrap the executor inside a simple wrapper, the scheduler would be started like this:
+`-thermos_executor_path=/path/to/wrapper.sh -thermos_executor_resources=/usr/share/aurora/bin/thermos_executor.pex`
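+
+A minimal wrapper might look like the following (a sketch only; it assumes the executor PEX was
+copied into the sandbox via `-thermos_executor_resources` and uses the `--announcer-hostname`
+parameter mentioned above):
+
+    #!/bin/bash
+    # Compute a per-host value, then hand off to the real Thermos executor in the sandbox.
+    exec ./thermos_executor.pex --announcer-hostname="$(hostname -f)" "$@"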
+
+## Custom Executors
+
+The scheduler can be configured to utilize a custom executor by specifying the `-custom_executor_config` flag.
+The flag must be set to the path of a valid executor configuration file.
+
+For more information on this feature please see the custom executors [documentation](../../features/custom-executors/).
+
+## A note on increasing executor overhead
+
+Increasing executor overhead on an existing cluster, whether for custom executors or for Thermos,
+will result in degraded preemption performance until all tasks that began life with the previous,
+lower-overhead executor configuration are preempted/restarted.
+
+## Controlling MTTA via Update Affinity
+
+When there is high resource contention in your cluster you may experience noticeably elevated job update
+times, as well as high task churn across the cluster. This is due to Aurora's first-fit scheduling
+algorithm. To alleviate this, you can enable update affinity where the Scheduler will make a best-effort
+attempt to reuse the same agent for the updated task (so long as the resources for the job are not being
+increased).
+
+To enable this in the Scheduler, you can set the following options:
+
+    --enable_update_affinity=true
+    --update_affinity_reservation_hold_time=3mins
+
+You will need to tune the hold time to match the behavior you see in your cluster. If you have extremely
+high update throughput, you might have to extend it as processing updates could easily add significant
+delays between scheduling attempts. You may also have to tune scheduling parameters to achieve the
+throughput you need in your cluster. Some relevant settings (with defaults) are:
+
+    --max_schedule_attempts_per_sec=40
+    --initial_schedule_penalty=1secs
+    --max_schedule_penalty=1mins
+    --scheduling_max_batch_size=3
+    --max_tasks_per_schedule_attempt=5
+
+There are metrics exposed by the Scheduler which can provide guidance on where the bottleneck is.
+Example metrics to look at:
+
+    - schedule_attempts_blocks (if this number is greater than 0, then task throughput is hitting
+                                limits controlled by --max_schedule_attempts_per_sec)
+    - scheduled_task_penalty_* (metrics around scheduling penalties for tasks, if the numbers here are high
+                                then you could have high contention for resources)
+
+Most likely you'll run into limits with the number of update instances that can be processed per minute
+before you run into any other limits. So if your total work done per minute starts to exceed 2k instances,
+you may need to extend the `update_affinity_reservation_hold_time`.
diff --git a/source/documentation/0.19.1/operations/installation.md b/source/documentation/0.19.1/operations/installation.md
new file mode 100644
index 0000000..12db004
--- /dev/null
+++ b/source/documentation/0.19.1/operations/installation.md
@@ -0,0 +1,256 @@
+# Installing Aurora
+
+Source and binary distributions can be found on our
+[downloads](https://aurora.apache.org/downloads/) page.  Installing from binary packages is
+recommended for most users.
+
+- [Installing the scheduler](#installing-the-scheduler)
+- [Installing worker components](#installing-worker-components)
+- [Installing the client](#installing-the-client)
+- [Installing Mesos](#installing-mesos)
+- [Troubleshooting](#troubleshooting)
+
+If our binary packages don't suit you, our package build toolchain makes it easy to build your
+own packages. See the [instructions](https://github.com/apache/aurora-packaging) to learn how.
+
+
+## Machine profiles
+
+Given that many of these components communicate over the network, there are numerous ways you could
+assemble them to create an Aurora cluster.  The simplest way is to think in terms of three machine
+profiles:
+
+### Coordinator
+**Components**: ZooKeeper, Aurora scheduler, Mesos master
+
+A small number of machines (typically 3 or 5) responsible for cluster orchestration.  In most cases
+it is fine to co-locate these components in anything but very large clusters (> 1000 machines).
+Beyond that point, operators will likely want to manage these services on separate machines.
+In particular, you will want to use separate ZooKeeper ensembles for leader election and
+service discovery. Otherwise a service discovery error or outage can take down the entire cluster.
+
+In practice, 5 coordinators have been shown to reliably manage clusters with tens of thousands of
+machines.
+
+### Worker
+**Components**: Aurora executor, Aurora observer, Mesos agent
+
+The bulk of the cluster, where services will actually run.
+
+### Client
+**Components**: Aurora client, Aurora admin client
+
+Any machines that users submit jobs from.
+
+
+## Installing the scheduler
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        sudo start mesos-master
+
+2. Install ZooKeeper
+
+        sudo apt-get install -y zookeeperd
+
+3. Install the Aurora scheduler
+
+        sudo add-apt-repository -y ppa:openjdk-r/ppa
+        sudo apt-get update
+        sudo apt-get install -y openjdk-8-jre-headless wget
+
+        sudo update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-scheduler_0.17.0_amd64.deb
+        sudo dpkg -i aurora-scheduler_0.17.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-master
+
+2. Install ZooKeeper
+
+        sudo rpm -Uvh https://archive.cloudera.com/cdh4/one-click-install/redhat/6/x86_64/cloudera-cdh-4-0.x86_64.rpm
+        sudo yum install -y java-1.8.0-openjdk-headless zookeeper-server
+
+        sudo service zookeeper-server init
+        sudo systemctl start zookeeper-server
+
+3. Install the Aurora scheduler
+
+        sudo yum install -y wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-scheduler-0.17.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-scheduler-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Finalizing
+By default, the scheduler will start in an uninitialized mode.  This is because external
+coordination is necessary to be certain operator error does not result in a quorum of schedulers
+starting up and believing their databases are empty when in fact they should be re-joining a
+cluster.
+
+Because of this, a fresh install of the scheduler will need intervention to start up.  First,
+stop the scheduler service.
+Ubuntu: `sudo stop aurora-scheduler`
+CentOS: `sudo systemctl stop aurora`
+
+Now initialize the database:
+
+    sudo -u aurora mkdir -p /var/lib/aurora/scheduler/db
+    sudo -u aurora mesos-log initialize --path=/var/lib/aurora/scheduler/db
+
+Now you can start the scheduler back up.
+Ubuntu: `sudo start aurora-scheduler`
+CentOS: `sudo systemctl start aurora`
+
+
+## Installing worker components
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo apt-get install -y python2.7 wget
+
+        # NOTE: This appears to be a missing dependency of the mesos deb package and is needed
+        # for the python mesos native bindings.
+        sudo apt-get -y install libcurl4-nss-dev
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-executor_0.17.0_amd64.deb
+        sudo dpkg -i aurora-executor_0.17.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo yum install -y python2 wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-executor-0.17.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-executor-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Worker Configuration
+The executor typically does not require configuration.  Command line arguments can
+be passed to the executor via a corresponding command line argument on the scheduler.
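+
+For example, extra executor arguments could be supplied through the scheduler's startup flags. A
+minimal sketch, assuming your scheduler version exposes the `-thermos_executor_flags` option for
+this purpose (the values shown are illustrative):
+
+    # Passed through to every launched executor by the scheduler.
+    -thermos_executor_flags="--announcer-ensemble localhost:2181"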
+
+The observer needs to be configured to look at the correct mesos directory in order to find task
+sandboxes. You should first find the Mesos working directory by looking for the Mesos agent
+`--work_dir` flag. You should see something like:
+
+        ps -eocmd | grep "mesos-slave" | grep -v grep | tr ' ' '\n' | grep "\--work_dir"
+        --work_dir=/var/lib/mesos
+
+If the flag is not set, you can view the default value like so:
+
+        mesos-slave --help
+        Usage: mesos-slave [options]
+
+          ...
+          --work_dir=VALUE      Directory path to place framework work directories
+                                (default: /tmp/mesos)
+          ...
+
+The value you find for `--work_dir`, `/var/lib/mesos` in this example, should match the Aurora
+observer value for `--mesos-root`.  You can look for that setting in a similar way on a worker
+node by grepping for `thermos_observer` and `--mesos-root`.  If the flag is not set, you can view
+the default value like so:
+
+        thermos_observer -h
+        Options:
+          ...
+          --mesos-root=MESOS_ROOT
+                                The mesos root directory to search for Thermos
+                                executor sandboxes [default: /var/lib/mesos]
+          ...
+
+In this case the default is `/var/lib/mesos` and we have a match. If there is no match, you can
+either adjust the mesos-master start script(s) and restart the master(s) or else adjust the
+Aurora observer start scripts and restart the observers.  To adjust the Aurora observer:
+
+#### Ubuntu Trusty
+
+    sudo sh -c 'echo "MESOS_ROOT=/tmp/mesos" >> /etc/default/thermos'
+
+#### CentOS 7
+
+Make an edit to add the `--mesos-root` flag resulting in something like:
+
+    grep -A5 OBSERVER_ARGS /etc/sysconfig/thermos
+    OBSERVER_ARGS=(
+      --port=1338
+      --mesos-root=/tmp/mesos
+      --log_to_disk=NONE
+      --log_to_stderr=google:INFO
+    )
+
+
+## Installing the client
+### Ubuntu Trusty
+
+    sudo apt-get install -y python2.7 wget
+
+    wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-tools_0.17.0_amd64.deb
+    sudo dpkg -i aurora-tools_0.17.0_amd64.deb
+
+### CentOS 7
+
+    sudo yum install -y python2 wget
+
+    wget -c https://apache.bintray.com/aurora/centos-7/aurora-tools-0.17.0-1.el7.centos.aurora.x86_64.rpm
+    sudo yum install -y aurora-tools-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Mac OS X
+
+    brew upgrade
+    brew install aurora-cli
+
+### Client Configuration
+Client configuration lives in a json file that describes the clusters available and how to reach
+them.  By default this file is at `/etc/aurora/clusters.json`.
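+
+A minimal sketch of such a file, assuming a ZooKeeper-discovered scheduler (the values shown are
+illustrative and should be adjusted for your cluster):
+
+    cat <<'EOF' | sudo tee /etc/aurora/clusters.json
+    [{
+      "name": "devcluster",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler",
+      "auth_mechanism": "UNAUTHENTICATED",
+      "slave_run_directory": "latest",
+      "slave_root": "/var/lib/mesos"
+    }]
+    EOF
+
+The available properties are described in the
+[Client Cluster Configuration](../../reference/client-cluster-configuration/) reference.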
+
+Jobs may be submitted to the scheduler using the client, and are described with
+[job configurations](../../reference/configuration/) expressed in `.aurora` files.  Typically you will
+maintain a single job configuration file to describe one or more deployment environments (e.g.
+dev, test, prod) for a production job.
+
+
+## Installing Mesos
+Mesos uses a single package for the Mesos master and agent.  As a result, the package dependencies
+are identical for both.
+
+### Mesos on Ubuntu Trusty
+
+    sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF
+    DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]')
+    CODENAME=$(lsb_release -cs)
+
+    echo "deb http://repos.mesosphere.io/${DISTRO} ${CODENAME} main" | \
+      sudo tee /etc/apt/sources.list.d/mesosphere.list
+    sudo apt-get -y update
+
+    # Use `apt-cache showpkg mesos | grep [version]` to find the exact version.
+    sudo apt-get -y install mesos=1.1.0-2.0.107.ubuntu1404
+
+### Mesos on CentOS 7
+
+    sudo rpm -Uvh https://repos.mesosphere.io/el/7/noarch/RPMS/mesosphere-el-repo-7-1.noarch.rpm
+    sudo yum -y install mesos-1.1.0
+
+
+## Troubleshooting
+
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions in our [Troubleshooting guide](../troubleshooting/) to help get you moving.
diff --git a/source/documentation/0.19.1/operations/monitoring.md b/source/documentation/0.19.1/operations/monitoring.md
new file mode 100644
index 0000000..2ceda62
--- /dev/null
+++ b/source/documentation/0.19.1/operations/monitoring.md
@@ -0,0 +1,181 @@
+# Monitoring your Aurora cluster
+
+Before you start running important services in your Aurora cluster, it's important to set up
+monitoring and alerting of Aurora itself.  Most of your monitoring can be against the scheduler,
+since it will give you a global view of what's going on.
+
+## Reading stats
+The scheduler exposes a *lot* of instrumentation data via its HTTP interface. You can get a quick
+peek at the first few of these in our vagrant image:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars | head'
+    async_tasks_completed 1004
+    attribute_store_fetch_all_events 15
+    attribute_store_fetch_all_events_per_sec 0.0
+    attribute_store_fetch_all_nanos_per_event 0.0
+    attribute_store_fetch_all_nanos_total 3048285
+    attribute_store_fetch_all_nanos_total_per_sec 0.0
+    attribute_store_fetch_one_events 3391
+    attribute_store_fetch_one_events_per_sec 0.0
+    attribute_store_fetch_one_nanos_per_event 0.0
+    attribute_store_fetch_one_nanos_total 454690753
+
+These values are served as `Content-Type: text/plain`, with each line containing a space-separated metric
+name and value. Values may be integers, doubles, or strings (note: strings are static, others
+may be dynamic).
+
+If your monitoring infrastructure prefers JSON, the scheduler exports that as well:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars.json | python -mjson.tool | head'
+    {
+        "async_tasks_completed": 1009,
+        "attribute_store_fetch_all_events": 15,
+        "attribute_store_fetch_all_events_per_sec": 0.0,
+        "attribute_store_fetch_all_nanos_per_event": 0.0,
+        "attribute_store_fetch_all_nanos_total": 3048285,
+        "attribute_store_fetch_all_nanos_total_per_sec": 0.0,
+        "attribute_store_fetch_one_events": 3409,
+        "attribute_store_fetch_one_events_per_sec": 0.0,
+        "attribute_store_fetch_one_nanos_per_event": 0.0,
+
+This will be the same data as above, served with `Content-Type: application/json`.
+
+## Viewing live stat samples on the scheduler
+The scheduler uses the Twitter commons stats library, which keeps an internal time-series database
+of exported variables - nearly everything in `/vars` is available for instant graphing.  This is
+useful for debugging, but is not a replacement for an external monitoring system.
+
+You can view these graphs on a scheduler at `/graphview`.  It supports some composition and
+aggregation of values, which can be invaluable when triaging a problem.  For example, if you have
+the scheduler running in vagrant, check out these links:
+[simple graph](http://192.168.33.7:8081/graphview?query=jvm_uptime_secs)
+[complex composition](http://192.168.33.7:8081/graphview?query=rate\(scheduler_log_native_append_nanos_total\)%2Frate\(scheduler_log_native_append_events\)%2F1e6)
+
+### Counters and gauges
+Among numeric stats, there are two fundamental types of stats exported: _counters_ and _gauges_.
+Counters are guaranteed to be monotonically-increasing for the lifetime of a process, while gauges
+may decrease in value.  Aurora uses counters to represent things like the number of times an event
+has occurred, and gauges to capture things like the current length of a queue.  Counters are a
+natural fit for accurate composition into [rate ratios](http://en.wikipedia.org/wiki/Rate_ratio)
+(useful for sample-resistant latency calculation), while gauges are not.
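+
+As an illustration of why counters compose well, a windowed rate can be derived from two samples of
+the same counter via the `/vars` endpoint (a rough sketch; the stat name and sampling interval are
+just examples):
+
+    before=$(curl -s localhost:8081/vars | awk '/^scheduler_log_native_append_events /{print $2}')
+    sleep 60
+    after=$(curl -s localhost:8081/vars | awk '/^scheduler_log_native_append_events /{print $2}')
+    # Average append events per second over the last minute.
+    echo $(( (after - before) / 60 ))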
+
+# Alerting
+
+## Quickstart
+If you are looking for just bare-minimum alerting to get something in place quickly, set up alerting
+on `framework_registered` and `task_store_LOST`. These will give you a decent picture of overall
+health.
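+
+Both values are exposed on the `/vars` endpoint shown above, so a quick manual spot-check might
+look like:
+
+    curl -s localhost:8081/vars | grep -E '^(framework_registered|task_store_LOST) '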
+
+## A note on thresholds
+One of the most difficult things in monitoring is choosing alert thresholds. With many of these
+stats, there is no value we can offer as a threshold that will be guaranteed to work for you. It
+will depend on the size of your cluster, number of jobs, churn of tasks in the cluster, etc. We
+recommend you start with a strict value after viewing a small amount of collected data, and then
+adjust thresholds as you see fit. Feel free to ask us if you would like to validate that your alerts
+and thresholds make sense.
+
+## Important stats
+
+### `jvm_uptime_secs`
+Type: integer counter
+
+The number of seconds the JVM process has been running. Comes from
+[RuntimeMXBean#getUptime()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/RuntimeMXBean.html#getUptime\(\))
+
+Detecting resets (decreasing values) on this stat will tell you that the scheduler is failing to
+stay alive.
+
+Look at the scheduler logs to identify the reason the scheduler is exiting.
+
+### `system_load_avg`
+Type: double gauge
+
+The current load average of the system for the last minute. Comes from
+[OperatingSystemMXBean#getSystemLoadAverage()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/OperatingSystemMXBean.html?is-external=true#getSystemLoadAverage\(\)).
+
+A high sustained value suggests that the scheduler machine may be over-utilized.
+
+Use standard unix tools like `top` and `ps` to track down the offending process(es).
+
+### `process_cpu_cores_utilized`
+Type: double gauge
+
+The current number of CPU cores in use by the JVM process. This should not exceed the number of
+logical CPU cores on the machine. Derived from
+[OperatingSystemMXBean#getProcessCpuTime()](http://docs.oracle.com/javase/7/docs/jre/api/management/extension/com/sun/management/OperatingSystemMXBean.html)
+
+A high sustained value indicates that the scheduler is overworked. Due to current internal design
+limitations, if this value is sustained at `1`, there is a good chance the scheduler is under water.
+
+There are two main inputs that tend to drive this figure: task scheduling attempts and status
+updates from Mesos.  You may see activity in the scheduler logs to give an indication of where
+time is being spent.  Beyond that, it really takes good familiarity with the code to effectively
+triage this.  We suggest engaging with an Aurora developer.
+
+### `task_store_LOST`
+Type: integer gauge
+
+The number of tasks stored in the scheduler that are in the `LOST` state, and have been rescheduled.
+
+If this value is increasing at a high rate, it is a sign of trouble.
+
+There are many sources of `LOST` tasks in Mesos: the scheduler, master, agent, and executor can all
+trigger this.  The first step is to look in the scheduler logs for `LOST` to identify where the
+state changes are originating.
+
+### `scheduler_resource_offers`
+Type: integer counter
+
+The number of resource offers that the scheduler has received.
+
+For a healthy scheduler, this value must be increasing over time.
+
+Assuming the scheduler is up and otherwise healthy, you will want to check if the master thinks it
+is sending offers. You should also look at the master's web interface to see if it has a large
+number of outstanding offers that are waiting to be returned.
+
+### `framework_registered`
+Type: binary integer counter
+
+Will be `1` for the leading scheduler that is registered with the Mesos master, `0` for passive
+schedulers.
+
+A sustained period without a `1` (or where `sum() != 1`) warrants investigation.
+
+If there is no leading scheduler, look in the scheduler and master logs for why.  If there are
+multiple schedulers claiming leadership, this suggests a split brain and warrants filing a critical
+bug.
+
+### `rate(scheduler_log_native_append_nanos_total)/rate(scheduler_log_native_append_events)`
+Type: rate ratio of integer counters
+
+This composes two counters to compute a windowed figure for the latency of replicated log writes.
+
+A hike in this value suggests disk bandwidth contention.
+
+Look in scheduler logs for any reported oddness with saving to the replicated log. Also use
+standard tools like `vmstat` and `iotop` to identify whether the disk has become slow or
+over-utilized. We suggest using a dedicated disk for the replicated log to mitigate this.
+
+### `timed_out_tasks`
+Type: integer counter
+
+Tracks the number of times the scheduler has given up while waiting
+(for `-transient_task_state_timeout`) to hear back about a task that is in a transient state
+(e.g. `ASSIGNED`, `KILLING`), and has moved to `LOST` before rescheduling.
+
+This value is currently known to increase occasionally when the scheduler fails over
+([AURORA-740](https://issues.apache.org/jira/browse/AURORA-740)). However, any large spike in this
+value warrants investigation.
+
+The scheduler will log when it times out a task. You should trace the task ID of the timed out
+task into the master, agent, and/or executors to determine where the message was dropped.
+
+### `http_500_responses_events`
+Type: integer counter
+
+The total number of HTTP 500 status responses sent by the scheduler. Includes API and asset serving.
+
+An increase warrants investigation.
+
+Look in scheduler logs to identify why the scheduler returned a 500; there should be a stack trace.
diff --git a/source/documentation/0.19.1/operations/security.md b/source/documentation/0.19.1/operations/security.md
new file mode 100644
index 0000000..1fdf10f
--- /dev/null
+++ b/source/documentation/0.19.1/operations/security.md
@@ -0,0 +1,362 @@
+Securing your Aurora Cluster
+============================
+
+Aurora integrates with [Apache Shiro](http://shiro.apache.org/) to provide security
+controls for its API. In addition to providing some useful features out of the box, Shiro
+also allows Aurora cluster administrators to adapt the security system to their organization’s
+existing infrastructure. The announcer in the Aurora thermos executor also supports security
+controls for talking to ZooKeeper.
+
+
+- [Enabling Security](#enabling-security)
+- [Authentication](#authentication)
+	- [HTTP Basic Authentication](#http-basic-authentication)
+		- [Server Configuration](#server-configuration)
+		- [Client Configuration](#client-configuration)
+	- [HTTP SPNEGO Authentication (Kerberos)](#http-spnego-authentication-kerberos)
+		- [Server Configuration](#server-configuration-1)
+		- [Client Configuration](#client-configuration-1)
+- [Authorization](#authorization)
+	- [Using an INI file to define security controls](#using-an-ini-file-to-define-security-controls)
+		- [Caveats](#caveats)
+- [Implementing a Custom Realm](#implementing-a-custom-realm)
+	- [Packaging a realm module](#packaging-a-realm-module)
+- [Announcer Authentication](#announcer-authentication)
+    - [ZooKeeper authentication configuration](#zookeeper-authentication-configuration)
+    - [Executor settings](#executor-settings)
+- [Scheduler HTTPS](#scheduler-https)
+- [Known Issues](#known-issues)
+
+# Enabling Security
+
+There are two major components of security:
+[authentication and authorization](http://en.wikipedia.org/wiki/Authentication#Authorization).  A
+cluster administrator may choose the approach used for each, and may also implement custom
+mechanisms for either.  Later sections describe the options available. To enable authentication
+for the announcer, see [Announcer Authentication](#announcer-authentication).
+
+
+# Authentication
+
+The scheduler must be configured with instructions for how to process authentication
+credentials at a minimum.  There are currently two built-in authentication schemes -
+[HTTP Basic Authentication](http://en.wikipedia.org/wiki/Basic_access_authentication), and
+[SPNEGO](http://en.wikipedia.org/wiki/SPNEGO) (Kerberos).
+
+## HTTP Basic Authentication
+
+Basic Authentication is a very quick way to add *some* security.  It is supported
+by all major browsers and HTTP client libraries with minimal work.  However,
+before relying on Basic Authentication you should be aware of the [security
+considerations](http://tools.ietf.org/html/rfc2617#section-4).
+
+### Server Configuration
+
+At a minimum you need to set the following command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_realm_modules=INI_AUTHNZ
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+[users]
+sally = apple, admin
+
+[roles]
+admin = *
+```
+
+The details of the security.ini file are explained below. Note that this file contains plaintext,
+unhashed passwords.
+
+### Client Configuration
+
+To configure the client for HTTP Basic authentication, add an entry to ~/.netrc with your credentials
+
+```
+% cat ~/.netrc
+# ...
+
+machine aurora.example.com
+login sally
+password apple
+
+# ...
+```
+
+No changes are required to `clusters.json`.
+
+## HTTP SPNEGO Authentication (Kerberos)
+
+### Server Configuration
+At a minimum you need to set the following command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=NEGOTIATE
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+% cat path/to/security.ini
+[users]
+sally = _, admin
+
+[roles]
+admin = *
+```
+
+What's going on here? First, Aurora must be configured to request Kerberos credentials when presented with an
+unauthenticated request. This is achieved by setting
+
+```
+-http_authentication_mechanism=NEGOTIATE
+```
+
+Next, a Realm module must be configured to **authenticate** the current request using the Kerberos
+credentials that were requested. Aurora ships with a realm module that can do this
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN[,...]
+```
+
+The Kerberos5Realm requires a keytab file and a server principal name. The principal name will usually
+be in the form `HTTP/aurora.example.com@EXAMPLE.COM`.
+
+```
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+```
+
+The Kerberos5 realm module is authentication-only. For scheduler security to work you must also
+enable a realm module that provides an Authorizer implementation. For example, to do this using the
+IniShiroRealmModule:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+```
+
+You can then configure authorization using a security.ini file as described below
+(the password field is ignored). You must configure the realm module with the path to this file:
+
+```
+-shiro_ini_path=path/to/security.ini
+```
+
+### Client Configuration
+To use Kerberos on the client-side you must build Kerberos-enabled client binaries. Do this with
+
+```
+./pants binary src/main/python/apache/aurora/kerberos:kaurora
+./pants binary src/main/python/apache/aurora/kerberos:kaurora_admin
+```
+
+You must also configure each cluster where you've enabled Kerberos on the scheduler
+to use Kerberos authentication. Do this by setting `auth_mechanism` to `KERBEROS`
+in `clusters.json`.
+
+```
+% cat ~/.aurora/clusters.json
+{
+    "devcluser": {
+        "auth_mechanism": "KERBEROS",
+        ...
+    },
+    ...
+}
+```
+
+# Authorization
+Given a means to authenticate the entity a client claims to be, we need to define what privileges it has.
+
+## Using an INI file to define security controls
+
+The simplest security configuration for Aurora is an INI file on the scheduler.  For small
+clusters, or clusters where the users and access controls change relatively infrequently, this is
+likely the preferred approach.  However you may want to avoid this approach if access permissions
+are rapidly changing, or if your access control information already exists in another system.
+
+You can enable INI-based configuration with following scheduler command line arguments:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_ini_path=path/to/security.ini
+```
+
+*note* As the argument name reveals, this is using Shiro’s
+[IniRealm](http://shiro.apache.org/configuration.html#Configuration-INIConfiguration) behind
+the scenes.
+
+The INI file will contain two sections - users and roles.  Here’s an example for what might
+be in security.ini:
+
+```
+[users]
+sally = apple, admin
+jim = 123456, accounting
+becky = letmein, webapp
+larry = 654321,accounting
+steve = password
+
+[roles]
+admin = *
+accounting = thrift.AuroraAdmin:setQuota
+webapp = thrift.AuroraSchedulerManager:*:webapp
+```
+
+The users section defines user credentials and the role(s) they are members of.  These lines
+are of the format `<user> = <password>[, <role>...]`.  As you probably noticed, the passwords are
+in plaintext and as a result read access to this file should be restricted.
+
+In this configuration, each user has different privileges for actions in the cluster because
+of the roles they are a part of:
+
+* admin is granted all privileges
+* accounting may adjust the amount of resource quota for any role
+* webapp represents a collection of jobs that represents a service, and its members may create and modify any jobs owned by it
+
+### Caveats
+You might find documentation on the Internet suggesting there are additional sections in `shiro.ini`,
+like `[main]` and `[urls]`. These are not supported by Aurora as it uses a different mechanism to configure
+those parts of Shiro. Think of Aurora's `security.ini` as a subset with only `[users]` and `[roles]` sections.
+
+## Implementing Delegated Authorization
+
+It is possible to leverage Shiro's `runAs` feature by implementing a custom Servlet Filter that provides
+the capability and passing its fully qualified class name to the command line argument
+`-shiro_after_auth_filter`. The filter is registered in the same filter chain as the Shiro auth filters
+and is placed after the Shiro auth filters in the filter chain. This ensures that the Filter is invoked
+after the Shiro filters have had a chance to authenticate the request.
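+
+For example, with a hypothetical filter class that is packaged on the scheduler classpath:
+
+```
+-shiro_after_auth_filter=com.example.MyRunAsAuthFilter
+```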
+
+# Implementing a Custom Realm
+
+Since Aurora’s security is backed by [Apache Shiro](https://shiro.apache.org), you can implement a
+custom [Realm](http://shiro.apache.org/realm.html) to define organization-specific security behavior.
+
+In addition to using Shiro's standard APIs to implement a Realm you can link against Aurora to
+access the type-safe Permissions Aurora uses. See the Javadoc for `org.apache.aurora.scheduler.spi`
+for more information.
+
+## Packaging a realm module
+Package your custom Realm(s) with a Guice module that exposes a `Set<Realm>` multibinding.
+
+```java
+package com.example;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.multibindings.Multibinder;
+import org.apache.shiro.realm.Realm;
+
+public class MyRealmModule extends AbstractModule {
+  @Override
+  public void configure() {
+    Realm myRealm = new MyRealm();
+
+    Multibinder.newSetBinder(binder(), Realm.class).addBinding().toInstance(myRealm);
+  }
+
+  static class MyRealm implements Realm {
+    // Realm implementation.
+  }
+}
+```
+
+To use your module in the scheduler, include it as a realm module based on its fully-qualified
+class name:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ,com.example.MyRealmModule
+```
+
+
+# Announcer Authentication
+The Thermos executor can be configured to authenticate with ZooKeeper and include
+an [ACL](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl)
+on the nodes it creates, which will specify
+the privileges of clients to perform different actions on these nodes.  This
+feature is enabled by specifying an ACL configuration file to the executor with the
+`--announcer-zookeeper-auth-config` command line argument.
+
+When this feature is _not_ enabled, nodes created by the executor will have 'world/all' permission
+(`ZOO_OPEN_ACL_UNSAFE`).  In most production environments, operators should specify an ACL and
+limit access.
+
+## ZooKeeper Authentication Configuration
+The configuration file must be formatted as JSON with the following schema:
+
+```json
+{
+  "auth": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>"
+    }
+  ],
+  "acl": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>",
+      "permissions": {
+        "read": <bool>,
+        "write": <bool>,
+        "create": <bool>,
+        "delete": <bool>,
+        "admin": <bool>
+      }
+    }
+  ]
+}
+```
+
+The `scheme`
+defines the encoding of the credential field.  Note that these fields are passed directly to
+ZooKeeper (except in the case of _digest_ scheme, where the executor will hash and encode
+the credential appropriately before passing it to ZooKeeper). In addition to `acl`, a list of
+authentication credentials must be provided in `auth` to use for the connection.
+
+All properties of the `permissions` object will default to False if not provided.
+
+## Executor settings
+To enable the executor to authenticate against ZK, `--announcer-zookeeper-auth-config` should be
+set to the configuration file.
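+
+For example, a configuration using ZooKeeper's `digest` scheme might be created as follows (the
+file path and credentials are purely illustrative):
+
+```
+cat <<'EOF' > /etc/aurora/announcer-auth.json
+{
+  "auth": [
+    {"scheme": "digest", "credential": "aurora:secret"}
+  ],
+  "acl": [
+    {
+      "scheme": "digest",
+      "credential": "aurora:secret",
+      "permissions": {"read": true, "write": true, "create": true, "delete": true, "admin": true}
+    }
+  ]
+}
+EOF
+```
+
+The executor would then be launched with
+`--announcer-zookeeper-auth-config=/etc/aurora/announcer-auth.json`.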
+
+
+# Scheduler HTTPS
+
+The Aurora scheduler does not provide native HTTPS support ([AURORA-343](https://issues.apache.org/jira/browse/AURORA-343)).
+It is therefore recommended to deploy it behind an HTTPS capable reverse proxy such as nginx or Apache2.
+
+A simple setup is to launch both the reverse proxy and the Aurora scheduler on the same port, but
+bind the reverse proxy to the public IP of the host and the scheduler to localhost:
+
+    -ip=127.0.0.1
+    -http_port=8081
+
+If your clients connect to the scheduler via [`proxy_url`](../../reference/scheduler-configuration/),
+you can update it to `https`. If you use the ZooKeeper based discovery instead, the scheduler
+needs to be launched via
+
+    -serverset_endpoint_name=https
+
+in order to announce its HTTPS support within ZooKeeper.
+
+
+# Known Issues
+
+While the APIs and SPIs we ship with are stable as of 0.8.0, we are aware of several incremental
+improvements. Please follow, vote, or send patches.
+
+Relevant tickets:
+* [AURORA-1248](https://issues.apache.org/jira/browse/AURORA-1248): Client retries 4xx errors
+* [AURORA-1279](https://issues.apache.org/jira/browse/AURORA-1279): Remove kerberos-specific build targets
+* [AURORA-1293](https://issues.apache.org/jira/browse/AURORA-1291): Consider defining a JSON format in place of INI
+* [AURORA-1179](https://issues.apache.org/jira/browse/AURORA-1179): Support hashed passwords in security.ini
+* [AURORA-1295](https://issues.apache.org/jira/browse/AURORA-1295): Support security for the ReadOnlyScheduler service
diff --git a/source/documentation/0.19.1/operations/storage.md b/source/documentation/0.19.1/operations/storage.md
new file mode 100644
index 0000000..fc3a1c3
--- /dev/null
+++ b/source/documentation/0.19.1/operations/storage.md
@@ -0,0 +1,96 @@
+# Aurora Scheduler Storage
+
+- [Overview](#overview)
+- [Storage Semantics](#storage-semantics)
+  - [Reads, writes, modifications](#reads-writes-modifications)
+    - [Read lifecycle](#read-lifecycle)
+    - [Write lifecycle](#write-lifecycle)
+  - [Atomicity, consistency and isolation](#atomicity-consistency-and-isolation)
+  - [Population on restart](#population-on-restart)
+
+
+## Overview
+
+The Aurora scheduler maintains data that needs to be persisted to survive failovers and restarts.
+For example:
+
+* Task configurations and scheduled task instances
+* Job update configurations and update progress
+* Production resource quotas
+* Mesos resource offer host attributes
+
+Aurora solves its persistence needs by leveraging the
+[Mesos implementation of a Paxos replicated log](http://mesos.apache.org/documentation/latest/replicated-log-internals/)
+[[1]](https://ramcloud.stanford.edu/~ongaro/userstudy/paxos.pdf)
+[[2]](http://en.wikipedia.org/wiki/State_machine_replication) with a key-value
+[LevelDB](https://github.com/google/leveldb) storage as persistence media.
+
+Conceptually, it can be represented by the following major components:
+
+* Volatile storage: in-memory cache of all available data. Implemented via in-memory
+[H2 Database](http://www.h2database.com/html/main.html) and accessed via
+[MyBatis](http://mybatis.github.io/mybatis-3/).
+* Log manager: interface between Aurora storage and Mesos replicated log. The default schema format
+is [thrift](https://github.com/apache/thrift). Data is stored in serialized binary form.
+* Snapshot manager: all data is periodically persisted in Mesos replicated log in a single snapshot.
+This helps establish periodic recovery checkpoints and speeds up volatile storage recovery on
+restart.
+* Backup manager: as a precaution, snapshots are periodically written out into backup files.
+This solves a [disaster recovery problem](../backup-restore/)
+in case of a complete loss or corruption of Mesos log files.
+
+![Storage hierarchy](../images/storage_hierarchy.png)
+
+
+## Storage Semantics
+
+Implementation details of the Aurora storage system. Understanding those can sometimes be useful
+when investigating performance issues.
+
+### Reads, writes, modifications
+
+All services in Aurora access data via a set of predefined store interfaces (aka stores) logically
+grouped by the type of data they serve. Every interface defines a specific set of operations allowed
+on the data thus abstracting out the storage access and the actual persistence implementation. The
+latter is especially important in view of a general immutability of persisted data. With the Mesos
+replicated log as the underlying persistence solution, data can be read and written easily but not
+modified. All modifications are simulated by saving new versions of modified objects. This feature
+and general performance considerations justify the existence of the volatile in-memory store.
+
+#### Read lifecycle
+
+There are two types of reads available in Aurora: consistent and weakly-consistent. The difference
+is explained [below](#atomicity-consistency-and-isolation).
+
+All reads are served from the volatile storage making reads generally cheap storage operations
+from the performance standpoint. The majority of the volatile stores are represented by the
+in-memory H2 database. This allows for rich schema definitions, queries and relationships that
+key-value storage is unable to match.
+
+#### Write lifecycle
+
+Writes are more involved operations since, in addition to updating the volatile store, data has to
+be appended to the replicated log. Data is not available for reads until fully acknowledged by both
+the replicated log and volatile storage.
+
+### Atomicity, consistency and isolation
+
+Aurora uses [write-ahead logging](http://en.wikipedia.org/wiki/Write-ahead_logging) to ensure
+consistency between replicated and volatile storage. In Aurora, data is first written into the
+replicated log and only then updated in the volatile store.
+
+Aurora storage uses read-write locks to serialize data mutations and provide consistent view of the
+available data. The available `Storage` interface exposes 3 major types of operations:
+* `consistentRead` - access is locked using the reader's lock and provides a consistent view on read
+* `weaklyConsistentRead` - access is lock-less. Delivers the best contention performance but may result
+in stale reads
+* `write` - access is fully serialized using the writer's lock. Operation success requires both
+volatile and replicated writes to succeed.
+
+The consistency of the volatile store is enforced via H2 transactional isolation.
+
+### Population on restart
+
+Any time a scheduler restarts, it restores its volatile state from the most recent position recorded
+in the replicated log by restoring the snapshot and replaying individual log entries on top to fully
+recover the state up to the last write.
diff --git a/source/documentation/0.19.1/operations/troubleshooting.md b/source/documentation/0.19.1/operations/troubleshooting.md
new file mode 100644
index 0000000..a7d4ef2
--- /dev/null
+++ b/source/documentation/0.19.1/operations/troubleshooting.md
@@ -0,0 +1,106 @@
+# Troubleshooting
+
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions here to help get you moving.
+
+## Replicated log not initialized
+
+### Symptoms
+- Scheduler RPCs and web interface claim `Storage is not READY`
+- Scheduler log repeatedly prints messages like
+
+  ```
+  I1016 16:12:27.234133 26081 replica.cpp:638] Replica in EMPTY status
+  received a broadcasted recover request
+  I1016 16:12:27.234256 26084 recover.cpp:188] Received a recover response
+  from a replica in EMPTY status
+  ```
+
+### Solution
+When you create a new cluster, you need to inform a quorum of schedulers that they are safe to
+consider their database to be empty by [initializing](../installation/#finalizing) the
+replicated log. This is done to prevent the scheduler from modifying the cluster state in the event
+of multiple simultaneous disk failures or, more likely, misconfiguration of the replicated log path.
+
+
+## No distinct leader elected
+
+### Symptoms
+Either no scheduler or multiple schedulers believe they are leading.
+
+### Solution
+Verify the [network configuration](../configuration/#network-configuration) of the Aurora
+scheduler is correct:
+
+* The `LIBPROCESS_IP:LIBPROCESS_PORT` endpoints must be reachable from all coordinator nodes running
+  a scheduler or a Mesos master.
+* Hostname lookups have to resolve to public IPs rather than local ones that cannot be reached
+  from another node.
+
+In addition, double-check the [quota settings](../configuration/#replicated-log-configuration) of the
+replicated log.
+
+
+## Scheduler not registered
+
+### Symptoms
+Scheduler log contains
+
+    Framework has not been registered within the tolerated delay.
+
+### Solution
+Double-check that the scheduler is configured correctly to reach the Mesos master. If you are registering
+the master in ZooKeeper, make sure the command line argument to the master:
+
+    --zk=zk://$ZK_HOST:2181/mesos/master
+
+is the same as the one on the scheduler:
+
+    -mesos_master_address=zk://$ZK_HOST:2181/mesos/master
+
+
+## Scheduler not running
+
+### Symptoms
+The scheduler process commits suicide regularly. This happens under error conditions, but
+also on purpose at regular intervals.
+
+### Solution
+Aurora is meant to be run under supervision. You have to configure a supervisor like
+[Monit](http://mmonit.com/monit/), [supervisord](http://supervisord.org/), or systemd to run the
+scheduler and restart it whenever it fails or exits on purpose.
+
+Aurora supports an active health checking protocol on its admin HTTP interface - if a `GET /health`
+times out or returns anything other than `200 OK` the scheduler process is unhealthy and should be
+restarted.
+
+For example, monit can be configured with
+
+    if failed port 8081 send "GET /health HTTP/1.0\r\n" expect "OK\n" with timeout 2 seconds for 10 cycles then restart
+
+assuming you set `-http_port=8081`.
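+
+You can also exercise the health endpoint by hand; a healthy scheduler responds with `200` and the
+body `OK`:
+
+    curl -s -o /dev/null -w '%{http_code}\n' localhost:8081/health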
+
+
+## Executor crashing or hanging
+
+### Symptoms
+Launched task instances never transition to `STARTING` or `RUNNING` but immediately transition
+to `FAILED` or `LOST`.
+
+### Solution
+The executor might be failing due to unknown internal errors such as a missing native dependency
+of the Mesos executor library. Open the Mesos UI and navigate to the failing
+task in question. Inspect the various log files in order to learn about what is going on.
+
+
+## Observer does not discover tasks
+
+### Symptoms
+The observer UI does not list any tasks. When navigating from the scheduler UI to the state of
+a particular task instance the observer returns `Error: 404 Not Found`.
+
+### Solution
+The observer refreshes its internal state every couple of seconds. If waiting a few seconds
+does not resolve the issue, check that the `--mesos-root` setting of the observer and the
+`--work_dir` option of the Mesos agent are in sync. For details, see our
+[Install instructions](../installation/#worker-configuration).
diff --git a/source/documentation/0.19.1/operations/upgrades.md b/source/documentation/0.19.1/operations/upgrades.md
new file mode 100644
index 0000000..635faf5
--- /dev/null
+++ b/source/documentation/0.19.1/operations/upgrades.md
@@ -0,0 +1,41 @@
+# Upgrading Aurora
+
+Aurora can be updated from one version to the next without any downtime or restarts of running
+jobs. The same holds true for Mesos.
+
+Generally speaking, Mesos and Aurora strive for a +1/-1 version compatibility, i.e. all components
+are meant to be forward and backwards compatible for at least one version. This implies it
+does not really matter in which order updates are carried out.
+
+Exceptions to this rule are documented in the [Aurora release-notes](../../../RELEASE-NOTES/)
+and the [Mesos upgrade instructions](https://mesos.apache.org/documentation/latest/upgrades/).
+
+
+## Instructions
+
+To upgrade Aurora, follow these steps:
+
+1. Update the first scheduler instance by updating its software and restarting its process.
+2. Wait until the scheduler is up and its [Replicated Log](../configuration/#replicated-log-configuration)
+   has caught up with the other schedulers in the cluster. The log has caught up if `log/recovered` has
+   the value `1`. You can check the metric via `curl LIBPROCESS_IP:LIBPROCESS_PORT/metrics/snapshot`,
+   where ip and port refer to the [libprocess configuration](../configuration/#network-configuration)
+   settings of the scheduler instance (see the example after this list).
+3. Proceed with the next scheduler until all instances are updated.
+4. Update the Aurora executor deployed to the compute nodes of your cluster. Jobs will continue
+   running with the old version of the executor, and will only be launched by the new one once
+   they are restarted eventually due to natural cluster churn.
+5. Distribute the new Aurora client to your users.
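+
+For step 2, a concrete check could look like the following sketch (substitute the libprocess ip and
+port of the scheduler instance being verified; a value of `1` means the log has caught up):
+
+    curl -s "$LIBPROCESS_IP:$LIBPROCESS_PORT/metrics/snapshot" | python -mjson.tool | grep 'log/recovered'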
+
+
+## Best Practices
+
+Even though not absolutely mandatory, we advise adhering to the following rules:
+
+* Never skip any major or minor releases when updating. If you have to catch up several releases you
+  have to deploy all intermediary versions. Skipping bugfix releases is acceptable though.
+* Verify all updates on a test cluster before touching your production deployments.
+* To minimize the number of failovers during updates, update the currently leading scheduler
+  instance last.
+* Update the Aurora executor on a subset of compute nodes as a canary before deploying the change to
+  the whole fleet.
diff --git a/source/documentation/0.19.1/reference/client-cluster-configuration.md b/source/documentation/0.19.1/reference/client-cluster-configuration.md
new file mode 100644
index 0000000..ff7a3ac
--- /dev/null
+++ b/source/documentation/0.19.1/reference/client-cluster-configuration.md
@@ -0,0 +1,99 @@
+# Client Cluster Configuration
+
+A cluster configuration file is used by the Aurora client to describe the Aurora clusters with
+which it can communicate. Ultimately this allows client users to reference clusters with short names
+like us-east and eu.
+
+A cluster configuration is formatted as JSON.  The simplest cluster configuration is one that
+communicates with a single (non-leader-elected) scheduler.  For example:
+
+    [{
+      "name": "example",
+      "scheduler_uri": "http://localhost:55555",
+    }]
+
+
+A configuration for a leader-elected scheduler would contain something like:
+
+    [{
+      "name": "example",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler"
+    }]
+
+
+The following properties may be set:
+
+  **Property**             | **Type** | **Description**
+  :------------------------| :------- | :--------------
+   **name**                | String   | Cluster name (Required)
+   **slave_root**          | String   | Path to Mesos agent work dir (Required)
+   **slave_run_directory** | String   | Name of Mesos agent run dir (Required)
+   **zk**                  | String   | Hostname of ZooKeeper instance used to resolve Aurora schedulers.
+   **zk_port**             | Integer  | Port of ZooKeeper instance used to locate Aurora schedulers (Default: 2181)
+   **scheduler_zk_path**   | String   | ZooKeeper path under which scheduler instances are registered.
+   **scheduler_uri**       | String   | URI of Aurora scheduler instance.
+   **proxy_url**           | String   | Used by the client to format URLs for display.
+   **auth_mechanism**      | String   | The authentication mechanism to use when communicating with the scheduler. (Default: UNAUTHENTICATED)
+   **docker_registry**     | String   | Used by the client to resolve docker tags.
+
+
+## Details
+
+### `name`
+
+The name of the Aurora cluster represented by this entry. This name will be the `cluster` portion of
+any job keys identifying jobs running within the cluster.
+
+### `slave_root`
+
+The path on the Mesos agents where executing tasks can be found. It is used in combination with the
+`slave_run_directory` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This value should match the value passed to `mesos-slave`
+as `-work_dir`.
+
+### `slave_run_directory`
+
+The name of the directory where the task run can be found. This is used in combination with the
+`slave_root` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This should almost always be set to `latest`.
+
+### `zk`
+
+The hostname of the ZooKeeper instance used to resolve the Aurora scheduler. Aurora uses ZooKeeper
+to elect a leader. The client will connect to this ZooKeeper instance to determine the current
+leader. This host should match the host passed to the scheduler as `-zk_endpoints`.
+
+### `zk_port`
+
+The port on which the ZooKeeper instance is running. If not set this will default to the standard
+ZooKeeper port of 2181. This port should match the port in the host passed to the scheduler as
+`-zk_endpoints`.
+
+### `scheduler_zk_path`
+
+The path on the ZooKeeper instance under which the Aurora serverset is registered. This value should
+match the value passed to the scheduler as `-serverset_path`.
+
+### `scheduler_uri`
+
+The URI of the scheduler. This would be used in place of the ZooKeeper related configuration above
+in circumstances where direct communication with a single scheduler is needed (e.g. testing
+environments). It is strongly advised to **never** use this property for production deploys.
+
+### `proxy_url`
+
+Instead of using the hostname of the leading scheduler as the base url, if `proxy_url` is set, its
+value will be used instead. In that scenario the value for `proxy_url` would be, for example, the
+URL of your VIP in a loadbalancer or a roundrobin DNS name.
+
+### `auth_mechanism`
+
+The identifier of an authentication mechanism that the client should use when communicating with the
+scheduler. Support for values other than `UNAUTHENTICATED` requires a matching scheduler-side
+[security configuration](../../operations/security/).
+
+### `docker_registry`
+
+The URI of the Docker Registry that will be used by the Aurora client to resolve docker tags to concrete
+image ids, when using the docker binding helper, like `{{docker.image[name][tag]}}`.
diff --git a/source/documentation/0.19.1/reference/client-commands.md b/source/documentation/0.19.1/reference/client-commands.md
new file mode 100644
index 0000000..49b45ca
--- /dev/null
+++ b/source/documentation/0.19.1/reference/client-commands.md
@@ -0,0 +1,339 @@
+Aurora Client Commands
+======================
+
+- [Introduction](#introduction)
+- [Cluster Configuration](#cluster-configuration)
+- [Job Keys](#job-keys)
+- [Modifying Aurora Client Commands](#modifying-aurora-client-commands)
+- [Regular Jobs](#regular-jobs)
+    - [Creating and Running a Job](#creating-and-running-a-job)
+    - [Running a Command On a Running Job](#running-a-command-on-a-running-job)
+    - [Killing a Job](#killing-a-job)
+    - [Adding Instances](#adding-instances)
+    - [Updating a Job](#updating-a-job)
+        - [Coordinated job updates](#coordinated-job-updates)
+    - [Renaming a Job](#renaming-a-job)
+    - [Restarting Jobs](#restarting-jobs)
+- [Cron Jobs](#cron-jobs)
+- [Comparing Jobs](#comparing-jobs)
+- [Viewing/Examining Jobs](#viewingexamining-jobs)
+    - [Listing Jobs](#listing-jobs)
+    - [Inspecting a Job](#inspecting-a-job)
+    - [Versions](#versions)
+    - [Checking Your Quota](#checking-your-quota)
+    - [Finding a Job on Web UI](#finding-a-job-on-web-ui)
+    - [Getting Job Status](#getting-job-status)
+    - [Opening the Web UI](#opening-the-web-ui)
+    - [SSHing to a Specific Task Machine](#sshing-to-a-specific-task-machine)
+    - [SCPing with Specific Task Machines](#scping-with-specific-task-machines)
+    - [Templating Command Arguments](#templating-command-arguments)
+
+Introduction
+------------
+
+Once you have written an `.aurora` configuration file that describes
+your Job and its parameters and functionality, you interact with Aurora
+using Aurora Client commands. This document describes all of these commands
+and how and when to use them. All Aurora Client commands start with
+`aurora`, followed by the name of the specific command and its
+arguments.
+
+*Job keys* are a very common argument to Aurora commands, as well as the
+gateway to useful information about a Job. Before using Aurora, you
+should read the next section which describes them in detail. The section
+after that briefly describes how you can modify the behavior of certain
+Aurora Client commands, linking to a detailed document about how to do
+that.
+
+This is followed by the Regular Jobs section, which describes the basic
+Client commands for creating, running, and manipulating Aurora Jobs.
+After that are sections on Comparing Jobs and Viewing/Examining Jobs. In
+other words, various commands for getting information and metadata about
+Aurora Jobs.
+
+Cluster Configuration
+---------------------
+
+The client must be able to find a configuration file that specifies available clusters. This file
+declares shorthand names for clusters, which are in turn referenced by job configuration files
+and client commands.
+
+The client will load at most two configuration files, making both of their defined clusters
+available. The first is intended to be a system-installed cluster file, using the path specified in
+the environment variable `AURORA_CONFIG_ROOT`, defaulting to `/etc/aurora/clusters.json` if the
+environment variable is not set. The second is a user-installed file, located at
+`~/.aurora/clusters.json`.
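+
+For example, to point the client at a system configuration in a non-default location (a sketch,
+assuming `AURORA_CONFIG_ROOT` names the directory containing the `clusters.json` file; the path is
+illustrative):
+
+    export AURORA_CONFIG_ROOT=/opt/aurora   # the client will then read /opt/aurora/clusters.json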
+
+For more details on cluster configuration see the
+[Client Cluster Configuration](../client-cluster-configuration/) documentation.
+
+Job Keys
+--------
+
+A job key is a unique system-wide identifier for an Aurora-managed
+Job, for example `cluster1/web-team/test/experiment204`. It is a 4-tuple
+consisting of, in order, *cluster*, *role*, *environment*, and
+*jobname*, separated by slashes (`/`). Cluster is the name of an Aurora
+cluster. Role is the Unix service account under which the Job
+runs. Environment is a namespace component like `devel`, `test`,
+`prod`, or `stagingN`. Jobname is the Job's name.
+
+The combination of all four values uniquely specifies the Job. If any
+one value is different from that of another job key, the two job keys
+refer to different Jobs. For example, job key
+`cluster1/tyg/prod/workhorse` is different from
+`cluster1/tyg/prod/workcamel` is different from
+`cluster2/tyg/prod/workhorse` is different from
+`cluster2/foo/prod/workhorse` is different from
+`cluster1/tyg/test/workhorse`.
+
+Role names are user accounts existing on the agent machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
+Modifying Aurora Client Commands
+--------------------------------
+
+For certain Aurora Client commands, you can define hook methods that run
+either before or after an action that takes place during the command's
+execution, as well as based on whether the action finished successfully or failed
+during execution. Basically, a hook is code that lets you extend the
+command's actions. The hook executes on the client side, specifically on
+the machine executing Aurora commands.
+
+Hooks can be associated with these Aurora Client commands:
+
+  - `job create`
+  - `job kill`
+  - `job restart`
+
+The process for writing and activating them is complex enough
+that we explain it in a devoted document, [Hooks for Aurora Client API](../client-hooks/).
+
+Regular Jobs
+------------
+
+This section covers Aurora commands related to running, killing,
+renaming, updating, and restarting a basic Aurora Job.
+
+### Creating and Running a Job
+
+    aurora job create <job key> <configuration file>
+
+Creates and then runs a Job with the specified job key based on a `.aurora` configuration file.
+The configuration file may also contain and activate hook definitions.
+
+### Running a Command On a Running Job
+
+    aurora task run CLUSTER/ROLE/ENV/NAME[/INSTANCES] <cmd>
+
+Runs a shell command on all machines currently hosting shards of a
+single Job.
+
+`run` supports the same command line wildcards used to populate a Job's
+commands; i.e. anything in the `{{mesos.*}}` and `{{thermos.*}}`
+namespaces.
+
+### Killing a Job
+
+    aurora job killall CLUSTER/ROLE/ENV/NAME
+
+Kills all Tasks associated with the specified Job, blocking until all
+are terminated. Defaults to killing all instances in the Job.
+
+The `<configuration file>` argument for `kill` is optional. Use it only
+if it contains hook definitions and activations that affect the
+kill command.
+
+### Adding Instances
+
+    aurora job add CLUSTER/ROLE/ENV/NAME/INSTANCE <count>
+
+Adds `<count>` instances to the existing job. The configuration of the new instances is derived from
+an active job instance pointed to by the `/INSTANCE` part of the job specification. This command is
+a simpler way to scale out an existing job when an instance with desired task configuration
+already exists. Use `aurora update start` to add instances with a new (updated) configuration.
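+
+For example, to add two instances cloned from instance 0 of the hello world job used elsewhere in
+this document:
+
+    aurora job add devcluster/www-data/prod/hello/0 2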
+
+### Updating a Job
+
+You can manage job updates using the `aurora update` command.  Please see
+[the Job Update documentation](../../features/job-updates/) for more details.
+
+
+### Renaming a Job
+
+Renaming is a tricky operation as downstream clients must be informed of
+the new name. A conservative approach
+to renaming suitable for production services is:
+
+1.  Modify the Aurora configuration file to change the role,
+    environment, and/or name as appropriate to the standardized naming
+    scheme.
+2.  Check that only these naming components have changed
+    with `aurora diff`.
+
+        aurora job diff CLUSTER/ROLE/ENV/NAME <job_configuration>
+
+3.  Create the (identical) job at the new key. You may need to request a
+    temporary quota increase.
+
+        aurora job create CLUSTER/ROLE/ENV/NEW_NAME <job_configuration>
+
+4.  Migrate all clients over to the new job key. Update all links and
+    dashboards. Ensure that both job keys run identical versions of the
+    code while in this state.
+5.  After verifying that all clients have successfully moved over, kill
+    the old job.
+
+        aurora job killall CLUSTER/ROLE/ENV/NAME
+
+6.  If you received a temporary quota increase, be sure to let the
+    powers that be know you no longer need the additional capacity.
+
+### Restarting Jobs
+
+`restart` restarts all shards of the Job identified by a job key:
+
+    aurora job restart CLUSTER/ROLE/ENV/NAME[/INSTANCES]
+
+Restarts are controlled on the client side, so aborting
+the `job restart` command halts the restart operation.
+
+**Note**: `job restart` only applies its command line arguments and neither uses nor is affected
+by `update.config`. Restarting
+does ***not*** involve a configuration change. To update the
+configuration, use `update.config`.
+
+The `--config` argument for restart is optional. Use it only
+if it contains hook definitions and activations that affect the
+`job restart` command.
+
+Cron Jobs
+---------
+
+You can manage cron jobs using the `aurora cron` command.  Please see
+[the Cron Jobs Feature](../../features/cron-jobs/) for more details.
+
+Comparing Jobs
+--------------
+
+    aurora job diff CLUSTER/ROLE/ENV/NAME <job configuration>
+
+Compares a job configuration against a running job. By default the diff
+is determined using `diff`, though you may choose an alternate
+ diff program by specifying the `DIFF_VIEWER` environment variable.
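+
+For example (the choice of `vimdiff` here is purely illustrative):
+
+    DIFF_VIEWER=vimdiff aurora job diff devcluster/www-data/prod/hello /vagrant/examples/jobs/hello_world.aurora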
+
+Viewing/Examining Jobs
+----------------------
+
+Above we discussed creating, killing, and updating Jobs. Here we discuss
+how to view and examine Jobs.
+
+### Listing Jobs
+
+    aurora config list <job configuration>
+
+Lists all Jobs registered with the Aurora scheduler in the named cluster for the named role.
+
+### Inspecting a Job
+
+    aurora job inspect CLUSTER/ROLE/ENV/NAME <job configuration>
+
+`inspect` verifies that its specified job can be parsed from a
+configuration file, and displays the parsed configuration.
+
+### Checking Your Quota
+
+    aurora quota get CLUSTER/ROLE
+
+Prints the production quota allocated to the role at the given
+cluster. Only non-[dedicated](../../features/constraints/#dedicated-attribute)
+[production](../configuration/#job-objects) jobs consume quota.
+
+### Finding a Job on Web UI
+
+When you create a job, part of the output response contains a URL that goes
+to the job's scheduler UI page. For example:
+
+    vagrant@precise64:~$ aurora job create devcluster/www-data/prod/hello /vagrant/examples/jobs/hello_world.aurora
+    INFO] Creating job hello
+    INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+    INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+You can go to the scheduler UI page for this job via `http://precise64:8081/scheduler/www-data/prod/hello`.
+You can go to the overall scheduler UI page by truncating that URL after `scheduler`: `http://precise64:8081/scheduler`.
+
+Once you click through to a role page, you see Jobs arranged
+separately by pending jobs, active jobs and finished jobs.
+Jobs are arranged by role, typically a service account for
+production jobs and user accounts for test or development jobs.
+
+### Getting Job Status
+
+    aurora job status <job_key>
+
+Returns the status of recent tasks associated with the
+`job_key` specified Job in its supplied cluster. Typically this includes
+a mix of active tasks (running or assigned) and inactive tasks
+(successful, failed, and lost.)
+
+### Opening the Web UI
+
+Use the Job's web UI scheduler URL or the `aurora status` command to find out on which
+machines individual tasks are scheduled. You can open the web UI via the
+`open` command line command if invoked from your machine:
+
+    aurora job open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+If only the cluster is specified, it goes directly to that cluster's
+scheduler main page. If the role is specified, it goes to the top-level
+role page. If the full job key is specified, it goes directly to the job
+page where you can inspect individual tasks.
+
+### SSHing to a Specific Task Machine
+
+    aurora task ssh <job_key> <shard number>
+
+You can have the Aurora client ssh directly to the machine that has been
+assigned a particular Job/shard number. This may be useful for quickly
+diagnosing issues such as performance issues or abnormal behavior on a
+particular machine.
+
+### SCPing to and from Specific Task Machines
+
+    aurora task scp [<cluster>/<role>/<env>/<job_name>/<instance_id>]:source [<cluster>/<role>/<env>/<job_name>/<instance_id>]:dest
+
+You can have the Aurora client copy file(s)/folder(s) to, from, and between
+individual tasks. The sandbox folder serves as the relative root and is the
+same folder you see when you browse `chroot` from the Scheduler task UI. You
+can also use absolute paths (like for `/tmp`), but tilde expansion is not
+supported. Currently, this command is only fully supported for Mesos
+containers. Users may use this to copy files from Docker containers but they
+cannot copy files to them.
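+
+For example, to copy a file out of instance 0's sandbox to your local `/tmp` directory (the source path here is illustrative):
+
+    aurora task scp devcluster/www-data/prod/hello/0:logs/app.log /tmp/app.log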
+
+### Templating Command Arguments
+
+    aurora task run [-e] [-t THREADS] <job_key> -- <<command-line>>
+
+Given a job specification, run the supplied command on all hosts and
+return the output. You may use the standard Mustache templating rules:
+
+- `{{thermos.ports[name]}}` substitutes the specific named port of the
+  task assigned to this machine
+- `{{mesos.instance}}` substitutes the shard id of the job's task
+  assigned to this machine
+- `{{thermos.task_id}}` substitutes the task id of the job's task
+  assigned to this machine
+
+For example, the following type of pattern can be a powerful diagnostic
+tool:
+
+    aurora task run -t5 cluster1/tyg/devel/seizure -- \
+      'curl -s -m1 localhost:{{thermos.ports[http]}}/vars | grep uptime'
+
+By default, the command runs in the Task's sandbox. The `-e` option can
+run the command in the executor's sandbox. This is mostly useful for
+Aurora administrators.
+
+You can parallelize the runs by using the `-t` option.
diff --git a/source/documentation/0.19.1/reference/client-hooks.md b/source/documentation/0.19.1/reference/client-hooks.md
new file mode 100644
index 0000000..ee64193
--- /dev/null
+++ b/source/documentation/0.19.1/reference/client-hooks.md
@@ -0,0 +1,228 @@
+# Hooks for Aurora Client API
+
+You can execute hook methods around Aurora API Client methods when they are called by the Aurora Command Line commands.
+
+Explaining how hooks work is a bit tricky because of some indirection about what they apply to. Basically, a hook is code that executes when a particular Aurora Client API method runs, letting you extend the method's actions. The hook executes on the client side, specifically on the machine executing Aurora commands.
+
+The catch is that hooks are associated with Aurora Client API methods, which users don't directly call. Instead, users call Aurora Command Line commands, which call Client API methods during their execution. Since which hooks run depends on which Client API methods get called, you will need to know which Command Line commands call which API methods. Later on, there is a table showing the various associations.
+
+**Terminology Note**: From now on, "method(s)" refers to Client API methods, and "command(s)" refers to Command Line commands.
+
+- [Hook Types](#hook-types)
+- [Execution Order](#execution-order)
+- [Hookable Methods](#hookable-methods)
+- [Activating and Using Hooks](#activating-and-using-hooks)
+- [.aurora Config File Settings](#aurora-config-file-settings)
+- [Command Line](#command-line)
+- [Hooks Protocol](#hooks-protocol)
+  - [pre_ Methods](#pre_-methods)
+  - [err_ Methods](#err_-methods)
+  - [post_ Methods](#post_-methods)
+- [Generic Hooks](#generic-hooks)
+- [Hooks Process Checklist](#hooks-process-checklist)
+
+
+## Hook Types
+
+Hooks have three basic types, differing by when they run with respect to their associated method.
+
+`pre_<method_name>`: When its associated method is called, the `pre_` hook executes first, then the called method. If the `pre_` hook fails, the method never runs. Later code that expected the method to succeed may be affected by this, which can result in the Aurora client terminating.
+
+Note that a `pre_` hook can error-trap internally so it does not
+return `False`. Designers/contributors of new `pre_` hooks should
+consider whether or not to error-trap them. You can error trap at the
+highest level very generally and always pass the `pre_` hook by
+returning `True`. For example:
+
+    def pre_create(...):
+      do_something()  # if do_something fails with an exception, the create_job is not attempted!
+      return True
+
+    # However...
+    def pre_create(...):
+      try:
+        do_something()  # may cause exception
+      except Exception:  # generic error trap will catch it
+        pass  # and ignore the exception
+      return True  # create_job will run in any case!
+
+`post_<method_name>`: A `post_` hook executes after its associated method successfully finishes running. If it fails, the already executed method is unaffected. A `post_` hook's error is trapped, and any later operations are unaffected.
+
+`err_<method_name>`: Executes only when its associated method returns a status other than OK or throws an exception. If an `err_` hook fails, the already executed method is unaffected. An `err_` hook's error is trapped, and any later operations are unaffected.
+
+## Execution Order
+
+A command with `pre_`, `post_`, and `err_` hooks defined and activated for its called method executes in the following order when the method successfully executes:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and successfully finishes
+6. `post_` method hook runs
+7. Command code executes
+8. Command execution ends
+
+The following is what happens when, for the same command and hooks, the method associated with the command suffers an error and does not successfully finish executing:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and fails
+6. `err_` method hook runs
+7. Command Code executes (if `err_` method does not end the command execution)
+8. Command execution ends
+
+Note that the `post_` and `err_` hooks for the same method can never both run for a single execution of that method.
+
+## Hookable Methods
+
+You can associate `pre_`, `post_`, and `err_` hooks with the following methods. Since you do not directly interact with the methods, but rather the Aurora Command Line commands that call them, for each method we also list the command(s) that can call the method. Note that a different method or methods may be called by a command depending on how the command's other code executes. Similarly, multiple commands can call the same method. We also list the methods' argument signatures, which are used by their associated hooks. <a name="Chart"></a>
+
+  Aurora Client API Method | Client API Method Argument Signature | Aurora Command Line Command
+  -------------------------| ------------------------------------- | ---------------------------
+  ```create_job``` | ```self```, ```config``` | ```job create```, ```runtask```
+  ```restart``` | ```self```, ```job_key```, ```shards```, ```update_config```, ```health_check_interval_seconds``` | ```job restart```
+  ```kill_job``` | ```self```, ```job_key```, ```shards=None``` |  ```job kill```
+  ```start_cronjob``` | ```self```, ```job_key``` | ```cron start```
+  ```start_job_update``` | ```self```, ```config```, ```instances=None``` | ```update start```
+
+Some specific examples:
+
+* `pre_create_job` executes when a `create_job` method is called, and before the `create_job` method itself executes.
+
+* `post_cancel_update` executes after a `cancel_update` method has successfully finished running.
+
+* `err_kill_job` executes when the `kill_job` method is called, but doesn't successfully finish running.
+
+## Activating and Using Hooks
+
+By default, hooks are inactive. If you do not want to use hooks, you do not need to make any changes to your code. If you do want to use hooks, you will need to alter your `.aurora` config file to activate them both for the configuration as a whole as well as for individual `Job`s. And, of course, you will need to define in your config file what happens when a particular hook executes.
+
+## .aurora Config File Settings
+
+You can define a top-level `hooks` variable in any `.aurora` config file. `hooks` is a list of all objects that define hooks used by `Job`s defined in that config file. If you do not want to define any hooks for a configuration, `hooks` is optional.
+
+    hooks = [Object_with_defined_hooks1, Object_with_defined_hooks2]
+
+Be careful when assembling a config file using `include` on multiple smaller config files. If there are multiple files that assign a value to `hooks`, only the last assignment made will stick. For example, if `x.aurora` has `hooks = [a, b, c]` and `y.aurora` has `hooks = [d, e, f]` and `z.aurora` has, in this order, `include x.aurora` and `include y.aurora`, the `hooks` value will be `[d, e, f]`.
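+
+To illustrate the scenario above, a sketch of the three files (where `a` through `f` stand in for hook-defining objects):
+
+    # x.aurora
+    hooks = [a, b, c]
+
+    # y.aurora
+    hooks = [d, e, f]
+
+    # z.aurora -- after both includes run, hooks is [d, e, f]
+    include('x.aurora')
+    include('y.aurora')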
+
+Also, for any `Job` that you want to use hooks with, its `Job` definition in the `.aurora` config file must set an `enable_hooks` flag to `True` (it defaults to `False`). By default, hooks are disabled and you must enable them for `Job`s of your choice.
+
+To summarize, to use hooks for a particular job, you must both activate hooks for your config file as a whole, and for that job. Activating hooks only for individual jobs won't work, nor will only activating hooks for your config file as a whole. You must also specify the hooks' defining object in the `hooks` variable.
+
+Recall that `.aurora` config files are written in Pystachio. So the following turns on hooks for production jobs at cluster1 and cluster2, but leaves them off for similar jobs with a defined user role. Of course, you also need to list the objects that define the hooks in your config file's `hooks` variable.
+
+    jobs = [
+            Job(enable_hooks = True, cluster = c, env = 'prod') for c in ('cluster1', 'cluster2')
+           ]
+    jobs.extend(
+       Job(cluster = c, env = 'prod', role = getpass.getuser()) for c in ('cluster1', 'cluster2'))
+       # Hooks disabled for these jobs
+
+## Command Line
+
+All Aurora Command Line commands now accept an `.aurora` config file as an optional parameter (some, of course, accept it as a required parameter). Whenever a command has a `.aurora` file parameter, any hooks specified and activated in the `.aurora` file can be used. For example:
+
+    aurora job restart cluster1/role/env/app myapp.aurora
+
+The command activates any hooks specified and activated in `myapp.aurora`. For the `restart` command, that is the only thing the `myapp.aurora` parameter does. So, if the command was the following, since there is no `.aurora` config file to specify any hooks, no hooks on the `restart` command can run.
+
+    aurora job restart cluster1/role/env/app
+
+## Hooks Protocol
+
+Any object defined in the `.aurora` config file can define hook methods. You should define your hook methods within a class, and then use the class name as a value in the `hooks` list in your config file.
+
+Note that you can define other methods in the class that its hook methods can call; all the logic of a hook does not have to be in its definition.
+
+The following example defines a class containing a `pre_kill_job` hook definition that calls another method defined in the class.
+
+    # Defines a method pre_kill_job
+    class KillConfirmer(object):
+      def confirm(self, msg):
+        return raw_input(msg).lower() == 'yes'
+
+      def pre_kill_job(self, job_key, shards=None):
+        shards = ('shards %s' % shards) if shards is not None else 'all shards'
+        return self.confirm('Are you sure you want to kill %s (%s)? (yes/no): '
+                            % (job_key, shards))
+
+### pre_ Methods
+
+`pre_` methods have the signature:
+
+    pre_<API method name>(self, <associated method's signature>)
+
+`pre_` methods have the same signature as their associated method, with the addition of `self` as the first parameter. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `pre_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+If this method returns False, the API command call aborts.
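+
+As a sketch (not taken from the Aurora codebase), here is a `pre_restart` hook using the `restart` signature from the chart above, which refuses restarts that touch more than ten shards at once:
+
+    class RestartGuard(object):
+      def pre_restart(self, job_key, shards, update_config, health_check_interval_seconds):
+        # Returning False aborts the restart API call.
+        if shards is not None and len(shards) > 10:
+          print('Refusing to restart more than 10 shards of %s at once' % job_key)
+          return False
+        return True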
+
+### err_ Methods
+
+`err_` methods have the signature:
+
+    err_<API method name>(self, exc, <associated method's signature>)
+
+`err_` methods have the same signature as their associated method, with the addition of a first parameter `self` and a second parameter `exc`. `exc` is either a result with responseCode other than `ResponseCode.OK` or an `Exception`. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `err_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+`err_` method return codes are ignored.
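+
+A sketch (not from the Aurora codebase) of an `err_kill_job` hook using the `kill_job` signature from the chart; `exc` is a non-OK result or an exception:
+
+    class KillAuditor(object):
+      def err_kill_job(self, exc, job_key, shards=None):
+        # The return value of an err_ hook is ignored; just record the failure.
+        print('kill_job failed for %s (shards=%s): %s' % (job_key, shards, exc))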
+
+### post_ Methods
+
+`post_` methods have the signature:
+
+    post_<API method name>(self, result, <associated method signature>)
+
+`post_` method parameters are `self`, then `result`, followed by the same parameter signature as their associated method. `result` is the result of the associated method call. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `post_` methods, you can use the `*` and `**` syntax to designate that all unspecified arguments are passed in a list to the `*`ed parameter and all unspecified named arguments with values are passed as name/value pairs to the `**`ed parameter.
+
+`post_` method return codes are ignored.
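+
+A sketch (not from the Aurora codebase) of a `post_create_job` hook using the `create_job` signature from the chart; `result` is whatever the `create_job` call returned:
+
+    class CreateAnnouncer(object):
+      def post_create_job(self, result, config):
+        # The return value of a post_ hook is ignored; just log the success.
+        print('create_job finished with result: %s' % (result,))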
+
+## Generic Hooks
+
+There are seven Aurora API Methods which any of the three hook types can attach to. Thus, there are 21 possible hook/method combinations for a single `.aurora` config file. Say that you define `pre_` and `post_` hooks for the `restart` method. That leaves 19 undefined hook/method combinations; `err_restart` and the 3 `pre_`, `post_`, and `err_` hooks for each of the other 6 hookable methods. You can define what happens when any of these otherwise undefined 19 hooks execute via a generic hook, whose signature is:
+
+    generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw)
+
+where:
+
+* `hook_config` is a named tuple of `config` (the Pystachio `config` object) and `job_key`.
+
+* `event` is one of `pre`, `err`, or `post`, indicating which type of hook the generic hook is standing in for. For example, assume no specific hooks were defined for the `restart` API command. If `generic_hook` is defined and activated, and `restart` is called, `generic_hook` will effectively run as `pre_restart`, `post_restart`, and `err_restart`. You can use a selection statement on this value so that `generic_hook` will act differently based on whether it is standing in for a `pre_`, `post_`, or `err_` hook.
+
+* `method_name` is the Client API method name whose execution is causing this execution of the `generic_hook`.
+
+* `*args`, `**kw` are the API method arguments and keyword arguments respectively.
+* `result_or_err` is a tri-state parameter taking one of these three values:
+  1. `None` for `pre_` hooks
+  2. `result` for `post_` hooks
+  3. `exc` for `err_` hooks
+
+Example:
+
+    # Overrides the standard do-nothing generic_hook by adding a log writing operation.
+    from twitter.common import log
+
+    class Logger(object):
+      '''Adds to the log every time a hookable API method is called'''
+      def generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw):
+        log.info('%s: %s_%s of %s'
+                 % (self.__class__.__name__, event, method_name, hook_config.job_key))
+
+## Hooks Process Checklist
+
+1. In your `.aurora` config file, add a `hooks` variable. Note that you may want to define a `.aurora` file only for hook definitions and then include this file in multiple other config files that you want to use the same hooks.
+
+        hooks = []
+
+2. In the `hooks` variable, list all objects that define hooks used by `Job`s defined in this config:
+
+        hooks = [Object_hook_definer1, Object_hook_definer2]
+
+3. For each job that uses hooks in this config file, add `enable_hooks = True` to the `Job` definition. Note that this is necessary even if you only want to use the generic hook.
+
+4. Write your `pre_`, `post_`, and `err_` hook definitions as part of an object definition in your `.aurora` config file.
+
+5. If desired, write your `generic_hook` definition as part of an object definition in your `.aurora` config file. Remember, the object must be listed as a member of `hooks`.
+
+6. If your Aurora command line command does not otherwise take an `.aurora` config file argument, add the appropriate `.aurora` file as an argument in order to define and activate the configuration's hooks.
diff --git a/source/documentation/0.19.1/reference/configuration-best-practices.md b/source/documentation/0.19.1/reference/configuration-best-practices.md
new file mode 100644
index 0000000..9ce7fcb
--- /dev/null
+++ b/source/documentation/0.19.1/reference/configuration-best-practices.md
@@ -0,0 +1,187 @@
+Aurora Configuration Best Practices
+===================================
+
+Use As Few .aurora Files As Possible
+------------------------------------
+
+When creating your `.aurora` configuration, try to keep all versions of
+a particular job within the same `.aurora` file. For example, if you
+have separate jobs for `cluster1`, `cluster1` staging, `cluster1`
+testing, and `cluster2`, keep them as close together as possible.
+
+Constructs shared across multiple jobs owned by your team (e.g.
+team-level defaults or structural templates) can be split into separate
+`.aurora` files and included via the `include` directive.
+
+
+Avoid Boilerplate
+------------------
+
+If you see repetition or find yourself copy and pasting any parts of
+your configuration, it's likely an opportunity for templating. Take the
+example below:
+
+`redundant.aurora` contains:
+
+    download = Process(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'rm -rf Python-2.7.3 && tar xjf Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    build = Process(
+      name = 'build',
+      cmdline = 'pushd Python-2.7.3 && ./configure && make && popd',
+      max_failures = 1)
+
+    email = Process(
+      name = 'email',
+      cmdline = 'echo Success | mail feynman@tmc.com',
+      max_failures = 5,
+      min_duration = 1)
+
+    build_python = Task(
+      name = 'build_python',
+      processes = [download, unpack, build, email],
+      constraints = [Constraint(order = ['download', 'unpack', 'build', 'email'])])
+
+As you'll notice, there's a lot of repetition in the `Process`
+definitions. For example, almost every process sets a `max_failures`
+limit to 5 and a `min_duration` to 1. This is an opportunity for factoring
+into a common process template.
+
+Furthermore, the Python version is repeated everywhere. This can be
+bound via structural templating as described in the [Advanced Binding](../configuration-templating/#AdvancedBinding)
+section.
+
+`less_redundant.aurora` contains:
+
+    class Python(Struct):
+      version = Required(String)
+      base = Default(String, 'Python-{{version}}')
+      package = Default(String, '{{base}}.tar.bz2')
+
+    ReliableProcess = Process(
+      max_failures = 5,
+      min_duration = 1)
+
+    download = ReliableProcess(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/{{python.version}}/{{python.package}}')
+
+    unpack = ReliableProcess(
+      name = 'unpack',
+      cmdline = 'rm -rf {{python.base}} && tar xjf {{python.package}}')
+
+    build = ReliableProcess(
+      name = 'build',
+      cmdline = 'pushd {{python.base}} && ./configure && make && popd',
+      max_failures = 1)
+
+    email = ReliableProcess(
+      name = 'email',
+      cmdline = 'echo Success | mail {{role}}@foocorp.com')
+
+    build_python = SequentialTask(
+      name = 'build_python',
+      processes = [download, unpack, build, email]).bind(python = Python(version = "2.7.3"))
+
+
+Thermos Uses bash, But Thermos Is Not bash
+-------------------------------------------
+
+#### Bad
+
+Many tiny Processes make for harder-to-manage configurations.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'rcp user@my_machine:my_application .'
+    )
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+#### Good
+
+Each `cmdline` runs in a bash subshell, so you have the full power of
+bash. Chaining commands with `&&` or `||` is almost always the right
+thing to do.
+
+Also for Tasks that are simply a list of processes that run one after
+another, consider using the `SequentialTask` helper which applies a
+linear ordering constraint for you.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'rcp user@my_machine:my_application . && unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+
+Rarely Use Functions In Your Configurations
+-------------------------------------------
+
+90% of the time you define a function in a `.aurora` file, you're
+probably Doing It Wrong(TM).
+
+#### Bad
+
+    def get_my_task(name, user, cpu, ram, disk):
+      return Task(
+        name = name,
+        user = user,
+        processes = [STAGE_PROCESS, RUN_PROCESS],
+        constraints = order(STAGE_PROCESS, RUN_PROCESS),
+        resources = Resources(cpu = cpu, ram = ram, disk = disk)
+      )
+
+    task_one = get_my_task('task_one', 'feynman', 1.0, 32*MB, 1*GB)
+    task_two = get_my_task('task_two', 'feynman', 2.0, 64*MB, 1*GB)
+
+#### Good
+
+This one is more idiomatic. Forced keyword arguments prevent accidents,
+e.g. constructing a task with "32*MB" when you mean 32MB of ram and not
+disk. Less proliferation of task-construction techniques means an
+easier-to-read, quicker-to-understand, and more composable
+configuration.
+
+    TASK_TEMPLATE = SequentialTask(
+      user = 'wickman',
+      processes = [STAGE_PROCESS, RUN_PROCESS],
+    )
+
+    task_one = TASK_TEMPLATE(
+      name = 'task_one',
+      resources = Resources(cpu = 1.0, ram = 32*MB, disk = 1*GB)
+    )
+
+    task_two = TASK_TEMPLATE(
+      name = 'task_two',
+      resources = Resources(cpu = 2.0, ram = 64*MB, disk = 1*GB)
+    )
diff --git a/source/documentation/0.19.1/reference/configuration-templating.md b/source/documentation/0.19.1/reference/configuration-templating.md
new file mode 100644
index 0000000..16d5c9d
--- /dev/null
+++ b/source/documentation/0.19.1/reference/configuration-templating.md
@@ -0,0 +1,306 @@
+Aurora Configuration Templating
+===============================
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a templating library called
+*Pystachio*, a powerful tool for configuration specification and reuse.
+
+[Aurora Configuration Reference](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the *Aurora+Thermos Configuration
+Reference* without `import` statements - the Aurora config loader
+injects them automatically. Other than that the `.aurora` format
+works like any other Python script.
+
+
+Templating 1: Binding in Pystachio
+----------------------------------
+
+Pystachio uses the visually distinctive {{}} to indicate template
+variables. These are often called "mustache variables" after the
+similarly appearing variables in the Mustache templating system and
+because the curly braces resemble mustaches.
+
+If you are familiar with the Mustache system, templates in Pystachio
+have significant differences. They have no nesting, joining, or
+inheritance semantics. On the other hand, when evaluated, templates
+are evaluated iteratively, so this affords some level of indirection.
+
+Let's start with the simplest template; text with one
+variable, in this case `name`:
+
+    Hello {{name}}
+
+If we evaluate this as is, we'd get back:
+
+    Hello
+
+If a template variable doesn't have a value, when evaluated it's
+replaced with nothing. If we add a binding to give it a value:
+
+    { "name" : "Tom" }
+
+We'd get back:
+
+    Hello Tom
+
+Every Pystachio object has an associated `.bind` method that can bind
+values to {{}} variables. Bindings are not immediately evaluated.
+Instead, they are evaluated only when the interpolated value of the
+object is necessary, e.g. for performing equality or serializing a
+message over the wire.
+
+Objects with and without mustache templated variables behave
+differently:
+
+    >>> Float(1.5)
+    Float(1.5)
+
+    >>> Float('{{x}}.5')
+    Float({{x}}.5)
+
+    >>> Float('{{x}}.5').bind(x = 1)
+    Float(1.5)
+
+    >>> Float('{{x}}.5').bind(x = 1) == Float(1.5)
+    True
+
+    >>> contextual_object = String('{{metavar{{number}}}}').bind(
+    ... metavar1 = "first", metavar2 = "second")
+
+    >>> contextual_object
+    String({{metavar{{number}}}})
+
+    >>> contextual_object.bind(number = 1)
+    String(first)
+
+    >>> contextual_object.bind(number = 2)
+    String(second)
+
+You usually bind simple key to value pairs, but you can also bind three
+other objects: lists, dictionaries, and structurals. These will be
+described in detail later.
+
+
+### Structurals in Pystachio / Aurora
+
+Most Aurora/Thermos users don't ever (knowingly) interact with `String`,
+`Float`, or `Integer` Pystachio objects directly. Instead they interact
+with derived structural (`Struct`) objects that are collections of
+fundamental and structural objects. The structural object components are
+called *attributes*. Aurora's most used structural objects are `Job`,
+`Task`, and `Process`:
+
+    class Process(Struct):
+      cmdline = Required(String)
+      name = Required(String)
+      max_failures = Default(Integer, 1)
+      daemon = Default(Boolean, False)
+      ephemeral = Default(Boolean, False)
+      min_duration = Default(Integer, 5)
+      final = Default(Boolean, False)
+
+Construct default objects by following the object's type with (). If you
+want an attribute to have a value different from its default, include
+the attribute name and value inside the parentheses.
+
+    >>> Process()
+    Process(daemon=False, max_failures=1, ephemeral=False,
+      min_duration=5, final=False)
+
+Attribute values can be template variables, which then receive specific
+values when creating the object.
+
+    >>> Process(cmdline = 'echo {{message}}')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo {{message}}, final=False)
+
+    >>> Process(cmdline = 'echo {{message}}').bind(message = 'hello world')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+A powerful binding property is that all of an object's children inherit its
+bindings:
+
+    >>> List(Process)([
+    ... Process(name = '{{prefix}}_one'),
+    ... Process(name = '{{prefix}}_two')
+    ... ]).bind(prefix = 'hello')
+    ProcessList(
+      Process(daemon=False, name=hello_one, max_failures=1, ephemeral=False, min_duration=5, final=False),
+      Process(daemon=False, name=hello_two, max_failures=1, ephemeral=False, min_duration=5, final=False)
+      )
+
+Remember that an Aurora Job contains Tasks which contain Processes. A
+Job level binding is inherited by its Tasks and all their Processes.
+Similarly a Task level binding is available to that Task and its
+Processes but is *not* visible at the Job level (inheritance is a
+one-way street.)
+
+#### Mustaches Within Structurals
+
+When you define a `Struct` schema, one powerful, but confusing, feature
+is that all of that structure's attributes are Mustache variables within
+the enclosing scope *once they have been populated*.
+
+For example, when `Process` is defined above, all its attributes such as
+{{`name`}}, {{`cmdline`}}, {{`max_failures`}} etc., are all immediately
+defined as Mustache variables, implicitly bound into the `Process`, and
+are inherited by all child objects once they are defined.
+
+Thus, you can do the following:
+
+    >>> Process(name = "installer", cmdline = "echo {{name}} is running")
+    Process(daemon=False, name=installer, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo installer is running, final=False)
+
+WARNING: This binding only takes place in one direction. For example,
+the following does NOT work and does not set the `Process` `name`
+attribute's value.
+
+    >>> Process().bind(name = "installer")
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5, final=False)
+
+The following is also not possible and results in an infinite loop that
+attempts to resolve `Process.name`.
+
+    >>> Process(name = '{{name}}').bind(name = 'installer')
+
+Do not confuse Structural attributes with bound Mustache variables.
+Attributes are implicitly converted to Mustache variables but not vice
+versa.
+
+### Templating 2: Structurals Are Factories
+
+#### A Second Way of Templating
+
+A second templating method is both as powerful as the aforementioned and
+often confused with it. This method is due to automatic conversion of
+Struct attributes to Mustache variables as described above.
+
+Suppose you create a Process object:
+
+    >>> p = Process(name = "process_one", cmdline = "echo hello world")
+
+    >>> p
+    Process(daemon=False, name=process_one, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+This `Process` object, "`p`", can be used wherever a `Process` object is
+needed. It can also be reused by changing the value(s) of its
+attribute(s). Here we change its `name` attribute from `process_one` to
+`process_two`.
+
+    >>> p(name = "process_two")
+    Process(daemon=False, name=process_two, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+Template creation is a common use for this technique:
+
+    >>> Daemon = Process(daemon = True)
+    >>> logrotate = Daemon(name = 'logrotate', cmdline = './logrotate conf/logrotate.conf')
+    >>> mysql = Daemon(name = 'mysql', cmdline = 'bin/mysqld --safe-mode')
+
+### Advanced Binding
+
+As described above, `.bind()` binds simple strings or numbers to
+Mustache variables. In addition to Structural types formed by combining
+atomic types, Pystachio has two container types; `List` and `Map` which
+can also be bound via `.bind()`.
+
+#### Bind Syntax
+
+The `bind()` function can take Python dictionaries or `kwargs`
+interchangeably (when "`kwargs`" is in a function definition, `kwargs`
+receives a Python dictionary containing all keyword arguments after the
+formal parameter list).
+
+    >>> String('{{foo}}').bind(foo = 'bar') == String('{{foo}}').bind({'foo': 'bar'})
+    True
+
+Bindings done "closer" to the object in question take precedence:
+
+    >>> p = Process(name = '{{context}}_process')
+    >>> t = Task().bind(context = 'global')
+    >>> t(processes = [p, p.bind(context = 'local')])
+    Task(processes=ProcessList(
+      Process(daemon=False, name=global_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5),
+      Process(daemon=False, name=local_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5)
+    ))
+
+#### Binding Complex Objects
+
+##### Lists
+
+    >>> fibonacci = List(Integer)([1, 1, 2, 3, 5, 8, 13])
+    >>> String('{{fib[4]}}').bind(fib = fibonacci)
+    String(5)
+
+##### Maps
+
+    >>> first_names = Map(String, String)({'Kent': 'Clark', 'Wayne': 'Bruce', 'Prince': 'Diana'})
+    >>> String('{{first[Kent]}}').bind(first = first_names)
+    String(Clark)
+
+##### Structurals
+
+    >>> String('{{p.cmdline}}').bind(p = Process(cmdline = "echo hello world"))
+    String(echo hello world)
+
+### Structural Binding
+
+Use structural templates when binding more than two or three individual
+values at the Job or Task level. For fewer values, standard
+key-to-string binding is sufficient.
+
+Structural binding is a very powerful pattern and is most useful in
+Aurora/Thermos for doing Structural configuration. For example, you can
+define a job profile. The following profile uses `HDFS`, the Hadoop
+Distributed File System, to designate a file's location. `HDFS` does
+not come with Aurora, so you'll need to either install it separately
+or change the way the dataset is designated.
+
+    class Profile(Struct):
+      version = Required(String)
+      environment = Required(String)
+      dataset = Default(String, 'hdfs://home/aurora/data/{{environment}}')
+
+    PRODUCTION = Profile(version = 'live', environment = 'prod')
+    DEVEL = Profile(version = 'latest',
+                    environment = 'devel',
+                    dataset = 'hdfs://home/aurora/data/test')
+    TEST = Profile(version = 'latest', environment = 'test')
+
+    JOB_TEMPLATE = Job(
+      name = 'application',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = '{{profile.environment}}',
+      task = SequentialTask(
+        name = 'task',
+        resources = Resources(cpu = 2, ram = 4*GB, disk = 8*GB),
+        processes = [
+          Process(name = 'main',
+                  cmdline = 'java -jar application.jar -hdfsPath {{profile.dataset}}')
+        ]
+       )
+     )
+
+    jobs = [
+      JOB_TEMPLATE(instances = 100).bind(profile = PRODUCTION),
+      JOB_TEMPLATE.bind(profile = DEVEL),
+      JOB_TEMPLATE.bind(profile = TEST),
+     ]
+
+In this case, a custom structural "Profile" is created to self-document
+the configuration to some degree. This also allows some schema
+"type-checking" and default self-substitution, e.g. in
+`Profile.dataset` above.
+
+So rather than a `.bind()` with a half-dozen substituted variables, you
+can bind a single object that has sensible defaults stored in a single
+place.
diff --git a/source/documentation/0.19.1/reference/configuration-tutorial.md b/source/documentation/0.19.1/reference/configuration-tutorial.md
new file mode 100644
index 0000000..c170d47
--- /dev/null
+++ b/source/documentation/0.19.1/reference/configuration-tutorial.md
@@ -0,0 +1,531 @@
+Aurora Configuration Tutorial
+=============================
+
+How to write Aurora configuration files, including feature descriptions
+and best practices. When writing a configuration file, make use of
+`aurora job inspect`. It takes the same job key and configuration file
+arguments as `aurora job create` or `aurora update start`. It first ensures the
+configuration parses, then outputs it in human-readable form.
+
+You should read this after going through the general [Aurora Tutorial](../../getting-started/tutorial/).
+
+- [The Basics](#the-basics)
+	- [Use Bottom-To-Top Object Ordering](#use-bottom-to-top-object-ordering)
+- [An Example Configuration File](#an-example-configuration-file)
+- [Defining Process Objects](#defining-process-objects)
+- [Getting Your Code Into The Sandbox](#getting-your-code-into-the-sandbox)
+- [Defining Task Objects](#defining-task-objects)
+	- [SequentialTask: Running Processes in Parallel or Sequentially](#sequentialtask-running-processes-in-parallel-or-sequentially)
+	- [SimpleTask](#simpletask)
+	- [Combining tasks](#combining-tasks)
+- [Defining Job Objects](#defining-job-objects)
+- [The jobs List](#the-jobs-list)
+- [Basic Examples](#basic-examples)
+
+
+The Basics
+----------
+
+To run a job on Aurora, you must specify a configuration file that tells
+Aurora what it needs to know to schedule the job, what Mesos needs to
+run the tasks the job is made up of, and what Thermos needs to run the
+processes that make up the tasks. This file must have
+a `.aurora` suffix.
+
+A configuration file defines a collection of objects, along with parameter
+values for their attributes. An Aurora configuration file contains the
+following three types of objects:
+
+- Job
+- Task
+- Process
+
+A configuration also specifies a list of `Job` objects assigned
+to the variable `jobs`.
+
+- jobs (list of defined Jobs to run)
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a type-checked dictionary
+templating library called *Pystachio*, a powerful tool for
+configuration specification and reuse. Pystachio objects are tailored
+via {{}} surrounded templates.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the [*Aurora Configuration
+Reference*](../configuration/), without `import` statements - the
+Aurora config loader injects them automatically. Other than that, an `.aurora`
+file works like any other Python script.
+
+[*Aurora Configuration Reference*](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+### Use Bottom-To-Top Object Ordering
+
+A well-structured configuration starts with structural templates (if
+any). Structural templates encapsulate in their attributes all the
+differences between Jobs in the configuration that are not directly
+manipulated at the `Job` level, but typically at the `Process` or `Task`
+level. For example, if certain processes are invoked with slightly
+different settings or input.
+
+After structural templates, define, in order, `Process`es, `Task`s, and
+`Job`s.
+
+Structural template names should be *UpperCamelCased* and their
+instantiations are typically *UPPER\_SNAKE\_CASED*. `Process`, `Task`,
+and `Job` names are typically *lower\_snake\_cased*. Indentation is typically 2
+spaces.
+
+An Example Configuration File
+-----------------------------
+
+The following is a typical configuration file. Don't worry if there are
+parts you don't understand yet, but you may want to refer back to this
+as you read about its individual parts. Note that names surrounded by
+curly braces {{}} are template variables, which the system replaces with
+bound values for the variables.
+
+    # --- templates here ---
+	class Profile(Struct):
+	  package_version = Default(String, 'live')
+	  java_binary = Default(String, '/usr/lib/jvm/java-1.7.0-openjdk/bin/java')
+	  extra_jvm_options = Default(String, '')
+	  parent_environment = Default(String, 'prod')
+	  parent_serverset = Default(String,
+                                 '/foocorp/service/bird/{{parent_environment}}/bird')
+
+	# --- processes here ---
+	main = Process(
+	  name = 'application',
+	  cmdline = '{{profile.java_binary}} -server -Xmx1792m '
+	            '{{profile.extra_jvm_options}} '
+	            '-jar application.jar '
+	            '-upstreamService {{profile.parent_serverset}}'
+	)
+
+	# --- tasks ---
+	base_task = SequentialTask(
+	  name = 'application',
+	  processes = [
+	    Process(
+	      name = 'fetch',
+	      cmdline = 'curl -O https://packages.foocorp.com/{{profile.package_version}}/application.jar'),
+	  ]
+	)
+
+        # not always necessary but often useful to have separate task
+        # resource classes
+        staging_task = base_task(resources =
+                         Resources(cpu = 1.0,
+                                   ram = 2048*MB,
+                                   disk = 1*GB))
+	production_task = base_task(resources =
+                            Resources(cpu = 4.0,
+                                      ram = 2560*MB,
+                                      disk = 10*GB))
+
+	# --- job template ---
+	job_template = Job(
+	  name = 'application',
+	  role = 'myteam',
+	  contact = 'myteam-team@foocorp.com',
+	  instances = 20,
+	  service = True,
+	  task = production_task
+	)
+
+	# -- profile instantiations (if any) ---
+	PRODUCTION = Profile()
+	STAGING = Profile(
+	  extra_jvm_options = '-Xloggc:gc.log',
+	  parent_environment = 'staging'
+	)
+
+	# -- job instantiations --
+	jobs = [
+          job_template(cluster = 'cluster1', environment = 'prod')
+	               .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster2', environment = 'prod')
+	                .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster1',
+                        environment = 'staging',
+			service = False,
+			task = staging_task,
+			instances = 2)
+			.bind(profile = STAGING),
+	]
+
+## Defining Process Objects
+
+Processes are handled by the Thermos system. A process is a single
+executable step run as a part of an Aurora task, which consists of a
+bash-executable statement.
+
+The key (and required) `Process` attributes are:
+
+-   `name`: Any string which is a valid Unix filename (no slashes,
+    NULLs, or leading periods). The `name` value must be unique relative
+    to other Processes in a `Task`.
+-   `cmdline`: A command line run in a bash subshell, so you can use
+    bash scripts. Nothing is supplied for command-line arguments,
+    so `$*` is unspecified.
+
+Many tiny processes make managing configurations more difficult. For
+example, the following is a bad way to define processes.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip'
+    )
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+Since `cmdline` runs in a bash subshell, you can chain commands
+with `&&` or `||`.
+
+When defining a `Task` that is just a list of Processes run in a
+particular order, use `SequentialTask`, as described in the
+[*Defining Task Objects*](#defining-task-objects) section. The following simplifies and combines the
+above multiple `Process` definitions into just two.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip && '
+                'unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+`Process` also has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#process-objects).
+
+
+## Getting Your Code Into The Sandbox
+
+When using Aurora, you need to get your executable code into its "sandbox", specifically
+the Task sandbox where the code executes for the Processes that make up that Task.
+
+Each Task has a sandbox created when the Task starts and garbage
+collected when it finishes. All of a Task's processes run in its
+sandbox, so processes can share state by using a shared current
+working directory.
+
+Typically, you save this code somewhere. You then need to define a Process
+in your `.aurora` configuration file that fetches the code from that somewhere
+to where the agent can see it. For a public cloud, that can be anywhere public on
+the Internet, such as S3. For a private cloud internal storage, you need to put in
+on an accessible HDFS cluster or similar storage.
+
+The template for this Process is:
+
+    <name> = Process(
+      name = '<name>'
+      cmdline = '<command to copy and extract code archive into current working directory>'
+    )
+
+Note: Be sure the extracted code archive has an executable.
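+
+For example, a hypothetical fetch process (the package URL is a placeholder for wherever your build artifacts live):
+
+    fetch_app = Process(
+      name = 'fetch_app',
+      cmdline = 'curl -O https://packages.example.com/myteam/app.tar.gz && tar xf app.tar.gz'
+    )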
+
+## Getting Environment Variables Into The Sandbox
+
+Every time a process is forked, the Thermos executor checks for the existence of a
+`.thermos_profile` file; if the file exists, it is sourced.
+You can utilize this mechanism to pass environment variables to the sandbox.
+
+An example for this Process is:
+
+    setup_env = Process(
+      name = 'setup',
+      cmdline = '''cat <<EOF > .thermos_profile
+    export RESULT=hello
+    EOF'''
+    )
+
+    read_env = Process(
+      name = 'read',
+      cmdline = 'echo $RESULT'
+    )
+
+## Defining Task Objects
+
+Tasks are handled by Mesos. A task is a collection of processes that
+runs in a shared sandbox. It's the fundamental unit Aurora uses to
+schedule the datacenter; essentially what Aurora does is find places
+in the cluster to run tasks.
+
+The key (and required) parts of a Task are:
+
+-   `name`: A string giving the Task's name. By default, if a Task is
+    not given a name, it inherits the first name in its Process list.
+
+-   `processes`: An unordered list of Process objects bound to the Task.
+    The value of the optional `constraints` attribute affects the
+    contents as a whole. Currently, the only constraint, `order`, determines if
+    the processes run in parallel or sequentially.
+
+-   `resources`: A `Resource` object defining the Task's resource
+    footprint. A `Resource` object has three attributes:
+    -   `cpu`: A Float, the fractional number of cores the Task
+        requires.
+    -   `ram`: An Integer, RAM bytes the Task requires.
+    -   `disk`: An Integer, disk bytes the Task requires.
+
+A basic Task definition looks like:
+
+    Task(
+        name="hello_world",
+        processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+        resources=Resources(cpu = 1.0,
+                            ram = 1*GB,
+                            disk = 1*GB))
+
+A Task has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#task-object)
+
+
+### SequentialTask: Running Processes in Parallel or Sequentially
+
+By default, a Task with several Processes runs them in parallel. There
+are two ways to run Processes sequentially:
+
+-   Include an `order` constraint in the Task definition's `constraints`
+    attribute whose arguments specify the processes' run order:
+
+        Task( ... processes=[process1, process2, process3],
+	          constraints = order(process1, process2, process3), ...)
+
+-   Use `SequentialTask` instead of `Task`; it automatically runs
+    processes in the order specified in the `processes` attribute. No
+    `constraint` parameter is needed:
+
+        SequentialTask( ... processes=[process1, process2, process3] ...)
+
+### SimpleTask
+
+For quickly creating simple tasks, use the `SimpleTask` helper. It
+creates a basic task from a provided name and command line using a
+default set of resources. For example, in a `.aurora` configuration
+file:
+
+    SimpleTask(name="hello_world", command="echo hello world")
+
+is equivalent to
+
+    Task(name="hello_world",
+         processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+         resources=Resources(cpu = 1.0,
+                             ram = 1*GB,
+                             disk = 1*GB))
+
+The simplest idiomatic Job configuration thus becomes:
+
+    import os
+    hello_world_job = Job(
+      task=SimpleTask(name="hello_world", command="echo hello world"),
+      role=os.getenv('USER'),
+      cluster="cluster1")
+
+When written to `hello_world.aurora`, you invoke it with a simple
+`aurora job create cluster1/$USER/test/hello_world hello_world.aurora`.
+
+### Combining tasks
+
+`Tasks.concat` (synonym: `concat_tasks`) and
+`Tasks.combine` (synonym: `combine_tasks`) merge multiple Task definitions
+into a single Task. It may be easier to define complex Jobs
+as smaller constituent Tasks. But since a Job only includes a single
+Task, the subtasks must be combined before using them in a Job.
+Smaller Tasks can also be reused between Jobs, instead of having to
+repeat their definition for multiple Jobs.
+
+With both methods, the merged Task takes the first Task's name. The
+difference between the two is the result Task's process ordering.
+
+-   `Tasks.combine` runs its subtasks' processes in no particular order.
+    The new Task's resource consumption is the sum of all its subtasks'
+    consumption.
+
+-   `Tasks.concat` runs its subtasks in the order supplied, with each
+    subtask's processes run serially between tasks. It is analogous to
+    the `order` constraint helper, except at the Task level instead of
+    the Process level. The new Task's resource consumption is the
+    maximum value specified by any subtask for each Resource attribute
+    (cpu, ram and disk).
+
+For example, given the following:
+
+    setup_task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper],
+      # It is important to note that {{Tasks.concat}} has
+      # no effect on the ordering of the processes within a task;
+      # hence the necessity of the {{order}} statement below
+      # (otherwise, the order in which {{download_interpreter}}
+      # and {{update_zookeeper}} run will be non-deterministic)
+      constraints=order(download_interpreter, update_zookeeper),
+      ...
+    )
+
+    run_task = SequentialTask(
+      ...
+      processes=[download_application, start_application],
+      ...
+    )
+
+    combined_task = Tasks.concat(setup_task, run_task)
+
+The `Tasks.concat` command merges the two Tasks into a single Task and
+ensures all processes in `setup_task` run before the processes
+in `run_task`. Conceptually, the task is reduced to:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper,
+                        download_application, start_application),
+      ...
+    )
+
+In the case of `Tasks.combine`, the two schedules run in parallel:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper) +
+                        order(download_application, start_application),
+      ...
+    )
+
+In the latter case, each of the two sequences may operate in parallel.
+Of course, this may not be the intended behavior (for example, if
+the `start_application` Process implicitly relies
+upon `download_interpreter`). Make sure you understand the difference
+between using one or the other.
+
+## Defining Job Objects
+
+A job is a group of identical tasks that Aurora can run in a Mesos cluster.
+
+A `Job` object is defined by the values of several attributes, some
+required and some optional. The required attributes are:
+
+-   `task`: Task object to bind to this job. Note that a Job can
+    only take a single Task.
+
+-   `role`: Job's role account; in other words, the user account to run
+    the job as on a Mesos cluster machine. A common value is
+    `os.getenv('USER')`; using a Python command to get the user who
+    submits the job request. The other common value is the service
+    account that runs the job, e.g. `www-data`.
+
+-   `environment`: Job's environment, typical values
+    are `devel`, `test`, or `prod`.
+
+-   `cluster`: Aurora cluster to schedule the job in, defined in
+    `/etc/aurora/clusters.json` or `~/.clusters.json`. You can specify
+    jobs where the only difference is the `cluster`, then at run time
+    only run the Job whose job key includes your desired cluster's name.
+
+You usually see a `name` parameter. By default, `name` inherits its
+value from the Job's associated Task object, but you can override this
+default. For these four parameters, a Job definition might look like:
+
+    foo_job = Job( name = 'foo', cluster = 'cluster1',
+              role = os.getenv('USER'), environment = 'prod',
+              task = foo_task)
+
+In addition to the required attributes, there are several optional
+attributes. Details can be found in the [Aurora Configuration Reference](../configuration/#job-objects).
+
+
+## The jobs List
+
+At the end of your `.aurora` file, you need to specify a list of the
+file's defined Jobs. For example, the following exports the jobs `job1`,
+`job2`, and `job3`.
+
+    jobs = [job1, job2, job3]
+
+This allows the aurora client to invoke commands on those jobs, such as
+starting, updating, or killing them.
+
+
+
+Basic Examples
+==============
+
+These are provided to give a basic understanding of simple Aurora jobs.
+
+### hello_world.aurora
+
+Put the following in a file named `hello_world.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    import os
+    hello_world_process = Process(name = 'hello_world', cmdline = 'echo hello world')
+
+    hello_world_task = Task(
+      resources = Resources(cpu = 0.1, ram = 16 * MB, disk = 16 * MB),
+      processes = [hello_world_process])
+
+    hello_world_job = Job(
+      cluster = 'cluster1',
+      role = os.getenv('USER'),
+      task = hello_world_task)
+
+    jobs = [hello_world_job]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world hello_world.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world
+
+### Environment Tailoring
+
+Put the following in a file named `hello_world_productionized.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    include('hello_world.aurora')
+
+    production_resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 2 * GB)
+    staging_resources = Resources(cpu = 0.1, ram = 32 * MB, disk = 512 * MB)
+    hello_world_template = hello_world_job(
+        name = "hello_world-{{cluster}}",
+        task = hello_world_task(resources = production_resources))
+
+    jobs = [
+      # production jobs
+      hello_world_template(cluster = 'cluster1', instances = 25),
+      hello_world_template(cluster = 'cluster2', instances = 15),
+
+      # staging jobs
+      hello_world_template(
+        cluster = 'local',
+        instances = 1,
+        task = hello_world_task(resources = staging_resources)),
+    ]
+
+Then issue the following commands to create and kill the job, using your own values for the job key
+
+    aurora job create cluster1/$USER/test/hello_world-cluster1 hello_world_productionized.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world-cluster1
\ No newline at end of file
diff --git a/source/documentation/0.19.1/reference/configuration.md b/source/documentation/0.19.1/reference/configuration.md
new file mode 100644
index 0000000..d435082
--- /dev/null
+++ b/source/documentation/0.19.1/reference/configuration.md
@@ -0,0 +1,619 @@
+Aurora Configuration Reference
+==============================
+
+Don't know where to start? The Aurora configuration schema is very
+powerful, and configurations can become quite complex for advanced use
+cases.
+
+For examples of simple configurations to get something up and running
+quickly, check out the [Tutorial](../../getting-started/tutorial/). When you feel comfortable with the basics, move
+on to the [Configuration Tutorial](../configuration-tutorial/) for more in-depth coverage of
+configuration design.
+
+- [Process Schema](#process-schema)
+    - [Process Objects](#process-objects)
+- [Task Schema](#task-schema)
+    - [Task Object](#task-object)
+    - [Constraint Object](#constraint-object)
+    - [Resource Object](#resource-object)
+- [Job Schema](#job-schema)
+    - [Job Objects](#job-objects)
+    - [UpdateConfig Objects](#updateconfig-objects)
+    - [HealthCheckConfig Objects](#healthcheckconfig-objects)
+    - [Announcer Objects](#announcer-objects)
+    - [Container Objects](#container)
+    - [LifecycleConfig Objects](#lifecycleconfig-objects)
+- [Specifying Scheduling Constraints](#specifying-scheduling-constraints)
+- [Template Namespaces](#template-namespaces)
+    - [mesos Namespace](#mesos-namespace)
+    - [thermos Namespace](#thermos-namespace)
+
+
+Process Schema
+==============
+
+Process objects consist of required `name` and `cmdline` attributes. You can customize Process
+behavior with its optional attributes. Remember, Processes are handled by Thermos.
+
+### Process Objects
+
+  **Attribute Name**  | **Type**    | **Description**
+  ------------------- | :---------: | ---------------------------------
+   **name**           | String      | Process name (Required)
+   **cmdline**        | String      | Command line (Required)
+   **max_failures**   | Integer     | Maximum process failures (Default: 1)
+   **daemon**         | Boolean     | When True, this is a daemon process. (Default: False)
+   **ephemeral**      | Boolean     | When True, this is an ephemeral process. (Default: False)
+   **min_duration**   | Integer     | Minimum duration between process restarts in seconds. (Default: 5)
+   **final**          | Boolean     | When True, this process is a finalizing one that should run last. (Default: False)
+   **logger**         | Logger      | Struct defining the log behavior for the process. (Default: Empty)
+
+#### name
+
+The name is any valid UNIX filename string (specifically no
+slashes, NULLs or leading periods). Within a Task object, each Process name
+must be unique.
+
+#### cmdline
+
+The command line run by the process. The command line is invoked in a bash
+subshell, so it can involve full-blown bash scripts. However, nothing is
+supplied for command-line arguments so `$*` is unspecified.
+
+#### max_failures
+
+The maximum number of failures (non-zero exit statuses) this process can
+have before being marked permanently failed and not retried. If a
+process permanently fails, Thermos looks at the failure limit of the task
+containing the process (usually 1) to determine if the task has
+failed as well.
+
+Setting `max_failures` to 0 makes the process retry
+indefinitely until it achieves a successful (zero) exit status.
+It retries at most once every `min_duration` seconds to prevent
+an effective denial of service attack on the coordinating Thermos scheduler.
+
+#### daemon
+
+By default, Thermos processes are non-daemon. If `daemon` is set to True, a
+successful (zero) exit status does not prevent future process runs.
+Instead, the process reinvokes after `min_duration` seconds.
+However, the maximum failure limit still applies. A combination of
+`daemon=True` and `max_failures=0` causes a process to retry
+indefinitely regardless of exit status. This should be avoided
+for very short-lived processes because of the accumulation of
+checkpointed state for each process run. When running in Mesos
+specifically, `max_failures` is capped at 100.
+
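+For example, a minimal sketch of a daemon process that periodically re-runs a sync script
+(the process name and command are placeholders):
+
+        sync = Process(
+          name = 'periodic_sync',
+          cmdline = './sync_to_backup.sh',  # placeholder command
+          daemon = True,        # a successful exit does not prevent future runs
+          min_duration = 300)   # wait at least 5 minutes between runs
+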
+#### ephemeral
+
+By default, Thermos processes are non-ephemeral. If `ephemeral` is set to
+True, the process' status is not used to determine if its containing task
+has completed. For example, consider a task with a non-ephemeral
+webserver process and an ephemeral logsaver process
+that periodically checkpoints its log files to a centralized data store.
+The task is considered finished once the webserver process has
+completed, regardless of the logsaver's current status.
+
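+A sketch of the webserver/logsaver arrangement described above (commands are placeholders);
+the task is considered finished once `webserver` completes, regardless of `logsaver`:
+
+        webserver = Process(name = 'webserver', cmdline = './run_server.sh')
+        logsaver = Process(
+          name = 'logsaver',
+          cmdline = './checkpoint_logs.sh',
+          daemon = True,
+          ephemeral = True)  # does not count towards task completion
+        task = Task(
+          processes = [webserver, logsaver],
+          resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 1 * GB))
+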
+#### min_duration
+
+Processes may succeed or fail multiple times during a single task's
+duration. Each of these is called a *process run*. `min_duration` is
+the minimum number of seconds the scheduler waits before running the
+same process.
+
+#### final
+
+Processes can be grouped into two classes: ordinary processes and
+finalizing processes. By default, Thermos processes are ordinary. They
+run as long as the task is considered healthy (i.e., no failure
+limits have been reached.) But once all regular Thermos processes
+finish or the task reaches a certain failure threshold, it
+moves into a "finalization" stage and runs all finalizing
+processes. These are typically processes necessary for cleaning up the
+task, such as log checkpointers, or perhaps e-mail notifications that
+the task completed.
+
+Finalizing processes may not depend upon ordinary processes or
+vice-versa; however, finalizing processes may depend upon other
+finalizing processes and otherwise run as a typical process
+schedule.
+
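+For example, a sketch of a finalizing cleanup process that runs after the ordinary
+processes have finished (commands are placeholders):
+
+        main = Process(name = 'main', cmdline = './run_service.sh')
+        archive_logs = Process(
+          name = 'archive_logs',
+          cmdline = './archive_logs.sh',
+          final = True)  # runs during the finalization stage
+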
+#### logger
+
+The default behavior of Thermos is to store stderr/stdout logs in files which grow unbounded.
+In the event that you have large log volume, you may want to configure Thermos to automatically
+rotate logs after they grow to a certain size, which can prevent your job from using more than its
+allocated disk space.
+
+Logger objects specify a `destination` for Process logs which is, by default, `file` - a pair of
+`stdout` and `stderr` files. It's also possible to specify `console` to send log output to
+the Process stdout and stderr streams, `none` to suppress any log output, or `both` to send logs to
+files and console streams.
+
+The default Logger `mode` is `standard` which lets the stdout and stderr streams grow without bound.
+
+  **Attribute Name**  | **Type**          | **Description**
+  ------------------- | :---------------: | ---------------------------------
+   **destination**    | LoggerDestination | Destination of logs. (Default: `file`)
+   **mode**           | LoggerMode        | Mode of the logger. (Default: `standard`)
+   **rotate**         | RotatePolicy      | An optional rotation policy. (Default: `Empty`)
+
+A RotatePolicy describes log rotation behavior when `mode` is set to `rotate`; it is ignored
+otherwise. If `rotate` is `Empty` or `RotatePolicy()` while `mode` is set to `rotate`, the
+defaults below are used.
+
+  **Attribute Name**  | **Type**     | **Description**
+  ------------------- | :----------: | ---------------------------------
+   **log_size**       | Integer      | Maximum size (in bytes) of an individual log file. (Default: 100 MiB)
+   **backups**        | Integer      | The maximum number of backups to retain. (Default: 5)
+
+An example process configuration is as follows:
+
+        process = Process(
+          name='process',
+          logger=Logger(
+            destination=LoggerDestination('both'),
+            mode=LoggerMode('rotate'),
+            rotate=RotatePolicy(log_size=5*MB, backups=5)
+          )
+        )
+
+Task Schema
+===========
+
+Tasks fundamentally consist of a `name` and a list of Process objects stored as the
+value of the `processes` attribute. Processes can be further constrained with
+`constraints`. By default, `name`'s value inherits from the first Process in the
+`processes` list, so for simple `Task` objects with one Process, `name`
+can be omitted. In Mesos, `resources` is also required.
+
+### Task Object
+
+   **param**               | **type**                         | **description**
+   ---------               | :---------:                      | ---------------
+   ```name```              | String                           | Task name (Required) (Default: ```processes[0].name```)
+   ```processes```         | List of ```Process``` objects    | List of ```Process``` objects bound to this task. (Required)
+   ```constraints```       | List of ```Constraint``` objects | List of ```Constraint``` objects constraining processes.
+   ```resources```         | ```Resource``` object            | Resource footprint. (Required)
+   ```max_failures```      | Integer                          | Maximum process failures before being considered failed (Default: 1)
+   ```max_concurrency```   | Integer                          | Maximum number of concurrent processes (Default: 0, unlimited concurrency.)
+   ```finalization_wait``` | Integer                          | Amount of time allocated for finalizing processes, in seconds. (Default: 30)
+
+#### name
+`name` is a string denoting the name of this task. It defaults to the name of the first Process in
+the list of Processes associated with the `processes` attribute.
+
+#### processes
+
+`processes` is an unordered list of `Process` objects. To constrain the order
+in which they run, use `constraints`.
+
+##### constraints
+
+A list of `Constraint` objects. Currently it supports only one type,
+the `order` constraint. `order` is a list of process names
+that should run in the order given. For example,
+
+        process = Process(cmdline = "echo hello {{name}}")
+        task = Task(name = "echoes",
+                    processes = [process(name = "jim"), process(name = "bob")],
+                    constraints = [Constraint(order = ["jim", "bob"])])
+
+Constraints can be supplied ad-hoc and in duplicate. Not all
+Processes need be constrained, however Tasks with cycles are
+rejected by the Thermos scheduler.
+
+Use the `order` function as shorthand to generate `Constraint` lists.
+The following:
+
+        order(process1, process2)
+
+is shorthand for
+
+        [Constraint(order = [process1.name(), process2.name()])]
+
+The `order` function accepts Process name strings `('foo', 'bar')` or the processes
+themselves, e.g. `foo=Process(name='foo', ...)`, `bar=Process(name='bar', ...)`,
+`constraints=order(foo, bar)`.
+
+#### resources
+
+Takes a `Resource` object, which specifies the amounts of CPU, memory, and disk space resources
+to allocate to the Task.
+
+#### max_failures
+
+`max_failures` is the number of failed processes needed for the `Task` to be
+marked as failed.
+
+For example, assume a Task has two Processes and a `max_failures` value of `2`:
+
+        template = Process(max_failures=10)
+        task = Task(
+          name = "fail",
+          processes = [
+             template(name = "failing", cmdline = "exit 1"),
+             template(name = "succeeding", cmdline = "exit 0")
+          ],
+          max_failures=2)
+
+The `failing` Process could fail 10 times before being marked as permanently
+failed, and the `succeeding` Process could succeed on the first run. However,
+the task would succeed despite only allowing for two failed processes. To be more
+specific, there would be 10 failed process runs yet 1 failed process. Both processes
+would have to fail for the Task to fail.
+
+#### max_concurrency
+
+For Tasks with a number of expensive but otherwise independent
+processes, you may want to limit the amount of concurrency
+the Thermos scheduler provides rather than artificially constraining
+it via `order` constraints. For example, a test framework may
+generate a task with 100 test run processes, but wants to run it on
+a machine with only 4 cores. You can limit the amount of parallelism to
+4 by setting `max_concurrency=4` in your task configuration.
+
+For example, the following task spawns 180 Processes ("mappers")
+to compute individual elements of a 180 degree sine table, all dependent
+upon one final Process ("reducer") to tabulate the results:
+
+    def make_mapper(id):
+      return Process(
+        name = "mapper%03d" % id,
+        cmdline = "echo 'scale=50;s(%d*4*a(1)/180)' | bc -l > temp.sine_table.%03d" % (id, id))
+
+    def make_reducer():
+      return Process(name = "reducer",
+                     cmdline = "cat temp.* | nl > sine_table.txt && rm -f temp.*")
+
+    processes = map(make_mapper, range(180))
+
+    task = Task(
+      name = "mapreduce",
+      processes = processes + [make_reducer()],
+      constraints = [Constraint(order = [mapper.name(), 'reducer']) for mapper
+                     in processes],
+      max_concurrency = 8)
+
+#### finalization_wait
+
+Process execution is organized into three active stages: `ACTIVE`,
+`CLEANING`, and `FINALIZING`. The `ACTIVE` stage is when ordinary processes run.
+This stage lasts as long as Processes are running and the Task is healthy.
+The moment either all Processes have finished successfully or the Task has reached a
+maximum Process failure limit, it goes into the `CLEANING` stage and sends
+SIGTERMs to all currently running Processes and their process trees.
+Once all Processes have terminated, the Task goes into the `FINALIZING` stage
+and invokes the schedule of all Processes with the "final" attribute set to True.
+
+This whole process from the end of `ACTIVE` stage to the end of `FINALIZING`
+must happen within `finalization_wait` seconds. If it does not
+finish during that time, all remaining Processes are sent SIGKILLs
+(or if they depend upon uncompleted Processes, are
+never invoked.)
+
+When running on Aurora, the `finalization_wait` is capped at 60 seconds.
+
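+A sketch of a task that gives its finalizing process a little extra time to run
+(commands are placeholders; remember the 60 second cap when running on Aurora):
+
+        task = Task(
+          processes = [Process(name = 'main', cmdline = './run.sh'),
+                       Process(name = 'archive', cmdline = './archive.sh', final = True)],
+          finalization_wait = 45,
+          resources = Resources(cpu = 1.0, ram = 256 * MB, disk = 1 * GB))
+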
+### Constraint Object
+
+Current constraint objects only support a single ordering constraint, `order`,
+which specifies that its processes run sequentially in the order given. By
+default, all processes run in parallel when bound to a `Task` without
+ordering constraints.
+
+   param | type           | description
+   ----- | :----:         | -----------
+   order | List of String | List of processes by name (String) that should be run serially.
+
+### Resource Object
+
+Specifies the amount of CPU, RAM, and disk resources the task needs. See the
+[Resource Isolation document](../../features/resource-isolation/) for suggested values and to understand how
+resources are allocated.
+
+  param      | type    | description
+  -----      | :----:  | -----------
+  ```cpu```  | Float   | Fractional number of cores required by the task.
+  ```ram```  | Integer | Bytes of RAM required by the task.
+  ```disk``` | Integer | Bytes of disk required by the task.
+  ```gpu```  | Integer | Number of GPU cores required by the task.
+
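+For example, a resource specification for a task needing two cores, 4 GiB of RAM, 8 GiB of
+disk and one GPU (GPU resources must also be allowed by the cluster operator):
+
+    resources = Resources(cpu = 2.0, ram = 4 * GB, disk = 8 * GB, gpu = 1)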
+
+Job Schema
+==========
+
+### Job Objects
+
+*Note: Specifying a ```Container``` object as the value of the ```container``` property is
+  deprecated in favor of setting its value directly to the appropriate ```Docker``` or ```Mesos```
+  container type*
+
+*Note: Specifying preemption behavior of tasks through `production` flag is deprecated in favor of
+  electing appropriate task tier via `tier` attribute.*
+
+   name | type | description
+   ------ | :-------: | -------
+  ```task``` | Task | The Task object to bind to this job. Required.
+  ```name``` | String | Job name. (Default: inherited from the task attribute's name)
+  ```role``` | String | Job role account. Required.
+  ```cluster``` | String | Cluster in which this job is scheduled. Required.
+  ```environment``` | String | Job environment, default ```devel```. By default must be one of ```prod```, ```devel```, ```test``` or ```staging<number>``` but it can be changed by the Cluster operator using the scheduler option `allowed_job_environments`.
+  ```contact``` | String | Best email address to reach the owner of the job. For production jobs, this is usually a team mailing list.
+  ```instances```| Integer | Number of instances (sometimes referred to as replicas or shards) of the task to create. (Default: 1)
+  ```cron_schedule``` | String | Cron schedule in cron format. May only be used with non-service jobs. See [Cron Jobs](../../features/cron-jobs/) for more information. Default: None (not a cron job.)
+  ```cron_collision_policy``` | String | Policy to use when a cron job is triggered while a previous run is still active. KILL_EXISTING: kill the previous run and schedule the new run. CANCEL_NEW: let the previous run continue and cancel the new run. (Default: KILL_EXISTING)
+  ```update_config``` | ```UpdateConfig``` object | Parameters for controlling the rate and policy of rolling updates.
+  ```constraints``` | dict | Scheduling constraints for the tasks. See the section on the [constraint specification language](#specifying-scheduling-constraints)
+  ```service``` | Boolean | If True, restart tasks regardless of success or failure. (Default: False)
+  ```max_task_failures``` | Integer | Maximum number of failures after which the task is considered to have failed (Default: 1) Set to -1 to allow for infinite failures
+  ```priority``` | Integer | Preemption priority to give the task (Default 0). Tasks with higher priorities may preempt tasks at lower priorities.
+  ```production``` | Boolean |  (Deprecated) Whether or not this is a production task that may [preempt](../../features/multitenancy/#preemption) other tasks (Default: False). Production job role must have the appropriate [quota](../../features/multitenancy/#preemption).
+  ```health_check_config``` | ```HealthCheckConfig``` object | Parameters for controlling a task's health checks. HTTP health check is only used if a health port was assigned with a command line wildcard.
+  ```container``` | Choice of ```Container```, ```Docker``` or ```Mesos``` object | An optional container to run all processes inside of.
+  ```lifecycle``` | ```LifecycleConfig``` object | An optional task lifecycle configuration that dictates commands to be executed on startup/teardown.  HTTP lifecycle is enabled by default if the "health" port is requested.  See [LifecycleConfig Objects](#lifecycleconfig-objects) for more information.
+  ```tier``` | String | Task tier type. The default scheduler tier configuration allows for 3 tiers: `revocable`, `preemptible`, and `preferred`. If a tier is not elected, Aurora assigns the task to a tier based on its choice of `production` (that is `preferred` for production and `preemptible` for non-production jobs). See the section on [Configuration Tiers](../../features/multitenancy/#configuration-tiers) for more information.
+  ```announce``` | ```Announcer``` object | Optionally enable Zookeeper ServerSet announcements. See [Announcer Objects](#announcer-objects) for more information.
+  ```enable_hooks``` | Boolean | Whether to enable [Client Hooks](../client-hooks/) for this job. (Default: False)
+
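+A minimal sketch of a service Job using a few of the attributes above (`hello_world_task`
+and the role/contact values are placeholders):
+
+    hello_world_service = Job(
+      cluster = 'cluster1',
+      role = 'www-data',
+      environment = 'prod',
+      name = 'hello_world',
+      instances = 2,
+      service = True,            # restart tasks regardless of exit status
+      tier = 'preferred',
+      contact = 'team@example.com',
+      task = hello_world_task)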
+
+### UpdateConfig Objects
+
+Parameters for controlling the rate and policy of rolling updates.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+| ```watch_secs```             | Integer  | Minimum number of seconds a shard must remain in ```RUNNING``` state before considered a success (Default: 45)
+| ```max_per_shard_failures``` | Integer  | Maximum number of restarts per shard during update. Increments total failure count when this limit is exceeded. (Default: 0)
+| ```max_total_failures```     | Integer  | Maximum number of shard failures to be tolerated in total during an update. Cannot be greater than or equal to the total number of tasks in a job. (Default: 0)
+| ```rollback_on_failure```    | boolean  | When False, prevents auto rollback of a failed update (Default: True)
+| ```wait_for_batch_completion```| boolean | When True, all threads from a given batch will be blocked from picking up new instances until the entire batch is updated. This essentially simulates the legacy sequential updater algorithm. (Default: False)
+| ```pulse_interval_secs```    | Integer  |  Indicates a [coordinated update](../../features/job-updates/#coordinated-job-updates). If no pulses are received within the provided interval the update will be blocked. Beta-updater only. Will fail on submission when used with client updater. (Default: None)
+
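+For instance, a sketch of an update policy that rolls out five instances at a time and
+tolerates a couple of restarts per instance:
+
+    update_config = UpdateConfig(
+      batch_size = 5,
+      watch_secs = 60,
+      max_per_shard_failures = 2,
+      max_total_failures = 0)
+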
+### HealthCheckConfig Objects
+
+Parameters for controlling a task's health checks via HTTP or a shell command.
+
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```health_checker```           | HealthCheckerConfig | Configure what kind of health check to use.
+| ```initial_interval_secs```    | Integer   | Initial grace period (during which health-check failures are ignored) while performing health checks. (Default: 15)
+| ```interval_secs```            | Integer   | Interval on which to check the task's health. (Default: 10)
+| ```max_consecutive_failures``` | Integer   | Maximum number of consecutive failures that will be tolerated before considering a task unhealthy (Default: 0)
+| ```min_consecutive_successes``` | Integer   | Minimum number of consecutive successful health checks required before considering a task healthy (Default: 1)
+| ```timeout_secs```             | Integer   | Health check timeout. (Default: 1)
+
+### HealthCheckerConfig Objects
+| param                          | type                | description
+| -------                        | :-------:           | --------
+| ```http```                     | HttpHealthChecker  | Configure health check to use HTTP. (Default)
+| ```shell```                    | ShellHealthChecker | Configure health check via a shell command.
+
+### HttpHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```endpoint```                 | String    | HTTP endpoint to check (Default: /health)
+| ```expected_response```        | String    | If not empty, fail the HTTP health check if the response differs. Case insensitive. (Default: ok)
+| ```expected_response_code```   | Integer   | If not zero, fail the HTTP health check if the response code differs. (Default: 0)
+
+### ShellHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```shell_command```            | String    | An alternative to HTTP health checking. Specifies a shell command that will be executed. Any non-zero exit status will be interpreted as a health check failure.
+
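+A sketch of a shell-based health check combining the objects above (the command is a
+placeholder):
+
+    health_check_config = HealthCheckConfig(
+      health_checker = HealthCheckerConfig(
+        shell = ShellHealthChecker(shell_command = './health_check.sh')),
+      initial_interval_secs = 30,
+      interval_secs = 10,
+      max_consecutive_failures = 3)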
+
+### Announcer Objects
+
+If the `announce` field in the Job configuration is set, each task will be
+registered in the ServerSet `/aurora/role/environment/jobname` in the
+zookeeper ensemble configured by the executor (which can optionally be overridden by specifying
+the `zk_path` parameter).  If no Announcer object is specified,
+no announcement will take place.  For more information about ServerSets, see the [Service Discovery](../../features/service-discovery/)
+documentation.
+
+By default, the hostname in the registered endpoints will be the `--hostname` parameter
+that is passed to the mesos agent. To override the hostname value, the executor can be started
+with `--announcer-hostname=<overridden_value>`. If you decide to use `--announcer-hostname` and
+the overridden value needs to change for every executor, then the executor has to be started inside a wrapper, see [Executor Wrapper](../../operations/configuration/#thermos-executor-wrapper).
+
+For example, if you want the hostname in the endpoint to be an IP address instead of the hostname,
+the `--hostname` parameter to the mesos agent can be set to the machine IP or the executor can
+be started with `--announcer-hostname=<host_ip>` while wrapping the executor inside a script.
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```primary_port```             | String    | Which named port to register as the primary endpoint in the ServerSet (Default: `http`)
+| ```portmap```                  | dict      | A mapping of additional endpoints to be announced in the ServerSet (Default: `{ 'aurora': '{{primary_port}}' }`)
+| ```zk_path```                  | String    | Zookeeper serverset path override (executor must be started with the `--announcer-allow-custom-serverset-path` parameter)
+
+#### Port aliasing with the Announcer `portmap`
+
+The primary endpoint registered in the ServerSet is the one allocated to the port
+specified by the `primary_port` in the `Announcer` object, by default
+the `http` port.  This port can be referenced from anywhere within a configuration
+as `{{thermos.ports[http]}}`.
+
+Without the port map, each named port would be allocated a unique port number.
+The `portmap` allows two different named ports to be aliased together.  The default
+`portmap` aliases the `aurora` port (i.e. `{{thermos.ports[aurora]}}`) to
+the `http` port.  Even though the two ports can be referenced independently,
+only one port is allocated by Mesos.  Any port referenced in a `Process` object
+but which is not in the portmap will be allocated dynamically by Mesos and announced as well.
+
+It is possible to use the portmap to alias names to static port numbers, e.g.
+`{'http': 80, 'https': 443, 'aurora': 'http'}`.  In this case, referencing
+`{{thermos.ports[aurora]}}` would look up `{{thermos.ports[http]}}` then
+find a static port 80.  No port would be requested of or allocated by Mesos.
+
+Static ports should be used cautiously as Aurora does nothing to prevent two
+tasks with the same static port allocations from being co-scheduled.
+External constraints such as agent attributes should be used to enforce such
+guarantees should they be needed.
+
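+For example, a sketch of an Announcer that keeps the default `http` primary port and
+additionally aliases a hypothetical `admin` name onto it:
+
+    announce = Announcer(
+      primary_port = 'http',
+      portmap = {'aurora': '{{primary_port}}', 'admin': 'http'})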
+
+### Container Objects
+
+Describes the container the job's processes will run inside. If not using Docker or the Mesos
+unified-container, the container can be omitted from your job config.
+
+  param          | type           | description
+  -----          | :----:         | -----------
+  ```mesos```    | Mesos          | A native Mesos container to use.
+  ```docker```   | Docker         | A Docker container to use (via Docker engine)
+
+### Mesos Object
+
+  param            | type                           | description
+  -----            | :----:                         | -----------
+  ```image```      | Choice(AppcImage, DockerImage) | An optional filesystem image to use within this container.
+  ```volumes```    | List(Volume)                   | An optional list of volume mounts for this container.
+
+### Volume Object
+
+  param                  | type     | description
+  -----                  | :----:   | -----------
+  ```container_path```   | String   | Path within the container where the volume is mounted.
+  ```host_path```        | String   | Path on the host to mount into the container.
+  ```mode```             | Enum     | Mode of the mount, can be 'RW' or 'RO'.
+
+### AppcImage
+
+Describes an AppC filesystem image.
+
+  param          | type   | description
+  -----          | :----: | -----------
+  ```name```     | String | The name of the appc image.
+  ```image_id``` | String | The [image id](https://github.com/appc/spec/blob/master/spec/aci.md#image-id) of the appc image.
+
+### DockerImage
+
+Describes a Docker filesystem image.
+
+  param      | type   | description
+  -----      | :----: | -----------
+  ```name``` | String | The name of the docker image.
+  ```tag```  | String | The tag that identifies the docker image.
+
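+Putting these together, a sketch of a Mesos container that runs inside a Docker filesystem
+image and mounts a read-only volume (the image name, tag and paths are placeholders, and
+container volumes must be allowed by the cluster operator):
+
+    container = Mesos(
+      image = DockerImage(name = 'ubuntu', tag = '16.04'),
+      volumes = [Volume(container_path = '/etc/myapp/config',
+                        host_path = '/var/lib/myapp-config',
+                        mode = 'RO')])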
+
+### Docker Object
+
+*Note: In order to correctly execute processes inside a job, the Docker container must have Python 2.7 installed.*
+
+*Note: For a private docker registry, mesos mandates the docker credential file to be named `.dockercfg`, even though docker may create a credential file with a different name on various platforms. Also, the `.dockercfg` file needs to be copied into the sandbox using the `-thermos_executor_resources` flag, specified while starting Aurora.*
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```image```      | String          | The name of the docker image to execute.  If the image does not exist locally it will be pulled with ```docker pull```.
+  ```parameters``` | List(Parameter) | Additional parameters to pass to the Docker engine.
+
+### Docker Parameter Object
+
+Docker CLI parameters. This needs to be enabled by the scheduler `-allow_docker_parameters` option.
+See [Docker Command Line Reference](https://docs.docker.com/reference/commandline/run/) for valid parameters.
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```name```       | String          | The name of the docker parameter. E.g. volume
+  ```value```      | String          | The value of the parameter. E.g. /usr/local/bin:/usr/bin:rw
+
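+For example, a sketch of a Docker container passing the `volume` parameter shown above
+(the image name is a placeholder, and `-allow_docker_parameters` must be enabled on the
+scheduler):
+
+    container = Docker(
+      image = 'python:2.7',
+      parameters = [Parameter(name = 'volume', value = '/usr/local/bin:/usr/bin:rw')])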
+
+### LifecycleConfig Objects
+
+*Note: The only lifecycle configuration supported is the HTTP lifecycle via the HttpLifecycleConfig.*
+
+  param          | type                | description
+  -----          | :----:              | -----------
+  ```http```     | HttpLifecycleConfig | Configure the lifecycle manager to send lifecycle commands to the task via HTTP.
+
+### HttpLifecycleConfig Objects
+
+*Note: The combined `graceful_shutdown_wait_secs` and `shutdown_wait_secs` is implicitly upper bounded by the `--stop_timeout_in_secs` flag exposed by the executor (see options [here](https://github.com/apache/aurora/blob/master/src/main/python/apache/aurora/executor/bin/thermos_executor_main.py), default is 2 minutes). Therefore, if the user specifies values that add up to more than `--stop_timeout_in_secs`, the task will be killed earlier than the user anticipates (see the termination lifecycle [here](https://aurora.apache.org/documentation/latest/reference/task-lifecycle/#forceful-termination-killing-restarting)). Furthermore, `stop_timeout_in_secs` itself is implicitly upper bounded by two scheduler options: `transient_task_state_timeout` and `preemption_slot_hold_time` (see reference [here](http://aurora.apache.org/documentation/latest/reference/scheduler-configuration/)). If the `stop_timeout_in_secs` exceeds either of these scheduler options, tasks could be designated as LOST or tasks utilizing preemption could lose their desired slot respectively. Cluster operators should be aware of these timings should they change the defaults.*
+
+  param                             | type    | description
+  -----                             | :----:  | -----------
+  ```port```                        | String  | The named port to send POST commands. (Default: health)
+  ```graceful_shutdown_endpoint```  | String  | Endpoint to hit to indicate that a task should gracefully shutdown. (Default: /quitquitquit)
+  ```shutdown_endpoint```           | String  | Endpoint to hit to give a task its final warning before being killed. (Default: /abortabortabort)
+  ```graceful_shutdown_wait_secs``` | Integer | The amount of time (in seconds) to wait after hitting the ```graceful_shutdown_endpoint``` before proceeding with the [task termination lifecycle](https://aurora.apache.org/documentation/latest/reference/task-lifecycle/#forceful-termination-killing-restarting). (Default: 5)
+  ```shutdown_wait_secs```          | Integer | The amount of time (in seconds) to wait after hitting the ```shutdown_endpoint``` before proceeding with the [task termination lifecycle](https://aurora.apache.org/documentation/latest/reference/task-lifecycle/#forceful-termination-killing-restarting). (Default: 5)
+
+#### graceful_shutdown_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint to request that the task gracefully shut itself down.  This is a
+courtesy call before the `shutdown_endpoint` is invoked
+`graceful_shutdown_wait_secs` seconds later.
+
+#### shutdown_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint as a final warning before the task is shut down.  If the task
+does not shut down on its own after `shutdown_wait_secs` seconds, it will be
+forcefully killed.
+
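+A sketch of a lifecycle configuration that gives a task more time to drain before the
+forceful termination sequence begins (struct names follow the tables above):
+
+    lifecycle = LifecycleConfig(
+      http = HttpLifecycleConfig(
+        graceful_shutdown_wait_secs = 20,
+        shutdown_wait_secs = 10))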
+
+Specifying Scheduling Constraints
+=================================
+
+In the `Job` object there is a map `constraints` from String to String
+allowing the user to tailor the schedulability of tasks within the job.
+
+The constraint map's key is the attribute name on which we
+constrain Tasks within our Job, and the value is how we constrain them.
+There are two types of constraints: *limit constraints* and *value
+constraints*.
+
+| constraint    | description
+| ------------- | --------------
+| Limit         | A string that specifies a limit for a constraint. Starts with <code>'limit:</code> followed by an Integer and closing single quote, such as ```'limit:1'```.
+| Value         | A string that specifies a value for a constraint. To include a list of values, separate the values using commas. To negate the values of a constraint, start with a ```!```.
+
+Further details can be found in the [Scheduling Constraints](../../features/constraints/) feature
+description.
+
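+For example, a sketch of a constraints map that combines a limit constraint on the `host`
+attribute with a negated value constraint on a hypothetical `rack` attribute:
+
+    constraints = {
+      'host': 'limit:1',   # at most one instance per host
+      'rack': '!rack-a',   # avoid agents whose rack attribute is rack-a
+    }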
+
+Template Namespaces
+===================
+
+Currently, a few Pystachio namespaces have special semantics. Using them
+in your configuration allows you to tailor application behavior
+through environment introspection or to interact in special ways with the
+Aurora client or Aurora-provided services.
+
+### mesos Namespace
+
+The `mesos` namespace contains variables which relate to the `mesos` agent
+which launched the task. The `instance` variable can be used
+to distinguish between Task replicas.
+
+| variable name     | type       | description
+| --------------- | :--------: | -------------
+| ```instance```    | Integer    | The instance number of the created task. A job with 5 replicas has instance numbers 0, 1, 2, 3, and 4.
+| ```hostname``` | String | The instance hostname that the task was launched on.
+
+Please note, there is no uniqueness guarantee for `instance` in the presence of
+network partitions. If that is required, it should be baked in at the application
+level using a distributed coordination service such as Zookeeper.
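+
+For example, a sketch of a process that uses this namespace to shard its work and record
+where it ran (the command and names are placeholders):
+
+        process = Process(
+          name = 'shard_worker',
+          cmdline = './work.sh --shard={{mesos.instance}} --host={{mesos.hostname}}')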
+
+### thermos Namespace
+
+The `thermos` namespace contains variables that work directly on the
+Thermos platform in addition to Aurora. This namespace is fully
+compatible with Tasks invoked via the `thermos` CLI.
+
+| variable      | type                     | description                        |
+| :----------:  | ---------                | ------------                       |
+| ```ports```   | map of string to Integer | A map of names to port numbers     |
+| ```task_id``` | string                   | The task ID assigned to this task. |
+
+The `thermos.ports` namespace is automatically populated by Aurora when
+invoking tasks on Mesos. When running the `thermos` command directly,
+these ports must be explicitly mapped with the `-P` option.
+
+For example, if `{{thermos.ports[http]}}` is specified in a `Process`
+configuration, it is automatically extracted and auto-populated by
+Aurora, but must be specified with, for example, `thermos -P http:12345`
+to map `http` to port 12345 when running via the CLI.
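+
+For example, a sketch of a process that binds to whatever port was allocated for `http`
+(the command is a placeholder):
+
+        http_server = Process(
+          name = 'http_server',
+          cmdline = './start_server.sh --port={{thermos.ports[http]}}')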
diff --git a/source/documentation/0.19.1/reference/observer-configuration.md b/source/documentation/0.19.1/reference/observer-configuration.md
new file mode 100644
index 0000000..0126295
--- /dev/null
+++ b/source/documentation/0.19.1/reference/observer-configuration.md
@@ -0,0 +1,89 @@
+# Observer Configuration Reference
+
+The Aurora/Thermos observer can take a variety of configuration options through command-line arguments.
+A list of the available options can be seen by running `thermos_observer --long-help`.
+
+Please refer to the [Operator Configuration Guide](../../operations/configuration/) for details on how
+to properly set the most important options.
+
+```
+$ thermos_observer.pex --long-help
+Options:
+  -h, --help, --short-help
+                        show this help message and exit.
+  --long-help           show options from all registered modules, not just the
+                        __main__ module.
+  --mesos-root=MESOS_ROOT
+                        The mesos root directory to search for Thermos
+                        executor sandboxes [default: /var/lib/mesos]
+  --ip=IP               The IP address the observer will bind to. [default:
+                        0.0.0.0]
+  --port=PORT           The port on which the observer should listen.
+                        [default: 1338]
+  --polling_interval_secs=POLLING_INTERVAL_SECS
+                        The number of seconds between observer refresh
+                        attempts. [default: 5]
+  --task_process_collection_interval_secs=TASK_PROCESS_COLLECTION_INTERVAL_SECS
+                        The number of seconds between per task process
+                        resource collections. [default: 20]
+  --task_disk_collection_interval_secs=TASK_DISK_COLLECTION_INTERVAL_SECS
+                        The number of seconds between per task disk resource
+                        collections. [default: 60]
+
+  From module twitter.common.app:
+    --app_daemonize     Daemonize this application. [default: False]
+    --app_profile_output=FILENAME
+                        Dump the profiling output to a binary profiling
+                        format. [default: None]
+    --app_daemon_stderr=TWITTER_COMMON_APP_DAEMON_STDERR
+                        Direct this app's stderr to this file if daemonized.
+                        [default: /dev/null]
+    --app_debug         Print extra debugging information during application
+                        initialization. [default: False]
+    --app_rc_filename   Print the filename for the rc file and quit. [default:
+                        False]
+    --app_daemon_stdout=TWITTER_COMMON_APP_DAEMON_STDOUT
+                        Direct this app's stdout to this file if daemonized.
+                        [default: /dev/null]
+    --app_profiling     Run profiler on the code while it runs.  Note this can
+                        cause slowdowns. [default: False]
+    --app_ignore_rc_file
+                        Ignore default arguments from the rc file. [default:
+                        False]
+    --app_pidfile=TWITTER_COMMON_APP_PIDFILE
+                        The pidfile to use if --app_daemonize is specified.
+                        [default: None]
+
+  From module twitter.common.log.options:
+    --log_to_stdout=[scheme:]LEVEL
+                        OBSOLETE - legacy flag, use --log_to_stderr instead.
+                        [default: ERROR]
+    --log_to_stderr=[scheme:]LEVEL
+                        The level at which logging to stderr [default: ERROR].
+                        Takes either LEVEL or scheme:LEVEL, where LEVEL is one
+                        of ['INFO', 'NONE', 'WARN', 'ERROR', 'DEBUG', 'FATAL']
+                        and scheme is one of ['google', 'plain'].
+    --log_to_disk=[scheme:]LEVEL
+                        The level at which logging to disk [default: INFO].
+                        Takes either LEVEL or scheme:LEVEL, where LEVEL is one
+                        of ['INFO', 'NONE', 'WARN', 'ERROR', 'DEBUG', 'FATAL']
+                        and scheme is one of ['google', 'plain'].
+    --log_dir=DIR       The directory into which log files will be generated
+                        [default: /var/tmp].
+    --log_simple        Write a single log file rather than one log file per
+                        log level [default: False].
+    --log_to_scribe=[scheme:]LEVEL
+                        The level at which logging to scribe [default: NONE].
+                        Takes either LEVEL or scheme:LEVEL, where LEVEL is one
+                        of ['INFO', 'NONE', 'WARN', 'ERROR', 'DEBUG', 'FATAL']
+                        and scheme is one of ['google', 'plain'].
+    --scribe_category=CATEGORY
+                        The category used when logging to the scribe daemon.
+                        [default: python_default].
+    --scribe_buffer     Buffer messages when scribe is unavailable rather than
+                        dropping them. [default: False].
+    --scribe_host=HOST  The host running the scribe daemon. [default:
+                        localhost].
+    --scribe_port=PORT  The port used to connect to the scribe daemon.
+                        [default: 1463].
+```
diff --git a/source/documentation/0.19.1/reference/scheduler-configuration.md b/source/documentation/0.19.1/reference/scheduler-configuration.md
new file mode 100644
index 0000000..05f84d2
--- /dev/null
+++ b/source/documentation/0.19.1/reference/scheduler-configuration.md
@@ -0,0 +1,252 @@
+# Scheduler Configuration Reference
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+A list of the available options can be seen by running `aurora-scheduler -help`.
+
+Please refer to the [Operator Configuration Guide](../../operations/configuration/) for details on how
+to properly set the most important options.
+
+```
+$ aurora-scheduler -help
+-------------------------------------------------------------------------
+-h or -help to print this help message
+
+Required flags:
+-backup_dir [not null]
+	Directory to store backups under. Will be created if it does not exist.
+-cluster_name [not null]
+	Name to identify the cluster being served.
+-framework_authentication_file
+	Properties file which contains framework credentials to authenticate with Mesosmaster. Must contain the properties 'aurora_authentication_principal' and 'aurora_authentication_secret'.
+-ip
+	The ip address to listen. If not set, the scheduler will listen on all interfaces.
+-mesos_master_address [not null]
+	Address for the mesos master, can be a socket address or zookeeper path.
+-mesos_role
+	The Mesos role this framework will register as. The default is to left this empty, and the framework will register without any role and only receive unreserved resources in offer.
+-serverset_path [not null, must be non-empty]
+	ZooKeeper ServerSet path to register at.
+-shiro_after_auth_filter
+	Fully qualified class name of the servlet filter to be applied after the shiro auth filters are applied.
+-thermos_executor_path
+	Path to the thermos executor entry point.
+-tier_config [file must be readable]
+	Configuration file defining supported task tiers, task traits and behaviors.
+-webhook_config [file must exist, file must be readable]
+	Path to webhook configuration file.
+-zk_endpoints [must have at least 1 item]
+	Endpoint specification for the ZooKeeper servers.
+
+Optional flags:
+-allow_container_volumes (default false)
+	Allow passing in volumes in the job. Enabling this could pose a privilege escalation threat.
+-allow_docker_parameters (default false)
+	Allow to pass docker container parameters in the job.
+-allow_gpu_resource (default false)
+	Allow jobs to request Mesos GPU resource.
+-allowed_container_types (default [MESOS])
+	Container types that are allowed to be used by jobs.
+-allowed_job_environments (default ^(prod|devel|test|staging\d*)$)
+	Regular expression describing the environments that are allowed to be used by jobs.
+-async_slot_stat_update_interval (default (1, mins))
+	Interval on which to try to update open slot stats.
+-async_task_stat_update_interval (default (1, hrs))
+	Interval on which to try to update resource consumption stats.
+-async_worker_threads (default 8)
+	The number of worker threads to process async task operations with.
+-backup_interval (default (1, hrs))
+	Minimum interval on which to write a storage backup.
+-cron_scheduler_num_threads (default 10)
+	Number of threads to use for the cron scheduler thread pool.
+-cron_scheduling_max_batch_size (default 10) [must be > 0]
+	The maximum number of triggered cron jobs that can be processed in a batch.
+-cron_start_initial_backoff (default (5, secs))
+	Initial backoff delay while waiting for a previous cron run to be killed.
+-cron_start_max_backoff (default (1, mins))
+	Max backoff delay while waiting for a previous cron run to be killed.
+-cron_timezone (default GMT)
+	TimeZone to use for cron predictions.
+-custom_executor_config [file must exist, file must be readable]
+	Path to custom executor settings configuration file.
+-default_docker_parameters (default {})
+	Default docker parameters for any job that does not explicitly declare parameters.
+-dlog_max_entry_size (default (512, KB))
+	Specifies the maximum entry size to append to the log. Larger entries will be split across entry Frames.
+-dlog_shutdown_grace_period (default (2, secs))
+	Specifies the maximum time to wait for scheduled checkpoint and snapshot actions to complete before forcibly shutting down.
+-dlog_snapshot_interval (default (1, hrs))
+	Specifies the frequency at which snapshots of local storage are taken and written to the log.
+-enable_cors_for
+	List of domains for which CORS support should be enabled.
+-enable_mesos_fetcher (default false)
+	Allow jobs to pass URIs to the Mesos Fetcher. Note that enabling this feature could pose a privilege escalation threat.
+-enable_preemptor (default true)
+	Enable the preemptor and preemption
+-enable_revocable_cpus (default true)
+	Treat CPUs as a revocable resource.
+-enable_revocable_ram (default false)
+	Treat RAM as a revocable resource.
+-executor_user (default root)
+	User to start the executor. Defaults to "root". Set this to an unprivileged user if the mesos master was started with "--no-root_submissions". If set to anything other than "root", the executor will ignore the "role" setting for jobs since it can't use setuid() anymore. This means that all your jobs will run under the specified user and the user has to exist on the Mesos agents.
+-first_schedule_delay (default (1, ms))
+	Initial amount of time to wait before first attempting to schedule a PENDING task.
+-flapping_task_threshold (default (5, mins))
+	A task that repeatedly runs for less than this time is considered to be flapping.
+-framework_announce_principal (default false)
+	When 'framework_authentication_file' flag is set, the FrameworkInfo registered with the mesos master will also contain the principal. This is necessary if you intend to use mesos authorization via mesos ACLs. The default will change in a future release. Changing this value is backwards incompatible. For details, see MESOS-703.
+-framework_failover_timeout (default (21, days))
+	Time after which a framework is considered deleted.  SHOULD BE VERY HIGH.
+-framework_name (default Aurora)
+	Name used to register the Aurora framework with Mesos.
+-global_container_mounts (default [])
+	A comma separated list of mount points (in host:container form) to mount into all (non-mesos) containers.
+-history_max_per_job_threshold (default 100)
+	Maximum number of terminated tasks to retain in a job history.
+-history_min_retention_threshold (default (1, hrs))
+	Minimum guaranteed time for task history retention before any pruning is attempted.
+-history_prune_threshold (default (2, days))
+	Time after which the scheduler will prune terminated task history.
+-hostname
+	The hostname to advertise in ZooKeeper instead of the locally-resolved hostname.
+-http_authentication_mechanism (default NONE)
+	HTTP Authentication mechanism to use.
+-http_port (default 0)
+	The port to start an HTTP server on.  Default value will choose a random port.
+-initial_flapping_task_delay (default (30, secs))
+	Initial amount of time to wait before attempting to schedule a flapping task.
+-initial_schedule_penalty (default (1, secs))
+	Initial amount of time to wait before attempting to schedule a task that has failed to schedule.
+-initial_task_kill_retry_interval (default (5, secs))
+	When killing a task, retry after this delay if mesos has not responded, backing off up to transient_task_state_timeout
+-job_update_history_per_job_threshold (default 10)
+	Maximum number of completed job updates to retain in a job update history.
+-job_update_history_pruning_interval (default (15, mins))
+	Job update history pruning interval.
+-job_update_history_pruning_threshold (default (30, days))
+	Time after which the scheduler will prune completed job update history.
+-kerberos_debug (default false)
+	Produce additional Kerberos debugging output.
+-kerberos_server_keytab
+	Path to the server keytab.
+-kerberos_server_principal
+	Kerberos server principal to use, usually of the form HTTP/aurora.example.com@EXAMPLE.COM
+-max_flapping_task_delay (default (5, mins))
+	Maximum delay between attempts to schedule a flapping task.
+-max_leading_duration (default (1, days))
+	After leading for this duration, the scheduler should commit suicide.
+-max_registration_delay (default (1, mins))
+	Max allowable delay to allow the driver to register before aborting
+-max_reschedule_task_delay_on_startup (default (30, secs))
+	Upper bound of random delay for pending task rescheduling on scheduler startup.
+-max_saved_backups (default 48)
+	Maximum number of backups to retain before deleting the oldest backups.
+-max_schedule_attempts_per_sec (default 40.0)
+	Maximum number of scheduling attempts to make per second.
+-max_schedule_penalty (default (1, mins))
+	Maximum delay between attempts to schedule a PENDING tasks.
+-max_status_update_batch_size (default 1000) [must be > 0]
+	The maximum number of status updates that can be processed in a batch.
+-max_task_event_batch_size (default 300) [must be > 0]
+	The maximum number of task state change events that can be processed in a batch.
+-max_tasks_per_job (default 4000) [must be > 0]
+	Maximum number of allowed tasks in a single job.
+-max_tasks_per_schedule_attempt (default 5) [must be > 0]
+	The maximum number of tasks to pick in a single scheduling attempt.
+-max_update_instance_failures (default 20000) [must be > 0]
+	Upper limit on the number of failures allowed during a job update. This helps cap potentially unbounded entries into storage.
+-min_offer_hold_time (default (5, mins))
+	Minimum amount of time to hold a resource offer before declining.
+-native_log_election_retries (default 20)
+	The maximum number of attempts to obtain a new log writer.
+-native_log_election_timeout (default (15, secs))
+	The timeout for a single attempt to obtain a new log writer.
+-native_log_file_path
+	Path to a file to store the native log data in.  If the parent directory doesnot exist it will be created.
+-native_log_quorum_size (default 1)
+	The size of the quorum required for all log mutations.
+-native_log_read_timeout (default (5, secs))
+	The timeout for doing log reads.
+-native_log_write_timeout (default (3, secs))
+	The timeout for doing log appends and truncations.
+-native_log_zk_group_path
+	A zookeeper node for use by the native log to track the master coordinator.
+-offer_filter_duration (default (5, secs))
+	Duration after which we expect Mesos to re-offer unused resources. A short duration improves scheduling performance in smaller clusters, but might lead to resource starvation for other frameworks if you run many frameworks in your cluster.
+-offer_hold_jitter_window (default (1, mins))
+	Maximum amount of random jitter to add to the offer hold time window.
+-offer_reservation_duration (default (3, mins))
+	Time to reserve a agent's offers while trying to satisfy a task preempting another.
+-populate_discovery_info (default false)
+	If true, Aurora populates DiscoveryInfo field of Mesos TaskInfo.
+-preemption_delay (default (3, mins))
+	Time interval after which a pending task becomes eligible to preempt other tasks
+-preemption_slot_finder_modules (default [class org.apache.aurora.scheduler.preemptor.PendingTaskProcessorModule, class org.apache.aurora.scheduler.preemptor.PreemptionVictimFilterModule])
+  Guice modules for replacing preemption logic.
+-preemption_slot_hold_time (default (5, mins))
+	Time to hold a preemption slot found before it is discarded.
+-preemption_slot_search_interval (default (1, mins))
+	Time interval between pending task preemption slot searches.
+-receive_revocable_resources (default false)
+	Allows receiving revocable resource offers from Mesos.
+-reconciliation_explicit_batch_interval (default (5, secs))
+	Interval between explicit batch reconciliation requests.
+-reconciliation_explicit_batch_size (default 1000) [must be > 0]
+	Number of tasks in a single batch request sent to Mesos for explicit reconciliation.
+-reconciliation_explicit_interval (default (60, mins))
+	Interval on which scheduler will ask Mesos for status updates of all non-terminal tasks known to scheduler.
+-reconciliation_implicit_interval (default (60, mins))
+	Interval on which scheduler will ask Mesos for status updates of all non-terminal tasks known to Mesos.
+-reconciliation_initial_delay (default (1, mins))
+	Initial amount of time to delay task reconciliation after scheduler start up.
+-reconciliation_schedule_spread (default (30, mins))
+	Difference between explicit and implicit reconciliation intervals intended to create a non-overlapping task reconciliation schedule.
+-require_docker_use_executor (default true)
+	If false, Docker tasks may run without an executor (EXPERIMENTAL)
+-scheduling_max_batch_size (default 3) [must be > 0]
+	The maximum number of scheduling attempts that can be processed in a batch.
+-serverset_endpoint_name (default http)
+	Name of the scheduler endpoint published in ZooKeeper.
+-shiro_ini_path
+	Path to shiro.ini for authentication and authorization configuration.
+-shiro_realm_modules (default [class org.apache.aurora.scheduler.http.api.security.IniShiroRealmModule])
+	Guice modules for configuring Shiro Realms.
+-sla_non_prod_metrics (default [])
+	Metric categories collected for non production tasks.
+-sla_prod_metrics (default [JOB_UPTIMES, PLATFORM_UPTIME, MEDIANS])
+	Metric categories collected for production tasks.
+-sla_stat_refresh_interval (default (1, mins))
+	The SLA stat refresh interval.
+-stat_retention_period (default (1, hrs))
+	Time for a stat to be retained in memory before expiring.
+-stat_sampling_interval (default (1, secs))
+	Statistic value sampling interval.
+-task_assigner_modules (default [class org.apache.aurora.scheduler.state.FirstFitTaskAssignerModule])
+  Guice modules for replacing task assignment logic.
+-thermos_executor_cpu (default 0.25)
+	The number of CPU cores to allocate for each instance of the executor.
+-thermos_executor_flags
+	Extra arguments to be passed to the thermos executor
+-thermos_executor_ram (default (128, MB))
+	The amount of RAM to allocate for each instance of the executor.
+-thermos_executor_resources (default [])
+	A comma separated list of additional resources to copy into the sandbox.Note: if thermos_executor_path is not the thermos_executor.pex file itself, this must include it.
+-thermos_home_in_sandbox (default false)
+	If true, changes HOME to the sandbox before running the executor. This primarily has the effect of causing the executor and runner to extract themselves into the sandbox.
+-thrift_method_interceptor_modules (default [])
+	Additional Guice modules for intercepting Thrift method calls.
+-transient_task_state_timeout (default (5, mins))
+	The amount of time after which to treat a task stuck in a transient state as LOST.
+-viz_job_url_prefix (default )
+	URL prefix for job container stats.
+-zk_chroot_path
+	chroot path to use for the ZooKeeper connections
+-zk_digest_credentials
+	user:password to use when authenticating with ZooKeeper.
+-zk_in_proc (default false)
+	Launches an embedded zookeeper server for local testing causing -zk_endpoints to be ignored if specified.
+-zk_session_timeout (default (4, secs))
+	The ZooKeeper session timeout.
+-zk_use_curator (default true)
+	DEPRECATED: Uses Apache Curator as the zookeeper client; otherwise a copy of Twitter commons/zookeeper (the legacy library) is used.
+-------------------------------------------------------------------------
+```
diff --git a/source/documentation/0.19.1/reference/scheduler-endpoints.md b/source/documentation/0.19.1/reference/scheduler-endpoints.md
new file mode 100644
index 0000000..ddae76b
--- /dev/null
+++ b/source/documentation/0.19.1/reference/scheduler-endpoints.md
@@ -0,0 +1,19 @@
+# HTTP endpoints
+
+There are a number of HTTP endpoints that the Aurora scheduler exposes. These allow various
+operational tasks to be performed on the scheduler. Below is an (incomplete) list of such endpoints
+and a brief explanation of what they do.
+
+## Leader health
+The /leaderhealth endpoint enables performing health checks on the scheduler instances in order
+to forward requests to the leading scheduler. This is typically used by a load balancer such as
+HAProxy or AWS ELB.
+
+When an HTTP GET request is issued on this endpoint, it responds as follows:
+
+- If the instance that received the GET request is the leading scheduler, an HTTP status code of
+  `200 OK` is returned.
+- If the instance that received the GET request is not the leading scheduler but a leader does
+  exist, an HTTP status code of `503 SERVICE_UNAVAILABLE` is returned.
+- If no leader currently exists or the leader is unknown, an HTTP status code of `502 BAD_GATEWAY`
+  is returned.
diff --git a/source/documentation/0.19.1/reference/task-lifecycle.md b/source/documentation/0.19.1/reference/task-lifecycle.md
new file mode 100644
index 0000000..8ec0077
--- /dev/null
+++ b/source/documentation/0.19.1/reference/task-lifecycle.md
@@ -0,0 +1,150 @@
+# Task Lifecycle
+
+When Aurora reads a configuration file and finds a `Job` definition, it:
+
+1.  Evaluates the `Job` definition.
+2.  Splits the `Job` into its constituent `Task`s.
+3.  Sends those `Task`s to the scheduler.
+4.  The scheduler puts the `Task`s into `PENDING` state, starting each
+    `Task`'s life cycle.
+
+
+![Life of a task](../images/lifeofatask.png)
+
+Please note, a couple of task states described below are missing from
+this state diagram.
+
+
+## PENDING to RUNNING states
+
+When a `Task` is in the `PENDING` state, the scheduler constantly
+searches for machines satisfying that `Task`'s resource request
+requirements (RAM, disk space, CPU time) while maintaining configuration
+constraints such as "a `Task` must run on machines dedicated to a
+particular role" or attribute limit constraints such as "at most 2
+`Task`s from the same `Job` may run on each rack". When the scheduler
+finds a suitable match, it assigns the `Task` to a machine and puts the
+`Task` into the `ASSIGNED` state.
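+
+Such constraints are expressed in the job configuration. As a small sketch (see the configuration
+reference for the full syntax), the rack example above corresponds to a limit constraint like:
+
+    constraints = {
+      'rack': 'limit:2',
+    }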
+
+From the `ASSIGNED` state, the scheduler sends an RPC to the agent
+machine containing `Task` configuration, which the agent uses to spawn
+an executor responsible for the `Task`'s lifecycle. When the scheduler
+receives an acknowledgment that the machine has accepted the `Task`,
+the `Task` goes into `STARTING` state.
+
+The `STARTING` state initializes a `Task` sandbox. When the sandbox is fully
+initialized, Thermos begins to invoke `Process`es. The agent machine sends an
+update to the scheduler that the `Task` is in `RUNNING` state only after the
+task satisfies its liveness requirements. See
+[Health Checking](../features/services#health-checking) for more details on
+how to configure health checks.
+
+
+
+## RUNNING to terminal states
+
+There are various ways that an active `Task` can transition into a terminal
+state. By definition, it can never leave that state. However, depending on the
+nature of the termination and the originating `Job` definition
+(e.g. `service`, `max_task_failures`), a replacement `Task` might be
+scheduled.
+
+### Natural Termination: FINISHED, FAILED
+
+A `RUNNING` `Task` can terminate without direct user interaction. For
+example, it may be a finite computation that finishes, even something as
+simple as `echo hello world`, or it could be an exceptional condition in
+a long-lived service. If the `Task` is successful (its underlying
+processes have succeeded with exit status `0` or finished without
+reaching failure limits), it moves into the `FINISHED` state. If it finished
+after reaching a set of failure limits, it goes into the `FAILED` state.
+
+A terminated `Task` that is subject to rescheduling will be temporarily
+`THROTTLED` if it is considered to be flapping. A task is flapping if its
+previous invocation was terminated after less than 5 minutes (scheduler
+default). The time penalty a task has to remain in the `THROTTLED` state,
+before it is eligible for rescheduling, increases with each consecutive
+failure.
+
+### Forceful Termination: KILLING, RESTARTING
+
+You can terminate a `Task` by issuing an `aurora job kill` command, which
+moves it into `KILLING` state. The scheduler then sends the agent a
+request to terminate the `Task`. If the scheduler receives a successful
+response, it moves the `Task` into the `KILLED` state and never restarts it.
+
+If a `Task` is forced into the `RESTARTING` state via the `aurora job restart`
+command, the scheduler kills the underlying task but in parallel schedules
+an identical replacement for it.
+
+In any case, the responsible executor on the agent follows an escalation
+sequence when killing a running task:
+
+  1. If an `HttpLifecycleConfig` is not present, skip to (4).
+  2. Send a POST to the `graceful_shutdown_endpoint` and wait
+  `graceful_shutdown_wait_secs` seconds.
+  3. Send a POST to the `shutdown_endpoint` and wait
+  `shutdown_wait_secs` seconds.
+  4. Send SIGTERM (`kill`) and wait at most `finalization_wait` seconds.
+  5. Send SIGKILL (`kill -9`).
+
+If the executor notices that all `Process`es in a `Task` have aborted
+during this sequence, it will not proceed with subsequent steps.
+Note that graceful shutdown is best-effort, and due to the many
+inevitable realities of distributed systems, it may not be performed.
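+
+The endpoints and wait times used in this escalation sequence come from the job's
+`HttpLifecycleConfig`. As a rough sketch of the relevant configuration stanza (field placement and
+defaults are documented in the configuration reference; the values shown here are only
+illustrative):
+
+    lifecycle = LifecycleConfig(
+      http = HttpLifecycleConfig(
+        graceful_shutdown_endpoint = '/quitquitquit',  # POSTed to in step 2 above
+        graceful_shutdown_wait_secs = 5,
+        shutdown_endpoint = '/abortabortabort',        # POSTed to in step 3 above
+        shutdown_wait_secs = 5,
+      )
+    )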
+
+### Unexpected Termination: LOST
+
+If a `Task` stays in a transient task state for too long (such as `ASSIGNED`
+or `STARTING`), the scheduler forces it into `LOST` state, creating a new
+`Task` in its place that's sent into `PENDING` state.
+
+In addition, if the Mesos core tells the scheduler that an agent has
+become unhealthy (or outright disappeared), the `Task`s assigned to that
+agent go into `LOST` state and new `Task`s are created in their place.
+From `PENDING` state, there is no guarantee a `Task` will be reassigned
+to the same machine unless job constraints explicitly force it there.
+
+### Giving Priority to Production Tasks: PREEMPTING
+
+Sometimes a Task needs to be interrupted, such as when a non-production
+Task's resources are needed by a higher-priority production Task. This
+type of interruption is called a *pre-emption*. When this happens in
+Aurora, the non-production Task is killed and moved into
+the `PREEMPTING` state when both of the following are true:
+
+- The task being killed is a non-production task.
+- The other task is a `PENDING` production task that hasn't been
+  scheduled due to a lack of resources.
+
+The scheduler UI shows that the non-production task was preempted in favor of
+the production task. At some point, tasks in `PREEMPTING` move to `KILLED`.
+
+Note that non-production tasks consuming many resources are likely to be
+preempted in favor of production tasks.
+
+### Making Room for Maintenance: DRAINING
+
+Cluster operators can put an agent into maintenance mode. This will transition
+all `Task`s running on this agent into `DRAINING` and eventually to `KILLED`.
+Drained `Task`s will be restarted on other agents for which no maintenance
+has been announced yet.
+
+
+
+## State Reconciliation
+
+Due to the many inevitable realities of distributed systems, there might
+be a mismatch of perceived and actual cluster state (e.g. a machine returns
+from a `netsplit` but the scheduler has already marked all its `Task`s as
+`LOST` and rescheduled them).
+
+Aurora regularly runs a state reconciliation process in order to detect
+and correct such issues (e.g. by killing the errant `RUNNING` tasks).
+By default, the proper detection of all failure scenarios and inconsistencies
+may take up to an hour.
+
+To emphasize this point: there is no uniqueness guarantee for a single
+instance of a job in the presence of network partitions. If the `Task`
+requires that, it should be baked in at the application level using a
+distributed coordination service such as ZooKeeper.
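+
+As an illustration of such application-level coordination (this is not an Aurora feature; the
+sketch assumes the third-party `kazoo` ZooKeeper client and placeholder hosts and paths):
+
+    from kazoo.client import KazooClient
+
+    zk = KazooClient(hosts='zk1.example.com:2181,zk2.example.com:2181')
+    zk.start()
+
+    # Only one holder at a time, even if Aurora briefly runs two copies of the
+    # "same" instance during a network partition.
+    lock = zk.Lock('/myservice/singleton', identifier='instance-0')
+    with lock:
+      do_exclusive_work()  # placeholder for the application's critical section
+    zk.stop()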
diff --git a/source/documentation/0.20.0/additional-resources/presentations.md b/source/documentation/0.20.0/additional-resources/presentations.md
new file mode 100644
index 0000000..a0834d6
--- /dev/null
+++ b/source/documentation/0.20.0/additional-resources/presentations.md
@@ -0,0 +1,80 @@
+# Apache Aurora Presentations
+Video and slides from presentations and panel discussions about Apache Aurora.
+
+_(Listed in date descending order)_
+
+<table>
+
+	<tr>
+		<td><img src="/documentation/0.20.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png" alt="Mesos and Aurora on a Small Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=q5iIqhaCJ_o">Mesos &amp; Aurora on a Small Scale (Video)</a></strong>
+		<p>Presented by Florian Pfeiffer</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.20.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png" alt="SLA Aware Maintenance for Operators Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=tZ0-SISvCis">SLA Aware Maintenance for Operators (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.20.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png" alt="Shipping Code with Aurora Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=y1hi7K1lPkk">Shipping Code with Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.20.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png" alt="Twitter Production Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=nNrh-gdu9m4">Twitter’s Production Scale: Mesos and Aurora Operations (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.20.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png" alt="From Monolith to Microservices with Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=yXkOgnyK4Hw">From Monolith to Microservices w/ Aurora (Video)</a></strong>
+		<p>Presented by Thanos Baskous, Tony Dong, Dobromir Montauk</p>
+		<p>April 30, 2015 at <a href="http://www.meetup.com/Bay-Area-Apache-Aurora-Users-Group/events/221219480/">Bay Area Apache Aurora Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.20.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png" alt="Aurora + Mesos in Practice at Twitter Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=1XYJGX_qZVU">Aurora + Mesos in Practice at Twitter (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 07, 2015 at <a href="http://www.bigeng.io/aurora-mesos-in-practice-at-twitter">Bigcommerce TechTalk</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.20.0/images/presentations/02_28_2015_apache_aurora_thumb.png" alt="Apache Auroraの始めかた Slideshow Thumbnail" /></td>
+		<td><strong><a href="http://www.slideshare.net/zembutsu/apache-aurora-introduction-and-tutorial-osc15tk">Apache Auroraの始めかた (Slides)</a></strong>
+		<p>Presented by Masahito Zembutsu</p>
+		<p>February 28, 2015 at <a href="http://www.ospn.jp/osc2015-spring/">Open Source Conference 2015 Tokyo Spring</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.20.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png" alt="Apache Aurora Adopters Panel Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=2Jsj0zFdRlg">Apache Aurora Adopters Panel (Video)</a></strong>
+		<p>Panelists Ben Staffin, Josh Adams, Bill Farner, Berk Demir</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.20.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png" alt="Operating Apache Aurora and Mesos at Twitter Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=E4lxX6epM_U">Operating Apache Aurora and Mesos at Twitter (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.20.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png" alt="Apache Aurora and Mesos at TellApart" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=ZZXtXLvTXAE">Apache Aurora and Mesos at TellApart (Video)</a></strong>
+		<p>Presented by Steve Niemitz</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.20.0/images/presentations/08_21_2014_past_present_future_thumb.png" alt="Past, Present, and Future of the Aurora Scheduler Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=Dsc5CPhKs4o">Past, Present, and Future of the Aurora Scheduler (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 21, 2014 at <a href="http://events.linuxfoundation.org/events/archive/2014/mesoscon">#MesosCon 2014</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.20.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png" alt="Introduction to Apache Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=asd_h6VzaJc">Introduction to Apache Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 25, 2014 at <a href="https://www.eventbrite.com/e/aurora-and-mesosframeworksmeetup-tickets-10850994617">Aurora and Mesos Frameworks Meetup</a></p></td>
+	</tr>
+</table>
diff --git a/source/documentation/0.20.0/additional-resources/tools.md b/source/documentation/0.20.0/additional-resources/tools.md
new file mode 100644
index 0000000..7f3613d
--- /dev/null
+++ b/source/documentation/0.20.0/additional-resources/tools.md
@@ -0,0 +1,24 @@
+# Tools
+
+Various tools integrate with Aurora. Is there a tool missing? Let us know, or submit a patch to add it!
+
+* Load-balancing technology used to direct traffic to services running on Aurora:
+  - [synapse](https://github.com/airbnb/synapse) based on HAProxy
+  - [aurproxy](https://github.com/tellapart/aurproxy) based on nginx
+  - [jobhopper](https://github.com/benley/aurora-jobhopper) performs HTTP redirects for easy developer and administrator access
+
+* RPC libraries that integrate with Aurora's [service discovery mechanism](../../features/service-discovery/):
+  - [linkerd](https://linkerd.io/) RPC proxy
+  - [finagle](https://twitter.github.io/finagle) (Scala)
+  - [scales](https://github.com/steveniemitz/scales) (Python)
+
+* Monitoring:
+  - [collectd-aurora](https://github.com/zircote/collectd-aurora) for cluster monitoring using collectd
+  - [Prometheus Aurora exporter](https://github.com/tommyulfsparre/aurora_exporter) for cluster monitoring using Prometheus
+  - [Prometheus service discovery integration](http://prometheus.io/docs/operating/configuration/#zookeeper-serverset-sd-configurations-serverset_sd_config) for discovering and monitoring services running on Aurora
+
+* Packaging and deployment:
+  - [aurora-packaging](https://github.com/apache/aurora-packaging), the source of the official Aurora packages
+
+* Thrift Clients:
+  - [gorealis](https://github.com/rdelval/gorealis) for communicating with the scheduler using Go
diff --git a/source/documentation/0.20.0/contributing.md b/source/documentation/0.20.0/contributing.md
new file mode 100644
index 0000000..d3d1a1f
--- /dev/null
+++ b/source/documentation/0.20.0/contributing.md
@@ -0,0 +1,93 @@
+## Get the Source Code
+
+First things first, you'll need the source! The Aurora source is available from Apache git:
+
+    git clone https://gitbox.apache.org/repos/asf/aurora
+
+Read the Style Guides
+---------------------
+Aurora's codebase is primarily Java and Python and conforms to the Twitter Commons styleguides for
+both languages.
+
+- [Java Style Guide](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/styleguide.md)
+- [Python Style Guide](https://github.com/twitter/commons/blob/master/src/python/twitter/common/styleguide.md)
+
+## Find Something to Do
+
+There are issues in [Jira](https://issues.apache.org/jira/browse/AURORA) with the
+["newbie" label](https://issues.apache.org/jira/issues/?jql=project%20%3D%20AURORA%20AND%20labels%20%3D%20newbie%20and%20resolution%3Dunresolved)
+that are good starting places for new Aurora contributors; pick one of these and dive in! To assign
+a task to yourself, first ask for your JIRA id to be whitelisted by either asking in IRC/Slack or by
+emailing dev@aurora.apache.org. Once your JIRA account has been whitelisted, you can assign tickets
+to yourself. The next step is to prepare your patch and finally post it for review.
+
+## Getting your ReviewBoard Account
+
+Go to https://reviews.apache.org and create an account.
+
+## Setting up your ReviewBoard Environment
+
+Run `./rbt status`. The first time this runs it will bootstrap and you will be asked to log in.
+Subsequent runs will cache your login credentials.
+
+## Submitting a Patch for Review
+
+Post a review with `rbt`, fill out the fields in your browser and hit Publish.
+
+    ./rbt post -o
+
+If you're unsure about who to add as a reviewer, you can default to adding Zameer Manji (zmanji) and
+Joshua Cohen (jcohen). They will take care of finding an appropriate reviewer for the patch.
+
+Once you've done this, you probably want to mark the associated Jira issue as Reviewable.
+
+## Updating an Existing Review
+
+Incorporate review feedback, make some more commits, update your existing review, fill out the
+fields in your browser and hit Publish.
+
+    ./rbt post -o -r <RB_ID>
+
+## Getting Your Review Merged
+
+If you're not an Aurora committer, one of the committers will merge your change in as described
+below. Generally, the last reviewer to give the review a 'Ship It!' will be responsible.
+
+### Merging Your Own Review (Committers)
+
+Once you have shipits from the right committers, merge your changes in a single commit and mark
+the review as submitted. The typical workflow is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>  # Verify the automatically-generated commit message looks sane,
+                            # editing if necessary.
+    git show master         # Verify everything looks sane
+    git push origin master
+    ./rbt close <RB_ID>
+
+Note that even if you're developing using feature branches you will not use `git merge` - each
+commit will be an atomic change accompanied by a ReviewBoard entry.
+
+### Merging Someone Else's Review
+
+Sometimes you'll need to merge someone else's RB. The typical workflow for this is
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>
+    git show master  # Verify everything looks sane, author is correct
+    git push origin master
+
+Note for committers: while we generally use the commit message generated by `./rbt patch`, some
+changes are often required:
+
+1. Ensure the commit message does not exceed 100 characters per line.
+2. Remove the "Testing Done" section. It's generally redundant (can be seen by checking the linked
+  review) or entirely irrelevant to the commit itself.
+
+## Cleaning Up
+
+Your patch has landed, congratulations! The last thing you'll want to do before moving on to your
+next fix is to clean up your Jira and ReviewBoard: the former should be marked as
+"Resolved" while the latter should be marked as "Submitted".
diff --git a/source/documentation/0.20.0/development/client.md b/source/documentation/0.20.0/development/client.md
new file mode 100644
index 0000000..c6bec5b
--- /dev/null
+++ b/source/documentation/0.20.0/development/client.md
@@ -0,0 +1,148 @@
+Developing the Aurora Client
+============================
+
+The client is written in Python, and uses the
+[Pants](http://pantsbuild.github.io/python-readme.html) build tool.
+
+
+Building and Testing
+--------------------
+
+Building and testing the client code are both done using Pants. The relevant targets to know about
+are:
+
+   * Build a client executable: `./pants binary src/main/python/apache/aurora/client:aurora`
+   * Test client code: `./pants test src/test/python/apache/aurora/client/cli:cli`
+
+If you want to build a source distribution of the client, you need to run `./build-support/release/make-python-sdists`.
+
+
+Creating Custom Builds
+----------------------
+
+There are situations where you may want to plug in custom logic to the Client that may not be
+applicable to the open source codebase. Rather than create a whole CLI from scratch, you can
+easily create your own custom, drop-in replacement aurora.pex using the pants build tool.
+
+First, create an AuroraCommandLine implementation as an entry-point for registering customizations:
+
+    from apache.aurora.client.cli.client import AuroraCommandLine
+
+    class CustomAuroraCommandLine(AuroraCommandLine):
+      """Custom AuroraCommandLine for your needs"""
+
+      @property
+      def name(self):
+        return "your-company-aurora"
+
+      @classmethod
+      def get_description(cls):
+        return 'Your Company internal Aurora client command line'
+
+      def __init__(self):
+        super(CustomAuroraCommandLine, self).__init__()
+        # Add custom plugins..
+        self.register_plugin(YourCustomPlugin())
+
+      def register_nouns(self):
+        super(CustomAuroraCommandLine, self).register_nouns()
+        # You can even add new commands / sub-commands!
+        self.register_noun(YourStartUpdateProxy())
+        self.register_noun(YourDeployWorkflowCommand())
+
+Secondly, create a main entry point:
+
+    import sys
+
+    def proxy_main():
+      client = CustomAuroraCommandLine()
+      if len(sys.argv) == 1:
+        sys.argv.append("-h")
+      sys.exit(client.execute(sys.argv[1:]))
+
+Finally, you can wire everything up with a pants BUILD file in your project directory:
+
+    python_binary(
+      name='aurora',
+      entry_point='your_company.aurora.client:proxy_main',
+      dependencies=[
+        ':client_lib'
+      ]
+    )
+
+    python_library(
+      name='client_lib',
+      sources = [
+        'client.py',
+        'custom_plugin.py',
+        'custom_command.py',
+      ],
+      dependencies = [
+        # The Apache Aurora client
+        # Any other dependencies for your custom code
+      ],
+    )
+
+Using the same commands to build the client as above (but obviously pointing to this BUILD file
+instead), you will have a drop-in replacement aurora.pex file with your customizations.
+
+Running/Debugging
+------------------
+
+For manually testing client changes against a cluster, we use [Vagrant](https://www.vagrantup.com/).
+To start a virtual cluster, you need to install Vagrant, and then run `vagrant up` from the root of
+the aurora workspace. This will create a vagrant host named "devcluster", with a Mesos master, a set
+of Mesos agents, and an Aurora scheduler.
+
+If you have a change you would like to test in your local cluster, you'll rebuild the client:
+
+    vagrant ssh -c 'aurorabuild client'
+
+Once this completes, the `aurora` command will reflect your changes.
+
+
+Running/Debugging in PyCharm
+-----------------------------
+
+It's possible to use PyCharm to run and debug both the client and client tests in an IDE. In order
+to do this, first run:
+
+    build-support/python/make-pycharm-virtualenv
+
+This script will configure a virtualenv with all of our Python requirements. Once the script
+completes it will emit instructions for configuring PyCharm:
+
+    Your PyCharm environment is now set up.  You can open the project root
+    directory with PyCharm.
+
+    Once the project is loaded:
+      - open project settings
+      - click 'Project Interpreter'
+      - click the cog in the upper-right corner
+      - click 'Add Local'
+      - select 'build-support/python/pycharm.venv/bin/python'
+      - click 'OK'
+
+### Running/Debugging Tests
+After following these instructions, you should now be able to run/debug tests directly from the IDE
+by right-clicking on a test (or test class) and choosing to run or debug:
+
+[![Debug Client Test](../images/debug-client-test.png)](../images/debug-client-test.png)
+
+If you've set a breakpoint, you can see the run will now stop and let you debug:
+
+[![Debugging Client Test](../images/debugging-client-test.png)](../images/debugging-client-test.png)
+
+### Running/Debugging the Client
+Actually running and debugging the client is unfortunately a bit more complex. You'll need to create
+a Run configuration:
+
+* Go to Run → Edit Configurations
+* Click the + icon to add a new configuration.
+* Choose python and name the configuration 'client'.
+* Set the script path to `/your/path/to/aurora/src/main/python/apache/aurora/client/cli/client.py`
+* Set the script parameters to the command you want to run (e.g. `job status <job key>`)
+* Expand the Environment section and click the ellipsis to add a new environment variable
+* Click the + at the bottom to add a new variable named AURORA_CONFIG_ROOT whose value is the
+  path where your cluster configuration can be found. For example, to talk to the scheduler
+  running in the vagrant image, it would be set to `/your/path/to/aurora/examples/vagrant` (this
+  is the directory where our example clusters.json is found).
+* You should now be able to run and debug this configuration!
diff --git a/source/documentation/0.20.0/development/committers-guide.md b/source/documentation/0.20.0/development/committers-guide.md
new file mode 100644
index 0000000..397d63c
--- /dev/null
+++ b/source/documentation/0.20.0/development/committers-guide.md
@@ -0,0 +1,105 @@
+Committer's Guide
+=================
+
+Information for official Apache Aurora committers.
+
+Setting up your email account
+-----------------------------
+Once your Apache ID has been set up, you can configure your account, add ssh keys, and set up an
+email forwarding address at
+
+    http://id.apache.org
+
+Additional instructions for setting up your new committer email can be found at
+
+    http://www.apache.org/dev/user-email.html
+
+The recommended setup is to configure all services (mailing lists, JIRA, ReviewBoard) to send
+emails to your @apache.org email address.
+
+
+Creating a gpg key for releases
+-------------------------------
+In order to create a release candidate you will need a gpg key published to an external key server
+and that key will need to be added to our KEYS file as well.
+
+1. Create a key:
+
+               gpg --gen-key
+
+2. Add your gpg key to the Apache Aurora KEYS file:
+
+               git clone https://gitbox.apache.org/repos/asf/aurora.git
+               (gpg --list-sigs <KEY ID> && gpg --armor --export <KEY ID>) >> KEYS
+               git add KEYS && git commit -m "Adding gpg key for <APACHE ID>"
+               ./rbt post -o -g
+
+3. Publish the key to an external key server:
+
+               gpg --keyserver pgp.mit.edu --send-keys <KEY ID>
+
+4. Publish the updated KEYS file to the Apache Aurora svn dist locations listed below:
+
+               https://dist.apache.org/repos/dist/dev/aurora/KEYS
+               https://dist.apache.org/repos/dist/release/aurora/KEYS
+
+5. Add your key to git config for use with the release scripts:
+
+               git config --global user.signingkey <KEY ID>
+
+
+Creating a release
+------------------
+The following will guide you through the steps to create a release candidate, vote, and finally an
+official Apache Aurora release. Before starting, your gpg key should be in the KEYS file and you
+must have access to commit to the dist.a.o repositories.
+
+1. Ensure that all issues resolved for this release candidate are tagged with the correct Fix
+Version in JIRA, the changelog script will use this to generate the CHANGELOG in step #3.
+To assign the fix version:
+
+    * Look up the [previous release date](https://issues.apache.org/jira/browse/aurora/?selectedTab=com.atlassian.jira.jira-projects-plugin:versions-panel).
+    * Query all issues resolved after that release date: `project = AURORA AND status in (resolved, Closed) and fixVersion is empty and resolutiondate >= "YYYY/MM/DD"`
+    * In the upper right corner of the query result, select Tools > Bulk Edit.
+    * Select all issues > edit issue > set 'Change Fix Version/s' to the release version.
+    * Make sure to uncheck 'Send mail for this update' at the bottom.
+
+2. Prepare RELEASE-NOTES.md for the release. This just boils down to removing the "(Not yet
+released)" suffix from the impending release.
+
+3. Create a release candidate. This will automatically update the CHANGELOG and commit it, create a
+branch and update the current version within the trunk. To create a minor version update and publish
+it run
+
+               ./build-support/release/release-candidate -l m -p
+
+4. Update, if necessary, the draft email created from the `release-candidate` script in step #3 and
+send the [VOTE] email to the dev@ mailing list. You can verify the release signature and checksums
+by running
+
+               ./build-support/release/verify-release-candidate
+
+5. Wait for the vote to complete. If the vote fails, close the vote by replying to the initial [VOTE]
+email sent in step #4, editing the subject to [RESULT][VOTE] ... and noting the failure reason
+(example [here](http://markmail.org/message/d4d6xtvj7vgwi76f)). You'll also need to manually revert
+the commits generated by the release candidate script that incremented the snapshot version and
+updated the changelog. Once that is done, address any issues, go back to step #1, and run
+again, this time using the -r flag to increment the release candidate version. This will
+automatically clean up the release candidate rc0 branch and source distribution.
+
+               ./build-support/release/release-candidate -l m -r 1 -p
+
+6. Once the vote has successfully passed, create the release
+
+**IMPORTANT: make sure to use the correct release at this final step (e.g.: `-r 1` if rc1 candidate
+has been voted for). Once the release tag is pushed it will be very hard to undo due to remote
+git pre-receive hook explicitly forbidding release tag manipulations.**
+
+               ./build-support/release/release
+
+7. Update the draft email created from the `release` script in step #6 to include the Apache IDs for
+all binding votes and send the [RESULT][VOTE] email to the dev@ mailing list.
+
+8. Update the [Aurora Website](http://aurora.apache.org/) by following the
+[instructions](https://svn.apache.org/repos/asf/aurora/site/README.md) on the ASF Aurora SVN repo.
+Remember to add a blog post under source/blog and regenerate the site before committing.
diff --git a/source/documentation/0.20.0/development/db-migration.md b/source/documentation/0.20.0/development/db-migration.md
new file mode 100644
index 0000000..13283af
--- /dev/null
+++ b/source/documentation/0.20.0/development/db-migration.md
@@ -0,0 +1,34 @@
+DB Migrations
+=============
+
+Changes to the DB schema should be made in the form of migrations. This ensures that all changes
+are applied correctly after a DB dump from a previous version is restored.
+
+DB migrations are managed through a system built on top of
+[MyBatis Migrations](http://www.mybatis.org/migrations/). The migrations are run automatically when
+a snapshot is restored; no manual interaction is required by cluster operators.
+
+Upgrades
+--------
+When adding or altering tables or changing data, in addition to making the change in
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql), a new
+migration class should be created under the org.apache.aurora.scheduler.storage.db.migration
+package. The class should implement the [MigrationScript](https://github.com/mybatis/migrations/blob/master/src/main/java/org/apache/ibatis/migration/MigrationScript.java)
+interface (see [V001_TestMigration](https://github.com/apache/aurora/blob/rel/0.20.0/src/test/java/org/apache/aurora/scheduler/storage/db/testmigration/V001_TestMigration.java)
+as an example). The upgrade and downgrade scripts are defined in this class. When restoring a
+snapshot the list of migrations on the classpath is compared to the list of applied changes in the
+DB. Any changes that have not yet been applied are executed and their downgrade script is stored
+alongside the changelog entry in the database to facilitate downgrades in the event of a rollback.
+
+Downgrades
+----------
+If, while running migrations, a rollback is detected, i.e. a change exists in the DB changelog that
+does not exist on the classpath, the downgrade script associated with each affected change is
+applied.
+
+Baselines
+---------
+After enough time has passed (at least 1 official release), it should be safe to baseline migrations
+if desired. This can be accomplished by ensuring the changes from migrations have been applied to
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql) and then
+removing the corresponding migration classes and adding a migration to remove the changelog entries.
\ No newline at end of file
diff --git a/source/documentation/0.20.0/development/design-documents.md b/source/documentation/0.20.0/development/design-documents.md
new file mode 100644
index 0000000..ee92a60
--- /dev/null
+++ b/source/documentation/0.20.0/development/design-documents.md
@@ -0,0 +1,23 @@
+Design Documents
+================
+
+Since its inception as an Apache project, larger feature additions to the
+Aurora code base have been discussed in the form of design documents. Design
+documents are living documents until a consensus has been reached to implement
+a feature in the proposed form.
+
+Current and past documents:
+
+* [Command Hooks for the Aurora Client](../design/command-hooks/)
+* [Dynamic Reservations](https://docs.google.com/document/d/19gV8Po6DIHO14tOC7Qouk8RnboY8UCfRTninwn_5-7c/edit)
+* [GPU Resources in Aurora](https://docs.google.com/document/d/1J9SIswRMpVKQpnlvJAMAJtKfPP7ZARFknuyXl-2aZ-M/edit)
+* [Health Checks for Updates](https://docs.google.com/document/d/1KOO0LC046k75TqQqJ4c0FQcVGbxvrn71E10wAjMorVY/edit)
+* [JobUpdateDiff thrift API](https://docs.google.com/document/d/1Fc_YhhV7fc4D9Xv6gJzpfooxbK4YWZcvzw6Bd3qVTL8/edit)
+* [REST API RFC](https://docs.google.com/document/d/11_lAsYIRlD5ETRzF2eSd3oa8LXAHYFD8rSetspYXaf4/edit)
+* [Revocable Mesos offers in Aurora](https://docs.google.com/document/d/1r1WCHgmPJp5wbrqSZLsgtxPNj3sULfHrSFmxp2GyPTo/edit)
+* [Supporting the Mesos Universal Containerizer](https://docs.google.com/document/d/111T09NBF2zjjl7HE95xglsDpRdKoZqhCRM5hHmOfTLA/edit?usp=sharing)
+* [Tier Management In Apache Aurora](https://docs.google.com/document/d/1erszT-HsWf1zCIfhbqHlsotHxWUvDyI2xUwNQQQxLgs/edit?usp=sharing)
+* [Ubiquitous Jobs](https://docs.google.com/document/d/12hr6GnUZU3mc7xsWRzMi3nQILGB-3vyUxvbG-6YmvdE/edit)
+* [Pluggable Scheduling](https://docs.google.com/document/d/1fVHLt9AF-YbOCVCDMQmi5DATVusn-tqY8DldKbjVEm0/edit)
+
+Design documents can be found in the Aurora issue tracker via the query [`project = AURORA AND text ~ "docs.google.com" ORDER BY created`](https://issues.apache.org/jira/browse/AURORA-1528?jql=project%20%3D%20AURORA%20AND%20text%20~%20%22docs.google.com%22%20ORDER%20BY%20created).
diff --git a/source/documentation/0.20.0/development/design/command-hooks.md b/source/documentation/0.20.0/development/design/command-hooks.md
new file mode 100644
index 0000000..3f3f70f
--- /dev/null
+++ b/source/documentation/0.20.0/development/design/command-hooks.md
@@ -0,0 +1,102 @@
+# Command Hooks for the Aurora Client
+
+## Introduction/Motivation
+
+We've got hooks in the client that surround API calls. These are
+pretty awkward, because they don't correlate with user actions. For
+example, suppose we wanted a policy that said users weren't allowed to
+kill all instances of a production job at once.
+
+Right now, all that we could hook would be the "killJob" API call. But
+kill (at least in newer versions of the client) normally runs in
+batches. If a user called killall, what we would see on the API level
+is a series of "killJob" calls, each of which specified a batch of
+instances. We wouldn't be able to distinguish between really killing
+all instances of a job (which is forbidden under this policy), and
+carefully killing in batches (which is permitted). In each case, the
+hook would just see a series of API calls, and couldn't find out what
+the actual command being executed was!
+
+For most policy enforcement, what we really want to be able to do is
+look at and vet the commands that a user is performing, not the API
+calls that the client uses to implement those commands.
+
+So I propose that we add a new kind of hooks, which surround noun/verb
+commands. A hook will register itself to handle a collection of (noun,
+verb) pairs. Whenever any of those noun/verb commands are invoked, the
+hook's methods will be called around the execution of the verb. A
+pre-hook will have the ability to reject a command, preventing the
+verb from being executed.
+
+## Registering Hooks
+
+These hooks will be registered via configuration plugins. A configuration plugin
+can register hooks using an API. Hooks registered this way are, effectively,
+hardwired into the client executable.
+
+The order of execution of hooks is unspecified: they may be called in
+any order. There is no way to guarantee that one hook will execute
+before some other hook.
+
+
+### Global Hooks
+
+Commands registered by the python call are called _global_ hooks,
+because they will run for all configurations, whether or not they
+specify any hooks in the configuration file.
+
+In the implementation, hooks are registered in the module
+`apache.aurora.client.cli.command_hooks`, using the class
+`GlobalCommandHookRegistry`. A global hook can be registered by calling
+`GlobalCommandHookRegistry.register_command_hook` in a configuration plugin.
+
+### The API
+
+    class CommandHook(object):
+      @property
+      def name(self):
+        """Returns a name for the hook."""
+
+      def get_nouns(self):
+        """Return the nouns that have verbs that should invoke this hook."""
+
+      def get_verbs(self, noun):
+        """Return the verbs for a particular noun that should invoke his hook."""
+
+      @abstractmethod
+      def pre_command(self, noun, verb, context, commandline):
+        """Execute a hook before invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        Returns: True if the command should be allowed to proceed; False if the command
+        should be rejected.
+        """
+
+      def post_command(self, noun, verb, context, commandline, result):
+        """Execute a hook after invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        * result: the result code returned by the verb.
+        Returns: nothing
+        """
+
+    class GlobalCommandHookRegistry(object):
+      @classmethod
+      def register_command_hook(cls, hook):
+        pass
+
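+For illustration, a sketch of what a concrete hook could look like (the policy and names below are
+examples only, not part of the proposal): a global hook that rejects the `job killall` verb.
+
+    class ForbidKillAll(CommandHook):
+      """Example hook that vetoes 'job killall' commands."""
+
+      @property
+      def name(self):
+        return "forbid_killall"
+
+      def get_nouns(self):
+        return ["job"]
+
+      def get_verbs(self, noun):
+        return ["killall"] if noun == "job" else []
+
+      def pre_command(self, noun, verb, context, commandline):
+        # Returning False rejects the command before the verb executes.
+        return False
+
+      def post_command(self, noun, verb, context, commandline, result):
+        pass
+
+    # Registered from a configuration plugin, which makes it a global hook.
+    GlobalCommandHookRegistry.register_command_hook(ForbidKillAll())
+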
+### Skipping Hooks
+
+To skip a hook, a user uses a command-line option, `--skip-hooks`. The option can either
+specify specific hooks to skip, or "all":
+
+* `aurora --skip-hooks=all job create east/bozo/devel/myjob` will create a job
+  without running any hooks.
+* `aurora --skip-hooks=test,iq create east/bozo/devel/myjob` will create a job,
+  and will skip only the hooks named "test" and "iq".
diff --git a/source/documentation/0.20.0/development/scheduler.md b/source/documentation/0.20.0/development/scheduler.md
new file mode 100644
index 0000000..fc0836a
--- /dev/null
+++ b/source/documentation/0.20.0/development/scheduler.md
@@ -0,0 +1,118 @@
+Developing the Aurora Scheduler
+===============================
+
+The Aurora scheduler is written in Java and built with [Gradle](http://gradle.org).
+
+
+Prerequisite
+============
+
+When using Apache Aurora checked out from the source repository or the binary
+distribution, the Gradle wrapper and JavaScript dependencies are provided.
+However, you need to manually install them when using the source release
+downloads:
+
+1. Install Gradle following the instructions on the [Gradle web site](http://gradle.org)
+2. From the root directory of the Apache Aurora project generate the Gradle
+wrapper by running:
+
+    gradle wrapper
+
+
+Getting Started
+===============
+
+You will need Java 8 installed and on your `PATH` or unzipped somewhere with `JAVA_HOME` set. Then
+
+    ./gradlew tasks
+
+will bootstrap the build system and show available tasks. This can take a while the first time you
+run it but subsequent runs will be much faster due to cached artifacts.
+
+Running the Tests
+-----------------
+Aurora has a comprehensive unit test suite. To run the tests use
+
+    ./gradlew build
+
+Gradle will only re-run tests when dependencies of them have changed. To force a re-run of all
+tests use
+
+    ./gradlew clean build
+
+Running the build with code quality checks
+------------------------------------------
+To speed up development iteration, the plain gradle commands will not run static analysis tools.
+However, you should run these before posting a review diff, and **always** run this before pushing a
+commit to origin/master.
+
+    ./gradlew build -Pq
+
+Running integration tests
+-------------------------
+To run the same tests that are run in the Apache Aurora continuous integration
+environment:
+
+    ./build-support/jenkins/build.sh
+
+In addition, there is an end-to-end test that runs a suite of aurora commands
+using a virtual cluster:
+
+    ./src/test/sh/org/apache/aurora/e2e/test_end_to_end.sh
+
+Creating a bundle for deployment
+--------------------------------
+Gradle can create a zip file containing Aurora, all of its dependencies, and a launch script with
+
+    ./gradlew distZip
+
+or a tar file containing the same files with
+
+    ./gradlew distTar
+
+The output file will be written to `dist/distributions/aurora-scheduler.zip` or
+`dist/distributions/aurora-scheduler.tar`.
+
+
+
+Developing Aurora Java code
+===========================
+
+Setting up an IDE
+-----------------
+Gradle can generate project files for your IDE. To generate an IntelliJ IDEA project run
+
+    ./gradlew idea
+
+and import the generated `aurora.ipr` file.
+
+Adding or Upgrading a Dependency
+--------------------------------
+New dependencies can be added from Maven central by adding a `compile` dependency to `build.gradle`.
+For example, to add a dependency on `com.example`'s `example-lib` 1.0 add this block:
+
+    compile 'com.example:example-lib:1.0'
+
+NOTE: Anyone thinking about adding a new dependency should first familiarize themselves with the
+Apache Foundation's third-party licensing
+[policy](http://www.apache.org/legal/resolved.html#category-x).
+
+
+
+Developing the Aurora Build System
+==================================
+
+Bootstrapping Gradle
+--------------------
+The following files were autogenerated by `gradle wrapper` using gradle's
+[Wrapper](http://www.gradle.org/docs/current/dsl/org.gradle.api.tasks.wrapper.Wrapper.html) plugin and
+should not be modified directly:
+
+    ./gradlew
+    ./gradlew.bat
+    ./gradle/wrapper/gradle-wrapper.jar
+    ./gradle/wrapper/gradle-wrapper.properties
+
+To upgrade Gradle unpack the new version somewhere, run `/path/to/new/gradle wrapper` in the
+repository root and commit the changed files.
+
diff --git a/source/documentation/0.20.0/development/thermos.md b/source/documentation/0.20.0/development/thermos.md
new file mode 100644
index 0000000..0efe130
--- /dev/null
+++ b/source/documentation/0.20.0/development/thermos.md
@@ -0,0 +1,126 @@
+The Python components of Aurora are built using [Pants](https://pantsbuild.github.io).
+
+
+Python Build Conventions
+========================
+The Python code is laid out according to the following conventions:
+
+1. 1 `BUILD` per 3rd level directory. For a list of current top-level packages run:
+
+        % find src/main/python -maxdepth 3 -mindepth 3 -type d |\
+        while read dname; do echo $dname |\
+            sed 's@src/main/python/\(.*\)/\(.*\)/\(.*\).*@\1.\2.\3@'; done
+
+2.  Each `BUILD` file exports 1
+    [`python_library`](https://pantsbuild.github.io/build_dictionary.html#bdict_python_library)
+    that provides a
+    [`setup_py`](https://pantsbuild.github.io/build_dictionary.html#setup_py)
+    containing each
+    [`python_binary`](https://pantsbuild.github.io/build_dictionary.html#python_binary)
+    in the `BUILD` file, named the same as the directory it's in so that it can be referenced
+    without a ':' character. The `sources` field in the `python_library` will almost always be
+    `rglobs('*.py')`.
+
+3.  Other BUILD files may only depend on this single public `python_library`
+    target. Any other target is considered a private implementation detail and
+    should be prefixed with an `_`.
+
+4.  `python_binary` targets are always named the same as the exported console script.
+
+5.  `python_binary` targets must have identical `dependencies` to the `python_library` exported
+    by the package and must use `entry_point`.
+
+    This means a PEX file generated by pants will contain exactly the same files that will be
+    available on the `PYTHONPATH` in the case of a `pip install` of the corresponding library
+    target. This will help our migration off of Pants in the future.
+
+Annotated example - apache.thermos.runner
+-----------------------------------------
+
+    % find src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner/__init__.py
+    src/main/python/apache/thermos/runner/thermos_runner.py
+    src/main/python/apache/thermos/runner/BUILD
+    % cat src/main/python/apache/thermos/runner/BUILD
+    # License boilerplate omitted
+    import os
+
+
+    # Private target so that a setup_py can exist without a circular dependency. Only targets within
+    # this file should depend on this.
+    python_library(
+      name = '_runner',
+      # The target covers every python file under this directory and subdirectories.
+      sources = rglobs('*.py'),
+      dependencies = [
+        '3rdparty/python:twitter.common.app',
+        '3rdparty/python:twitter.common.log',
+        # Source dependencies are always referenced without a ':'.
+        'src/main/python/apache/thermos/common',
+        'src/main/python/apache/thermos/config',
+        'src/main/python/apache/thermos/core',
+      ],
+    )
+
+    # Binary target for thermos_runner.pex. Nothing should depend on this - it's only used as an
+    # argument to ./pants binary.
+    python_binary(
+      name = 'thermos_runner',
+      # Use entry_point, not source so the files used here are the same ones tests see.
+      entry_point = 'apache.thermos.bin.thermos_runner',
+      dependencies = [
+        # Notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+    )
+
+    # The public library that everyone importing the runner symbols uses.
+    # The test targets and any other dependent source code should depend on this.
+    python_library(
+      name = 'runner',
+      dependencies = [
+        # Again, notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+      # We always provide a setup_py. This will cause any dependee libraries to automatically
+      # reference this library in their requirements.txt rather than copy the source files into their
+      # sdist.
+      provides = setup_py(
+        # Conventionally named and versioned.
+        name = 'apache.thermos.runner',
+        version = open(os.path.join(get_buildroot(), '.auroraversion')).read().strip().upper(),
+      ).with_binaries({
+        # Every binary in this file should also be repeated here.
+        # Always use the dict-form of .with_binaries so that commands with dashes in their names are
+        # supported.
+        # The console script name is always the same as the PEX with .pex stripped.
+        'thermos_runner': ':thermos_runner',
+      }),
+    )
+
+
+
+Thermos Test resources
+======================
+
+The Aurora source repository and distributions contain several
+[binary files](../../src/test/resources/org/apache/thermos/root/checkpoints) to
+qualify the backwards-compatibility of thermos with checkpoint data. Since
+thermos persists state to disk (to be read by the thermos observer), it is important that we have
+tests that prevent regressions affecting the ability to parse previously-written data.
+
+The files included represent persisted checkpoints that exercise different
+features of thermos. The existing files should not be modified unless
+we are accepting backwards incompatibility, such as with a major release.
+
+It is not practical to write source code to generate these files on the fly,
+as source would be vulnerable to drift (e.g. due to refactoring) in ways
+that would undermine the goal of ensuring backwards compatibility.
+
+The most common reason to add a new checkpoint file would be to provide
+coverage for new thermos features that alter the data format. This is
+accomplished by writing and running a
+[job configuration](../../reference/configuration/) that exercises the feature, and
+copying the checkpoint file from the sandbox directory; by default this is
+`/var/run/thermos/checkpoints/<aurora task id>`.
diff --git a/source/documentation/0.20.0/development/thrift.md b/source/documentation/0.20.0/development/thrift.md
new file mode 100644
index 0000000..88ded14
--- /dev/null
+++ b/source/documentation/0.20.0/development/thrift.md
@@ -0,0 +1,54 @@
+Thrift
+======
+
+Aurora uses [Apache Thrift](https://thrift.apache.org/) for representing structured data in
+the client/server RPC protocol as well as for internal data storage. While Thrift is capable of
+correctly handling additions and renames of existing members, field removals must be done
+carefully to ensure backwards compatibility and provide a predictable deprecation cycle. This
+document describes general guidelines for making Thrift schema changes to the existing fields in
+[api.thrift](https://github.com/apache/aurora/blob/rel/0.20.0/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+It is highly recommended to go through the
+[Thrift: The Missing Guide](http://diwakergupta.github.io/thrift-missing-guide/) first to refresh on
+basic Thrift schema concepts.
+
+Checklist
+---------
+Every existing Thrift schema modification is unique in its requirements and must be analyzed
+carefully to identify its scope and expected consequences. The following checklist may help in that
+analysis:
+* Is this a new field/struct? If yes, go ahead
+* Is this a pure field/struct rename without any type/structure change? If yes, go ahead and rename
+* Anything else, read further to make sure your change is properly planned
+
+Deprecation cycle
+-----------------
+Any time a breaking change (e.g.: field replacement or removal) is required, the following cycle
+must be followed:
+
+### vCurrent
+The change is applied in a way that does not break the ability of a scheduler/client running this
+version to communicate with a scheduler/client from vCurrent-1.
+* Do not remove or rename the old field
+* Add a new field as an eventual replacement of the old one and implement a dual read/write
+anywhere the old field is used. If a thrift struct is mapped in the DB store make sure both columns
+are marked as `NOT NULL`
+* Check [storage.thrift](https://github.com/apache/aurora/blob/rel/0.20.0/api/src/main/thrift/org/apache/aurora/gen/storage.thrift) to see if
+the affected struct is stored in Aurora scheduler storage. If so, it's almost certainly also
+necessary to perform a [DB migration](../db-migration/).
+* Add a deprecation jira ticket into the vCurrent+1 release candidate
+* Add a TODO for the deprecated field mentioning the jira ticket
+
+### vCurrent+1
+Finalize the change by removing the deprecated fields from the Thrift schema.
+* Drop any dual read/write routines added in the previous version
+* Remove thrift backfilling in scheduler
+* Remove the deprecated Thrift field
+
+Testing
+-------
+It's always advisable to test your changes in the local vagrant environment to build more
+confidence that your change is backwards compatible. It's easy to simulate different
+client/scheduler versions by playing with the `aurorabuild` command. See [this document](../../getting-started/vagrant/)
+for more.
+
diff --git a/source/documentation/0.20.0/development/ui.md b/source/documentation/0.20.0/development/ui.md
new file mode 100644
index 0000000..f41614e
--- /dev/null
+++ b/source/documentation/0.20.0/development/ui.md
@@ -0,0 +1,46 @@
+Developing the Aurora Scheduler UI
+==================================
+
+Installing bower (optional)
+----------------------------
+Third party JS libraries used in Aurora (located at 3rdparty/javascript/bower_components) are
+managed by bower, a JS dependency manager. Bower is only required if you plan to add, remove or
+update JS libraries. Bower can be installed using the following command:
+
+    npm install -g bower
+
+Bower depends on node.js and npm. The easiest way to install node on a mac is via brew:
+
+    brew install node
+
+For more node.js installation options refer to https://github.com/joyent/node/wiki/Installation.
+
+More info on installing and using bower can be found at: http://bower.io/. Once installed, you can
+use the following commands to view and modify the bower repo at
+3rdparty/javascript/bower_components
+
+    bower list
+    bower install <library name>
+    bower remove <library name>
+    bower update <library name>
+    bower help
+
+
+Faster Iteration in Vagrant
+---------------------------
+The scheduler serves UI assets from the classpath. For production deployments this means the assets
+are served from within a jar. However, for faster development iteration, the vagrant image is
+configured to add the `scheduler` subtree of `/vagrant/dist/resources/main` to the head of
+`CLASSPATH`. This path is configured as a shared filesystem to the path on the host system where
+your Aurora repository lives. This means that any updates under `dist/resources/main/scheduler` in
+your checkout will be reflected immediately in the UI served from within the vagrant image.
+
+The one caveat to this is that this path is under `dist` not `src`. This is because the assets must
+be processed by gradle before they can be served. So, unfortunately, you cannot just save your local
+changes and see them reflected in the UI, you must first run `./gradlew processResources`. This is
+less than ideal, but better than having to restart the scheduler after every change. Additionally,
+gradle makes this process somewhat easier with the use of the `--continuous` flag. If you run:
+`./gradlew processResources --continuous` gradle will monitor the filesystem for changes and run the
+task automatically as necessary. This doesn't quite provide hot-reload capabilities, but it does
+allow for <5s from save to changes being visible in the UI with no further action required on the
+part of the developer.
diff --git a/source/documentation/0.20.0/features/constraints.md b/source/documentation/0.20.0/features/constraints.md
new file mode 100644
index 0000000..4ade848
--- /dev/null
+++ b/source/documentation/0.20.0/features/constraints.md
@@ -0,0 +1,126 @@
+Scheduling Constraints
+======================
+
+By default, Aurora will pick any random agent with sufficient resources
+in order to schedule a task. This scheduling choice can be further
+restricted with the help of constraints.
+
+
+Mesos Attributes
+----------------
+
+Data centers are often organized with hierarchical failure domains.  Common failure domains
+include hosts, racks, rows, and PDUs.  If you have this information available, it is wise to tag
+the Mesos agent with them as
+[attributes](https://mesos.apache.org/documentation/attributes-resources/).
+
+The Mesos agent `--attributes` command line argument can be used to mark agents with
+static key/value pairs, so called attributes (not to be confused with `--resources`, which are
+dynamic and accounted).
+
+For example, consider the host `cluster1-aaa-03-sr2` and its following attributes (given in
+key:value format): `host:cluster1-aaa-03-sr2` and `rack:aaa`.
+
+Aurora makes these attributes available for matching with scheduling constraints.
+
+
+Limit Constraints
+-----------------
+
+Limit constraints allow you to control machine diversity using constraints. The below
+constraint ensures that no more than two instances of your job may run on a single host.
+Think of this as a "group by" limit.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'host': 'limit:2',
+      }
+      ...
+    )
+
+
+Likewise, you can use constraints to control rack diversity, e.g. at
+most one task per rack:
+
+    constraints = {
+      'rack': 'limit:1',
+    }
+
+Use these constraints sparingly as they can dramatically reduce Tasks' schedulability.
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+
+Value Constraints
+-----------------
+
+Value constraints can be used to express that a certain attribute with a certain value
+should be present on a Mesos agent. For example, the following job would only be
+scheduled on nodes that claim to have an `SSD` as their disk.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'disk': 'SSD',
+      }
+      ...
+    )
+
+
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+Running stateful services
+-------------------------
+
+Aurora is best suited to run stateless applications, but it also accommodates stateful services
+like databases, or services that otherwise need to always run on the same machines.
+
+### Dedicated attribute
+
+Most Mesos attributes are arbitrary and available for custom use.  There is one exception,
+though: the `dedicated` attribute.  Aurora treats this attribute specially: machines tagged with it
+are reserved for matching jobs, and only matching jobs will be scheduled on them.
+
+
+#### Syntax
+The dedicated attribute has semantic meaning. The format is `$role(/.*)?`. When a job is created,
+the scheduler requires that the `$role` component matches the `role` field in the job
+configuration, and will reject the job creation otherwise.  The remainder of the attribute is
+free-form. We've developed the idiom of formatting this attribute as `$role/$job`, but do not
+enforce this. For example: a job `devcluster/www-data/prod/hello` with a dedicated constraint set as
+`www-data/web.multi` will have its tasks scheduled only on Mesos agents configured with:
+`--attributes=dedicated:www-data/web.multi`.
+
+A wildcard (`*`) may be used for the role portion of the dedicated attribute, which will allow any
+owner to elect for a job to run on the host(s). For example: tasks from both
+`devcluster/www-data/prod/hello` and `devcluster/vagrant/test/hello` with a dedicated constraint
+formatted as `*/web.multi` will be scheduled only on Mesos agents configured with
+`--attributes=dedicated:*/web.multi`. This may be useful when assembling a virtual cluster of
+machines sharing the same set of traits or requirements.
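+
+For illustration, a job opting into such a wildcard dedicated pool might be configured as in the
+sketch below (the attribute value mirrors the example above; the rest of the job is elided):
+
+    Service(
+      name = 'hello',
+      role = 'www-data',
+      constraints = {
+        'dedicated': '*/web.multi',
+      }
+      ...
+    )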
+
+##### Example
+Consider the following agent command line:
+
+    mesos-slave --attributes="dedicated:db_team/redis" ...
+
+And this job configuration:
+
+    Service(
+      name = 'redis',
+      role = 'db_team',
+      constraints = {
+        'dedicated': 'db_team/redis'
+      }
+      ...
+    )
+
+The job configuration is indicating that it should only be scheduled on agents with the attribute
+`dedicated:db_team/redis`.  Additionally, Aurora will prevent any tasks that do _not_ have that
+constraint from running on those agents.
+
diff --git a/source/documentation/0.20.0/features/containers.md b/source/documentation/0.20.0/features/containers.md
new file mode 100644
index 0000000..5bf1601
--- /dev/null
+++ b/source/documentation/0.20.0/features/containers.md
@@ -0,0 +1,130 @@
+Containers
+==========
+
+Aurora supports several containerizers, notably the Mesos containerizer and the Docker
+containerizer. The Mesos containerizer uses native OS features directly to provide isolation between
+containers, while the Docker containerizer delegates container management to the Docker engine.
+
+The support for launching container images via both containerizers has to be
+[enabled by a cluster operator](../../operations/configuration/#containers).
+
+Mesos Containerizer
+-------------------
+
+The Mesos containerizer is the native Mesos containerizer solution. It allows tasks to be
+run with an array of [pluggable isolators](../resource-isolation/) and can launch tasks using
+[Docker](https://github.com/docker/docker/blob/master/image/spec/v1.md) images,
+[AppC](https://github.com/appc/spec/blob/master/SPEC.md) images, or directly on the agent host
+filesystem.
+
+The following example (available in our [Vagrant environment](../../getting-started/vagrant/))
+launches a hello world example within a `debian/jessie` Docker image:
+
+    $ cat /vagrant/examples/jobs/hello_docker_image.aurora
+    hello_loop = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = Task(
+      processes = [hello_loop],
+      resources = Resources(cpu=1, ram=1*MB, disk=8*MB)
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker_image',
+        task = task,
+        container = Mesos(image=DockerImage(name='debian', tag='jessie'))
+      )
+    ]
+
+Docker and AppC images are designated using an appropriate `image` property of the `Mesos`
+configuration object. If either `container` or `image` is left unspecified, the host filesystem
+will be used. Further details of how to specify images can be found in the
+[Reference Documentation](../../reference/configuration/#mesos-object).
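+
+For example, an AppC image would be referenced through the same `image` property (a sketch; the
+name and id below are placeholders):
+
+    container = Mesos(image = AppcImage(name = 'example.com/my-app', image_id = 'sha512-...'))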
+
+By default, Aurora launches processes as the Linux user with the same name as the job's role
+(e.g. `www-data` in the example above). This user has to exist on the host filesystem. If it does
+not exist within the container image, it will be created automatically. If it does exist in the
+image, the user and its primary group must have a matching uid/gid.
+
+For more information on the Mesos containerizer filesystem, namespace, and isolator features, visit
+[Mesos Containerizer](http://mesos.apache.org/documentation/latest/mesos-containerizer/) and
+[Mesos Container Images](http://mesos.apache.org/documentation/latest/container-image/).
+
+
+Docker Containerizer
+--------------------
+
+The Docker containerizer launches container images using the Docker engine. It may often provide
+more advanced features than the native Mesos containerizer, but has to be installed separately
+from Mesos on each agent host.
+
+Starting with the 0.17.0 release, `image` can be specified with a `{{docker.image[name][tag]}}` binder so that
+the tag can be resolved to a concrete image digest. This ensures that the job always uses the same image
+across restarts, even if the version identified by the tag has been updated, guaranteeing that only job
+updates can mutate configuration.
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/hello_docker_engine.aurora
+    hello_loop = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = Task(
+      processes = [hello_loop],
+      resources = Resources(cpu=1, ram=1*MB, disk=8*MB)
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker',
+        task = task,
+        container = Docker(image = 'python:2.7')
+      ), Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker_engine_binding',
+        task = task,
+        container = Docker(image = '{{docker.image[library/python][2.7]}}')
+      )
+    ]
+
+Note, this feature requires a v2 Docker registry. If using a private Docker registry, its URL
+must be specified in the `clusters.json` configuration file under the key `docker_registry`.
+If not specified, `docker_registry` defaults to `https://registry-1.docker.io` (Docker Hub).
+
+Example:
+
+    # clusters.json
+    [{
+      "name": "devcluster",
+      ...
+      "docker_registry": "https://registry.example.com"
+    }]
+
+Details of how to use Docker via the Docker engine can be found in the
+[Reference Documentation](../../reference/configuration/#docker-object). Please note that in order to
+correctly execute processes inside a job, the Docker container must have Python 2.7 and potentially
+further Mesos dependencies installed. This limitation does not hold for Docker containers used via
+the Mesos containerizer.
+
+For more information on launching Docker containers through the Docker containerizer, visit
+[Docker Containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/)
diff --git a/source/documentation/0.20.0/features/cron-jobs.md b/source/documentation/0.20.0/features/cron-jobs.md
new file mode 100644
index 0000000..bcc94dc
--- /dev/null
+++ b/source/documentation/0.20.0/features/cron-jobs.md
@@ -0,0 +1,124 @@
+# Cron Jobs
+
+Aurora supports execution of scheduled jobs on a Mesos cluster using cron-style syntax.
+
+- [Overview](#overview)
+- [Collision Policies](#collision-policies)
+- [Failure recovery](#failure-recovery)
+- [Interacting with cron jobs via the Aurora CLI](#interacting-with-cron-jobs-via-the-aurora-cli)
+	- [cron schedule](#cron-schedule)
+	- [cron deschedule](#cron-deschedule)
+	- [cron start](#cron-start)
+	- [job killall, job restart, job kill](#job-killall-job-restart-job-kill)
+- [Technical Note About Syntax](#technical-note-about-syntax)
+- [Caveats](#caveats)
+	- [Failovers](#failovers)
+	- [Collision policy is best-effort](#collision-policy-is-best-effort)
+	- [Timezone Configuration](#timezone-configuration)
+
+## Overview
+
+A job is identified as a cron job by the presence of a
+`cron_schedule` attribute containing a cron-style schedule in the
+[`Job`](../../reference/configuration/#job-objects) object. Examples of cron schedules
+include "every 5 minutes" (`*/5 * * * *`), "Fridays at 17:00" (`* 17 * * FRI`), and
+"the 1st and 15th day of the month at 03:00" (`0 3 1,15 *`).
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/cron_hello_world.aurora
+    # A cron job that runs every 5 minutes.
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]
+
+## Collision Policies
+
+The `cron_collision_policy` field specifies the scheduler's behavior when a new cron job is
+triggered while an older run hasn't finished. The scheduler has two policies available:
+
+* `KILL_EXISTING`: The default policy - on a collision the old instances are killed and instances with the current
+configuration are started.
+* `CANCEL_NEW`: On a collision the new run is cancelled.
+
+Note that the use of `CANCEL_NEW` is likely a code smell - interrupted cron jobs should be able
+to recover their progress on a subsequent invocation, otherwise they risk having their work queue
+grow faster than they can process it.
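+
+The policy is selected via the `cron_collision_policy` field of the `Job` object. As a minimal
+sketch (only the relevant fields of the earlier example are repeated):
+
+    Job(
+      name = 'cron_hello_world',
+      cron_schedule = '*/5 * * * *',
+      cron_collision_policy = 'CANCEL_NEW',
+      ...
+    )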
+
+## Failure recovery
+
+Unlike with services, which Aurora will always re-execute regardless of exit status, instances of
+cron jobs retry according to the `max_task_failures` attribute of the
+[Task](../../reference/configuration/#task-object) object. To get "run-until-success" semantics,
+set `max_task_failures` to `-1`.
+
+## Interacting with cron jobs via the Aurora CLI
+
+Most interaction with cron jobs takes place using the `cron` subcommand. See `aurora cron -h`
+for up-to-date usage instructions.
+
+### cron schedule
+Schedules a new cron job on the Aurora cluster for later runs or replaces the existing cron template
+with a new one. Only future runs will be affected; any existing active tasks are left intact.
+
+    $ aurora cron schedule devcluster/www-data/test/cron_hello_world /vagrant/examples/jobs/cron_hello_world.aurora
+
+### cron deschedule
+Deschedules a cron job, preventing future runs but allowing current runs to complete.
+
+    $ aurora cron deschedule devcluster/www-data/test/cron_hello_world
+
+### cron start
+Start a cron job immediately, outside of its normal cron schedule.
+
+    $ aurora cron start devcluster/www-data/test/cron_hello_world
+
+### job killall, job restart, job kill
+Cron jobs create instances running on the cluster that you can interact with like normal Aurora
+tasks with `job kill` and `job restart`.
+
+
+## Technical Note About Syntax
+
+`cron_schedule` uses a restricted subset of BSD crontab syntax. While the
+execution engine currently uses Quartz, the schedule parsing is custom, a subset of FreeBSD
+[crontab(5)](http://www.freebsd.org/cgi/man.cgi?crontab(5)) syntax. See
+[the source](https://github.com/apache/aurora/blob/master/src/main/java/org/apache/aurora/scheduler/cron/CrontabEntry.java#L106-L124)
+for details.
+
+
+## Caveats
+
+### Failovers
+No failover recovery. Aurora does not record the latest minute it fired
+triggers for across failovers. Therefore it's possible to miss triggers
+on failover. Note that this behavior may change in the future.
+
+It's necessary to sync time between schedulers with something like `ntpd`.
+Clock skew could cause double or missed triggers in the case of a failover.
+
+### Collision policy is best-effort
+Aurora aims to always have *at least one copy* of a given instance running at a time - it's
+an AP system, meaning it chooses Availability and Partition Tolerance at the expense of
+Consistency.
+
+If your collision policy is `CANCEL_NEW` and a task has terminated, but
+Aurora has not yet noticed this, Aurora will go ahead and create your new
+task.
+
+If your collision policy is `KILL_EXISTING` and a task was marked `LOST`
+but not yet GCed, Aurora will go ahead and create your new task without
+attempting to kill the old one (outside the GC interval).
+
+### Timezone Configuration
+Cron timezone is configured independently of JVM timezone with the `-cron_timezone` flag and
+defaults to UTC.
diff --git a/source/documentation/0.20.0/features/custom-executors.md b/source/documentation/0.20.0/features/custom-executors.md
new file mode 100644
index 0000000..da3f2a2
--- /dev/null
+++ b/source/documentation/0.20.0/features/custom-executors.md
@@ -0,0 +1,166 @@
+Custom Executors
+================
+
+If the need arises to use a Mesos executor other than the Thermos executor, the scheduler can be
+configured to utilize a custom executor by specifying the `-custom_executor_config` flag.
+The flag must be set to the path of a valid executor configuration file.
+
+The configuration file must be a valid **JSON array** and contain, at minimum,
+one executor configuration including the name, command and resources fields and
+must be pointed to by the `-custom_executor_config` flag when the scheduler is
+started.
+
+### Array Entry
+
+Property                 | Description
+-----------------------  | ---------------------------------
+executor (required)      | Description of executor.
+task_prefix (required)   | Prefix given to tasks launched with this executor's configuration.
+volume_mounts (optional) | Volumes to be mounted in container running executor.
+
+#### executor
+
+Property                 | Description
+-----------------------  | ---------------------------------
+name (required)          | Name of the executor.
+command (required)       | How to run the executor.
+resources (required)     | Overhead to use for each executor instance.
+
+#### command
+
+Property                 | Description
+-----------------------  | ---------------------------------
+value (required)         | The command to execute.
+arguments (optional)     | A list of arguments to pass to the command.
+uris (optional)          | List of resources to download into the task sandbox.
+shell (optional)         | Run executor via shell.
+
+A note on the command property (from [mesos.proto](https://github.com/apache/mesos/blob/master/include/mesos/mesos.proto)):
+
+```
+1) If 'shell == true', the command will be launched via shell
+   (i.e., /bin/sh -c 'value'). The 'value' specified will be
+   treated as the shell command. The 'arguments' will be ignored.
+2) If 'shell == false', the command will be launched by passing
+   arguments to an executable. The 'value' specified will be
+   treated as the filename of the executable. The 'arguments'
+   will be treated as the arguments to the executable. This is
+   similar to how POSIX exec families launch processes (i.e.,
+   execlp(value, arguments(0), arguments(1), ...)).
+```
+
+##### uris (list)
+* Follows the [Mesos Fetcher schema](http://mesos.apache.org/documentation/latest/fetcher/)
+
+Property                 | Description
+-----------------------  | ---------------------------------
+value (required)         | Path to the resource needed in the sandbox.
+executable (optional)    | Change resource to be executable via chmod.
+extract (optional)       | Extract files from packed or compressed archives into the sandbox.
+cache (optional)         | Use caching mechanism provided by Mesos for resources.
+
+#### resources (list)
+
+Property             | Description
+-------------------  | ---------------------------------
+name (required)      | Name of the resource: cpus or mem.
+type (required)      | Type of resource. Should always be SCALAR.
+scalar (required)    | Value in float for cpus or int for mem (in MBs)
+
+### volume_mounts (list)
+
+Property                     | Description
+---------------------------  | ---------------------------------
+host_path (required)         | Host path to mount inside the container.
+container_path (required)    | Path inside the container where `host_path` will be mounted.
+mode (required)              | Mode in which to mount the volume, Read-Write (RW) or Read-Only (RO).
+
+A sample configuration is as follows:
+
+```json
+[
+    {
+      "executor": {
+        "name": "myExecutor",
+        "command": {
+          "value": "myExecutor.a",
+          "shell": "false",
+          "arguments": [
+            "localhost:2181",
+            "-verbose",
+            "-config myConfiguration.config"
+          ],
+          "uris": [
+            {
+              "value": "/dist/myExecutor.a",
+              "executable": true,
+              "extract": false,
+              "cache": true
+            },
+            {
+              "value": "/home/user/myConfiguration.config",
+              "executable": false,
+              "extract": false,
+              "cache": false
+            }
+          ]
+        },
+        "resources": [
+          {
+            "name": "cpus",
+            "type": "SCALAR",
+            "scalar": {
+              "value": 1.00
+            }
+          },
+          {
+            "name": "mem",
+            "type": "SCALAR",
+            "scalar": {
+              "value": 512
+            }
+          }
+        ]
+      },
+      "volume_mounts": [
+        {
+          "mode": "RO",
+          "container_path": "/path/on/container",
+          "host_path": "/path/to/host/directory"
+        },
+        {
+          "mode": "RW",
+          "container_path": "/container",
+          "host_path": "/host"
+        }
+      ],
+      "task_prefix": "my-executor-"
+    }
+]
+```
+
+It should be noted that if you do not use Thermos or a Thermos-based executor, links in the scheduler's
+Web UI for tasks will not work (at least for the time being).
+Some information about launched tasks can still be accessed via the Mesos Web UI or via the Aurora Client.
+
+### Using a custom executor
+
+To launch tasks using a custom executor,
+an [ExecutorConfig](../../reference/configuration/#executorconfig-objects) object must be added to
+the Job or Service object. The `name` parameter of ExecutorConfig must match the name of an executor
+defined in the JSON object provided to the scheduler at startup time.
+
+For example, if we desire to launch tasks using `myExecutor` (defined above), we may do so in
+the following manner:
+
+```
+jobs = [Service(
+  task = task,
+  cluster = 'devcluster',
+  role = 'www-data',
+  environment = 'prod',
+  name = 'hello',
+  executor_config = ExecutorConfig(name='myExecutor'))]
+```
+
+This will create a Service Job which will launch tasks using myExecutor instead of Thermos.
diff --git a/source/documentation/0.20.0/features/job-updates.md b/source/documentation/0.20.0/features/job-updates.md
new file mode 100644
index 0000000..79899e1
--- /dev/null
+++ b/source/documentation/0.20.0/features/job-updates.md
@@ -0,0 +1,110 @@
+Aurora Job Updates
+==================
+
+`Job` configurations can be updated at any point in their lifecycle.
+Usually updates are done incrementally using a process called a *rolling
+upgrade*, in which Tasks are upgraded in small groups, one group at a
+time.  Updates are done using various Aurora Client commands.
+
+
+Rolling Job Updates
+-------------------
+
+There are several sub-commands to manage job updates:
+
+    aurora update start <job key> <configuration file>
+    aurora update info <job key>
+    aurora update pause <job key>
+    aurora update resume <job key>
+    aurora update abort <job key>
+    aurora update list <cluster>
+
+When you `start` a job update, the command will return once it has sent the
+instructions to the scheduler.  At that point, you may view detailed
+progress for the update with the `info` subcommand, in addition to viewing
+graphical progress in the web browser.  You may also get a full listing of
+in-progress updates in a cluster with `list`.
+
+Once an update has been started, you can `pause` to keep the update but halt
+progress.  This can be useful for things like debugging a partially-updated
+job to determine whether you would like to proceed.  You can `resume` to
+proceed.
+
+You may `abort` a job update regardless of the state it is in. This will
+instruct the scheduler to completely abandon the job update and leave the job
+in the current (possibly partially-updated) state.
+
+For a configuration update, the Aurora Scheduler calculates required changes
+by examining the current job config state and the new desired job config.
+It then starts a *rolling batched update process* by going through every batch
+and performing these operations, in order:
+
+- If an instance is not present in the scheduler but is present in
+  the new config, then the instance is created.
+- If an instance is present in both the scheduler and the new config, then
+  the scheduler diffs both task configs. If it detects any changes, it
+  performs an instance update by killing the old config instance and adding
+  the new config instance.
+- If an instance is present in the scheduler but isn't in the new config,
+  then that instance is killed.
+
+The Aurora Scheduler continues through the instance list until all tasks are
+updated and in `RUNNING`. If the scheduler determines the update is not going
+well (based on the criteria specified in the UpdateConfig), it cancels the update.
+
+Update cancellation runs a procedure similar to the described above
+update sequence, but in reverse order. New instance configs are swapped
+with old instance configs and batch updates proceed backwards
+from the point where the update failed. E.g., batches (0,1,2) (3,4,5) (6,7,
+8-FAIL) result in a rollback in the order (8,7,6) (5,4,3) (2,1,0).
+
+For details on how to control a job update, please see the
+[UpdateConfig](../../reference/configuration/#updateconfig-objects) configuration object.
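+
+As an illustrative sketch (the values below are placeholders, not recommendations), an update
+policy is attached to a job via its `update_config` field:
+
+    update_config = UpdateConfig(
+      batch_size = 3,               # update three instances at a time
+      max_per_shard_failures = 2,   # tolerate two restarts per instance before marking it failed
+      max_total_failures = 1        # tolerate at most one permanently failed instance in total
+    )
+
+The resulting object is passed as the `update_config` parameter of the `Job` or `Service`.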
+
+
+Coordinated Job Updates
+------------------------
+
+Some Aurora services may benefit from having more control over updates by explicitly
+acknowledging ("heartbeating") job update progress. This may be helpful for mission-critical
+service updates where explicit job health monitoring is vital during the entire job update
+lifecycle. Such job updates would rely on an external service (or a custom client) periodically
+pulsing an active coordinated job update via a
+[pulseJobUpdate RPC](https://github.com/apache/aurora/blob/rel/0.20.0/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+A coordinated update is defined by setting a positive
+[pulse_interval_secs](../../reference/configuration/#updateconfig-objects) value in the job configuration
+file. If no pulses are received within the specified interval, the update will be blocked. A blocked
+update is unable to continue rolling forward (or rolling back) but retains its active status.
+It may only be unblocked by a fresh `pulseJobUpdate` call.
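+
+For example, a coordinated update expecting a pulse at least every 60 seconds could be declared
+as in this sketch (the interval is purely illustrative):
+
+    update_config = UpdateConfig(pulse_interval_secs = 60)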
+
+NOTE: A coordinated update starts in `ROLL_FORWARD_AWAITING_PULSE` state and will not make any
+progress until the first pulse arrives. However, a paused update (`ROLL_FORWARD_PAUSED` or
+`ROLL_BACK_PAUSED`) is still considered active and upon resuming will immediately make progress
+provided the pulse interval has not expired.
+
+
+Canary Deployments
+------------------
+
+Canary deployments are a pattern for rolling out updates to a subset of job instances,
+in order to test different code versions alongside the actual production job.
+It is a risk-mitigation strategy for job owners and commonly used in a form where
+job instance 0 runs with a different configuration than the instances 1-N.
+
+For example, consider a job with 4 instances that each
+request 1 core of cpu, 1 GB of RAM, and 1 GB of disk space as specified
+in the configuration file `hello_world.aurora`. If you want to
+update it so it requests 2 GB of RAM instead of 1, you can create a new
+configuration file called `new_hello_world.aurora` and
+issue
+
+    aurora update start <job_key_value>/0-1 new_hello_world.aurora
+
+This results in instances 0 and 1 having 1 cpu, 2 GB of RAM, and 1 GB of disk space,
+while instances 2 and 3 have 1 cpu, 1 GB of RAM, and 1 GB of disk space. If instance 3
+dies and restarts, it restarts with 1 cpu, 1 GB RAM, and 1 GB disk space.
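+
+For illustration, the only difference between the two configuration files might be the resource
+declaration (a sketch; the actual example files are not reproduced here):
+
+    # hello_world.aurora
+    resources = Resources(cpu = 1.0, ram = 1*GB, disk = 1*GB)
+
+    # new_hello_world.aurora
+    resources = Resources(cpu = 1.0, ram = 2*GB, disk = 1*GB)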
+
+This means there are two simultaneous task configurations for the same job,
+each valid for a different range of instances. While this isn't a recommended
+pattern, it is valid and supported by the Aurora scheduler.
diff --git a/source/documentation/0.20.0/features/mesos-fetcher.md b/source/documentation/0.20.0/features/mesos-fetcher.md
new file mode 100644
index 0000000..967a905
--- /dev/null
+++ b/source/documentation/0.20.0/features/mesos-fetcher.md
@@ -0,0 +1,46 @@
+Mesos Fetcher
+=============
+
+Mesos has support for downloading resources into the sandbox through the
+use of the [Mesos Fetcher](http://mesos.apache.org/documentation/latest/fetcher/)
+
+Aurora supports passing URIs to the Mesos Fetcher dynamically by including
+a list of URIs in job submissions.
+
+How to use
+----------
+The scheduler flag `-enable_mesos_fetcher` must be set to true.
+
+Currently only the scheduler side of this feature has been implemented,
+so a modification to the existing client or a custom Thrift client is required
+to make use of this feature.
+
+If using a custom Thrift client, the list of URIs must be included in TaskConfig
+as the `mesosFetcherUris` field.
+
+Each Mesos Fetcher URI has the following data members:
+
+|Property | Description|
+|---------|------|
+|value (required)  |Path to the resource needed in the sandbox.|
+|extract (optional)|Extract files from packed or compressed archives into the sandbox.|
+|cache (optional) | Use caching mechanism provided by Mesos for resources.|
+
+Note that this structure is very similar to the one provided for downloading
+resources needed for a [custom executor](../../operations/configuration/).
+
+This is because both features use the Mesos fetcher to retrieve resources into
+the sandbox. However, the custom executor feature uses a static set of URIs
+configured on the server side, while the Mesos Fetcher feature uses a dynamic set
+of URIs provided at the time of job submission.
+
+Security Implications
+---------------------
+There are security implications that must be taken into account when enabling this feature.
+**Enabling this feature may potentially enable any job submitting user to perform a privilege escalation.**
+
+Until a more thorough solution is created, one step that has been taken to mitigate this issue
+is to statically mark every user submitted URI as non-executable. This is in contrast to the set of URIs
+set in the custom executor feature which may mark any URI as executable.
+
+If the need arises to mark a downloaded URI as executable, please consider using the custom executor feature.
\ No newline at end of file
diff --git a/source/documentation/0.20.0/features/multitenancy.md b/source/documentation/0.20.0/features/multitenancy.md
new file mode 100644
index 0000000..08aedd5
--- /dev/null
+++ b/source/documentation/0.20.0/features/multitenancy.md
@@ -0,0 +1,82 @@
+Multitenancy
+============
+
+Aurora is a multi-tenant system that can run jobs of multiple clients/tenants.
+Going beyond the [resource isolation on an individual host](../resource-isolation/), it is
+crucial to prevent those jobs from stepping on each other's toes.
+
+
+Job Namespaces
+--------------
+
+The namespace for jobs in Aurora follows a hierarchical structure. This is meant to make it easier
+to differentiate between different jobs. A job key consists of four parts. The four parts are
+`<cluster>/<role>/<environment>/<jobname>` in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts.
+* Environment names are namespaces.
+* Jobname is the custom name of your job.
+
+Role names correspond to user accounts. They are used for
+[authentication](../../operations/security/), as the linux user used to run jobs, and for the
+assignment of [quota](#preemption). If you don't know what accounts are available, contact your
+sysadmin.
+
+The environment component in the job key serves as a namespace. The values for
+environment are validated in the scheduler. By default, any of `devel`, `test`,
+`production`, and any value matching the regular expression `staging[0-9]*` is allowed. This validation can be
+changed to allow any arbitrary regular expression by setting the scheduler option `allowed_job_environments`.
+
+None of the values imply any difference in the scheduling behavior. Conventionally, the
+"environment" is set so as to indicate a certain level of stability in the behavior of the job
+by ensuring that an appropriate level of testing has been performed on the application code. e.g.
+in the case of a typical Job, releases may progress through the following phases in order of
+increasing level of stability: `devel`, `test`, `staging`, `production`.
+
+
+Configuration Tiers
+-------------------
+
+Tier is a predefined bundle of task configuration options. Aurora schedules tasks and assigns them
+resources based on their tier assignment. The default scheduler tier configuration allows for
+3 tiers:
+
+ - `revocable`: The `revocable` tier requires the task to run with [revocable](../resource-isolation/#oversubscription)
+ resources.
+ - `preemptible`: Setting the task’s tier to `preemptible` allows for the possibility of that task
+ being [preempted](#preemption) by other tasks when the cluster is running low on resources.
+ - `preferred`: The `preferred` tier prevents the task from using [revocable](../resource-isolation/#oversubscription)
+ resources and from being [preempted](#preemption).
+
+Since it is possible that a cluster is configured with a custom tier configuration, users should
+consult their cluster administrator to be informed of the tiers supported by the cluster. Attempts
+to schedule jobs with an unsupported tier will be rejected by the scheduler.
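+
+A tier is requested via the `tier` field of the job configuration, as in this minimal sketch
+(only the relevant field is shown):
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      tier = 'preemptible',
+      ...
+    )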
+
+
+Preemption
+----------
+
+In order to guarantee that important production jobs are always running, Aurora supports
+preemption.
+
+Consider a pending job that is a candidate for scheduling, but a resource shortage prevents it from
+being scheduled. Active tasks can become the victim of preemption if:
+
+ - both candidate and victim are owned by the same role and the
+   [priority](../../reference/configuration/#job-objects) of a victim is lower than the
+   [priority](../../reference/configuration/#job-objects) of the candidate.
+ - OR a victim is a `preemptible` or `revocable` [tier](#configuration-tiers) task and the candidate
+   is a `preferred` [tier](#configuration-tiers) task.
+
+In other words, tasks from `preferred` [tier](../../reference/configuration/#job-objects) jobs may
+preempt tasks from any `preemptible` or `revocable` job. However, a `preferred` task may only be
+preempted by tasks from `preferred` jobs in the same role with higher [priority](../../reference/configuration/#job-objects).
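+
+For illustration (a sketch, not taken from the example jobs above), a latency-sensitive job might
+be marked both non-preemptible and higher priority than its owner's other jobs:
+
+    Service(
+      name = 'api-frontend',
+      role = 'www-data',
+      tier = 'preferred',
+      priority = 10,
+      ...
+    )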
+
+Aurora requires resource quotas for [production non-dedicated jobs](../../reference/configuration/#job-objects).
+Quota is enforced at the job role level and when set, defines a non-preemptible pool of compute resources within
+that role. All job types (service, adhoc or cron) require role resource quota unless a job has a
+[dedicated constraint](../constraints/#dedicated-attribute) set.
+
+To grant quota to a particular role in production, an operator can use the command
+`aurora_admin set_quota`.
diff --git a/source/documentation/0.20.0/features/resource-isolation.md b/source/documentation/0.20.0/features/resource-isolation.md
new file mode 100644
index 0000000..a5294f6
--- /dev/null
+++ b/source/documentation/0.20.0/features/resource-isolation.md
@@ -0,0 +1,181 @@
+Resource Isolation and Sizing
+=============================
+
+This document assumes Aurora and Mesos have been configured
+using our [recommended resource isolation settings](../../operations/configuration/#resource-isolation).
+
+- [Isolation](#isolation)
+- [Sizing](#sizing)
+- [Oversubscription](#oversubscription)
+
+
+Isolation
+---------
+
+Aurora is a multi-tenant system; a single software instance runs on a
+server, serving multiple clients/tenants. To share resources among
+tenants, it leverages Mesos for isolation of:
+
+* CPU
+* GPU
+* memory
+* disk space
+* ports
+
+CPU is a soft limit, and handled differently from memory and disk space.
+Too low a CPU value results in throttling your application and
+slowing it down. Memory and disk space are both hard limits; when your
+application goes over these values, it's killed.
+
+### CPU Isolation
+
+Mesos can be configured to use a quota based CPU scheduler (the *Completely*
+*Fair Scheduler*) to provide consistent and predictable performance.
+This is effectively a guarantee of resources -- you receive at least what
+you requested, but also no more than you've requested.
+
+The scheduler gives applications a CPU quota for every 100 ms interval.
+When an application uses its quota for an interval, it is throttled for
+the rest of the 100 ms. Usage resets for each interval and unused
+quota does not carry over.
+
+For example, an application specifying 4.0 CPU has access to 400 ms of
+CPU time every 100 ms. This CPU quota can be used in different ways,
+depending on the application and available resources. Consider the
+scenarios shown in this diagram.
+
+![CPU Availability](../images/CPUavailability.png)
+
+* *Scenario A*: the application can use up to 4 cores continuously for
+every 100 ms interval. It is never throttled and starts processing
+new requests immediately.
+
+* *Scenario B* : the application uses up to 8 cores (depending on
+availability) but is throttled after 50 ms. The CPU quota resets at the
+start of each new 100 ms interval.
+
+* *Scenario C* : is like Scenario A, but there is a garbage collection
+event in the second interval that consumes all CPU quota. The
+application throttles for the remaining 75 ms of that interval and
+cannot service requests until the next interval. In this example, the
+garbage collection finished in one interval but, depending on how much
+garbage needs collecting, it may take more than one interval and further
+delay service of requests.
+
+*Technical Note*: Mesos considers logical cores, also known as
+hyperthreading or SMT cores, as the unit of CPU.
+
+### Memory Isolation
+
+Mesos uses dedicated memory allocation. Your application always has
+access to the amount of memory specified in your configuration. The
+application's memory use is defined as the sum of the resident set size
+(RSS) of all processes in a shard. Each shard is considered
+independently.
+
+In other words, say you specified a memory size of 10GB. Each shard
+would receive 10GB of memory. If an individual shard's memory demands
+exceed 10GB, that shard is killed, but the other shards continue
+working.
+
+*Technical note*: Total memory size is not enforced at allocation time,
+so your application can request more than its allocation without getting
+an ENOMEM. However, it will be killed shortly after.
+
+### Disk Space
+
+Disk space used by your application is defined as the sum of the files'
+disk space in your application's directory, including the `stdout` and
+`stderr` logged from your application. Each shard is considered
+independently. You should use off-node storage for your application's
+data whenever possible.
+
+In other words, say you specified disk space size of 100MB. Each shard
+would receive 100MB of disk space. If an individual shard's disk space
+demands exceed 100MB, that shard is killed, but the other shards
+continue working.
+
+After your application finishes running, its allocated disk space is
+reclaimed. Thus, your job's final action should move any disk content
+that you want to keep, such as logs, to your home file system or other
+less transitory storage. Disk reclamation takes place an undefined
+period after the application finish time; until then, the disk contents
+are still available but you shouldn't count on them being so.
+
+*Technical note* : Disk space is not enforced at write so your
+application can write above its quota without getting an ENOSPC, but it
+will be killed shortly after. This is subject to change.
+
+### GPU Isolation
+
+GPU isolation will be supported for Nvidia devices starting from Mesos 1.0.
+Access to the allocated units will be exclusive with no sharing between tasks
+allowed (e.g. no fractional GPU allocation). For more details, see the
+[Mesos design document](https://docs.google.com/document/d/10GJ1A80x4nIEo8kfdeo9B11PIbS1xJrrB4Z373Ifkpo/edit#heading=h.w84lz7p4eexl)
+and the [Mesos agent configuration](http://mesos.apache.org/documentation/latest/configuration/).
+
+### Other Resources
+
+Other resources, such as network bandwidth, do not have any performance
+guarantees. For some resources, such as memory bandwidth, there are no
+practical sharing methods so some application combinations collocated on
+the same host may cause contention.
+
+
+Sizing
+-------
+
+### CPU Sizing
+
+To correctly size Aurora-run Mesos tasks, specify a per-shard CPU value
+that lets the task run at its desired performance when at peak load
+distributed across all shards. Include reserve capacity of at least 50%,
+possibly more, depending on how critical your service is (or how
+confident you are about your original estimate :-)), ideally by
+increasing the number of shards to also improve resiliency. When running
+your application, observe its CPU stats over time. If consistently at or
+near your quota during peak load, you should consider increasing either
+per-shard CPU or the number of shards.
+
+### Memory Sizing
+
+Size for your application's peak requirement. Observe the per-instance
+memory statistics over time, as memory requirements can vary over
+different periods. Remember that if your application exceeds its memory
+value, it will be killed, so you should also add a safety margin of
+around 10-20%. If you have the ability to do so, you may also want to
+put alerts on the per-instance memory.
+
+### Disk Space Sizing
+
+Size for your application's peak requirement. Rotate and discard log
+files as needed to stay within your quota. When running a Java process,
+add the maximum size of the Java heap to your disk space requirement, in
+order to account for an out of memory error dumping the heap
+into the application's sandbox space.
+
+### GPU Sizing
+
+GPU is highly dependent on your application requirements and is only limited
+by the number of physical GPU units available on a target box.
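+
+Assuming GPU support is enabled by your cluster operator, whole GPU units are requested alongside
+the other resources (a sketch; the values are placeholders):
+
+    resources = Resources(cpu = 4.0, ram = 4*GB, disk = 8*GB, gpu = 1)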
+
+
+Oversubscription
+----------------
+
+Mesos supports [oversubscription of machine resources](http://mesos.apache.org/documentation/latest/oversubscription/)
+via the concept of revocable tasks. In contrast to non-revocable tasks, revocable tasks are best-effort.
+Mesos reserves the right to throttle or even kill them if they might affect existing high-priority
+user-facing services.
+
+As of today, the only revocable resources supported by Aurora are CPU and RAM. A job can
+opt-in to use those by specifying the `revocable` [Configuration Tier](../../features/multitenancy/#configuration-tiers).
+A revocable job will only be scheduled using revocable resources, even if there are plenty of
+non-revocable resources available.
+
+The Aurora scheduler must be [configured to receive revocable offers](../../operations/configuration/#resource-isolation)
+from Mesos and accept revocable jobs. If not configured properly, revocable tasks will never get
+assigned to hosts and will stay in `PENDING`.
+
+For details on how to mark a job as being revocable, see the
+[Configuration Reference](../../reference/configuration/).
diff --git a/source/documentation/0.20.0/features/service-discovery.md b/source/documentation/0.20.0/features/service-discovery.md
new file mode 100644
index 0000000..b35a569
--- /dev/null
+++ b/source/documentation/0.20.0/features/service-discovery.md
@@ -0,0 +1,43 @@
+Service Discovery
+=================
+
+It is possible for the Aurora executor to announce tasks into ServerSets for
+the purpose of service discovery.  ServerSets use the Zookeeper [group membership pattern](http://zookeeper.apache.org/doc/trunk/recipes.html#sc_outOfTheBox)
+of which there are several reference implementations:
+
+  - [C++](https://github.com/apache/mesos/blob/master/src/zookeeper/group.cpp)
+  - [Java](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/zookeeper/ServerSetImpl.java#L221)
+  - [Python](https://github.com/twitter/commons/blob/master/src/python/twitter/common/zookeeper/serverset/serverset.py#L51)
+
+These can also be used natively in Finagle using the [ZookeeperServerSetCluster](https://github.com/twitter/finagle/blob/master/finagle-serversets/src/main/scala/com/twitter/finagle/zookeeper/ZookeeperServerSetCluster.scala).
+
+For more information about how to configure announcing, see the [Configuration Reference](../../reference/configuration/).
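+
+As a brief sketch, announcing is enabled by adding an `announce` parameter to the job; the port
+name below is just an example:
+
+    Service(
+      name = 'hello',
+      announce = Announcer(primary_port = 'http'),
+      ...
+    )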
+
+Using Mesos DiscoveryInfo
+-------------------------
+Aurora provides experimental support for populating DiscoveryInfo in Mesos. This can be used to build a
+custom service discovery system that does not rely on ZooKeeper. Please see the `Service Discovery` section in the
+[Mesos Framework Development guide](http://mesos.apache.org/documentation/latest/app-framework-development-guide/) for an
+explanation of the protobuf message in Mesos.
+
+To use this feature, please enable the `--populate_discovery_info` flag on the scheduler. All jobs started by the scheduler
+afterwards will have their portmap populated to Mesos and discoverable via the `/state` endpoint on the Mesos master and agent.
+
+### Using Mesos DNS
+An example is using [Mesos-DNS](https://github.com/mesosphere/mesos-dns), which is able to generate multiple DNS
+records. With the current implementation, the example job with key `devcluster/vagrant/test/http-example` generates at
+least the following:
+
+1. An A record for `http_example.test.vagrant.aurora.mesos` (which only includes IP address);
+2. A [SRV record](https://en.wikipedia.org/wiki/SRV_record) for
+ `_http_example.test.vagrant._tcp.aurora.mesos`, which includes IP address and every port. This should only
+  be used if the service has one port.
+3. A SRV record `_{port-name}._http_example.test.vagrant._tcp.aurora.mesos` for each port name
+  defined. This should be used when the service has multiple ports. For this to work properly,
+  `-populate_discovery_info` must be added to the scheduler's configuration.
+
+Things to note:
+
+1. The domain part (".mesos" in above example) can be configured in [Mesos DNS](http://mesosphere.github.io/mesos-dns/docs/configuration-parameters.html);
+2. Right now, portmap and port aliases in the announcer object are not reflected in DiscoveryInfo, and are therefore not visible in
+   Mesos DNS records either. This is because they are only resolved in the Thermos executor.
diff --git a/source/documentation/0.20.0/features/services.md b/source/documentation/0.20.0/features/services.md
new file mode 100644
index 0000000..91889d2
--- /dev/null
+++ b/source/documentation/0.20.0/features/services.md
@@ -0,0 +1,116 @@
+Long-running Services
+=====================
+
+Jobs that are always restarted on completion, whether successful or unsuccessful,
+are called services. This is useful for long-running processes
+such as webservices that should always be running, unless stopped explicitly.
+
+
+Service Specification
+---------------------
+
+A job is identified as a service by the presence of the flag
+`service=True` in the [`Job`](../../reference/configuration/#job-objects) object.
+The `Service` alias can be used as shorthand for `Job` with `service=True`.
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/hello_world.aurora
+    hello = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = SequentialTask(
+      processes = [hello],
+      resources = Resources(cpu = 1.0, ram = 128*MB, disk = 128*MB)
+    )
+
+    jobs = [
+      Service(
+        task = task,
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'prod',
+        name = 'hello'
+      )
+    ]
+
+
+Jobs without the service bit set only restart up to `max_task_failures` times and only if they
+terminated unsuccessfully either due to human error or machine failure (see the
+[`Job`](../../reference/configuration/#job-objects) object for details).
+
+
+Ports
+-----
+
+In order to be useful, most services have to bind to one or more ports. Aurora enables this
+use case via the [`thermos.ports` namespace](../../reference/configuration/#thermos-namespace) that
+allows you to request arbitrarily named ports:
+
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[http]}}'
+    )
+
+
+When this process is included in a job, the job will be allocated a port, and the command line
+will be replaced with something like:
+
+    ./run_nginx.sh -port 42816
+
+Where 42816 happens to be the allocated port.
+
+For details on how to enable clients to discover this dynamically assigned port, see our
+[Service Discovery](../service-discovery/) documentation.
+
+
+Health Checking
+---------------
+
+Typically, the Thermos executor monitors processes within a task only by liveness of the forked
+process. In addition to that, Aurora has support for rudimentary health checking: either via HTTP
+or via custom shell scripts.
+
+For example, simply by requesting a `health` port, a process can request to be health checked
+via repeated calls to the `/health` endpoint:
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[health]}}'
+    )
+
+Please see the
+[configuration reference](../../reference/configuration/#healthcheckconfig-objects)
+for configuration options for this feature.
+
+Starting with the 0.17.0 release, job updates rely only on task health-checks by introducing
+a `min_consecutive_successes` parameter on the HealthCheckConfig object. This parameter represents
+the number of successful health checks needed before a task is moved into the `RUNNING` state. Tasks
+that do not have enough successful health checks within the first `n` attempts are moved to the
+`FAILED` state, where `n = ceil(initial_interval_secs/interval_secs) + max_consecutive_failures +
+min_consecutive_successes`. In order to accommodate variability during task warm up, `initial_interval_secs`
+will act as a grace period. Any health-check failures during the first `m` attempts are ignored and
+do not count towards `max_consecutive_failures`, where `m = ceil(initial_interval_secs/interval_secs)`.
+
+As [job updates](../job-updates/) are based only on health-checks, it is not necessary to set
+`watch_secs` to the worst-case update time, it can instead be set to 0. The scheduler considers a
+task that is in the `RUNNING` state to be healthy and proceeds to update the next batch of instances.
+For details on how to control health checks, please see the
+[HealthCheckConfig](../../reference/configuration/#healthcheckconfig-objects) configuration object.
+Existing jobs that do not configure a health-check can fall-back to using `watch_secs` to
+monitor a task before considering it healthy.
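+
+Putting this together, a health-check-driven job might be sketched as follows (all values are
+illustrative placeholders):
+
+    Service(
+      name = 'hello',
+      health_check_config = HealthCheckConfig(
+        initial_interval_secs = 15,
+        interval_secs = 10,
+        min_consecutive_successes = 2,
+        max_consecutive_failures = 3
+      ),
+      update_config = UpdateConfig(watch_secs = 0),
+      ...
+    )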
+
+You can pause health checking by touching a file inside of your sandbox, named `.healthchecksnooze`.
+As long as that file is present, health checks will be disabled, enabling users to gather core
+dumps or other performance measurements without worrying about Aurora's health check killing
+their process.
+
+WARNING: Remember to remove this when you are done, otherwise your instance will have permanently
+disabled health checks.
diff --git a/source/documentation/0.20.0/features/sla-metrics.md b/source/documentation/0.20.0/features/sla-metrics.md
new file mode 100644
index 0000000..bdc1135
--- /dev/null
+++ b/source/documentation/0.20.0/features/sla-metrics.md
@@ -0,0 +1,215 @@
+Aurora SLA Measurement
+======================
+
+- [Overview](#overview)
+- [Metric Details](#metric-details)
+  - [Platform Uptime](#platform-uptime)
+  - [Job Uptime](#job-uptime)
+  - [Median Time To Assigned (MTTA)](#median-time-to-assigned-\(mtta\))
+  - [Median Time To Starting (MTTS)](#median-time-to-starting-\(mtts\))
+  - [Median Time To Running (MTTR)](#median-time-to-running-\(mttr\))
+- [Limitations](#limitations)
+
+## Overview
+
+The primary goal of the feature is collection and monitoring of Aurora job SLA (Service Level
+Agreements) metrics that define a contractual relationship between the Aurora/Mesos platform
+and hosted services.
+
+The Aurora SLA feature is by default only enabled for service (non-cron)
+production jobs (`"production=True"` in your `.aurora` config). It can be enabled for
+non-production services by an operator via the scheduler command line flag `-sla_non_prod_metrics`.
+
+Counters that track SLA measurements are computed periodically within the scheduler.
+The individual instance metrics are refreshed every minute (configurable via
+`sla_stat_refresh_interval`). The instance counters are subsequently aggregated by
+relevant grouping types before exporting to the scheduler `/vars` endpoint (when using `vagrant`
+that would be `http://192.168.33.7:8081/vars`).
+
+
+## Metric Details
+
+### Platform Uptime
+
+*Aggregate amount of time a job spends in a non-runnable state due to platform unavailability
+or scheduling delays. This metric tracks Aurora/Mesos uptime performance and reflects on any
+system-caused downtime events (tasks LOST or DRAINED). Any user-initiated task kills/restarts
+will not degrade this metric.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_platform_uptime_percent`
+* Per cluster - `sla_cluster_platform_uptime_percent`
+
+**Units:** percent
+
+A fault in the task environment may cause Aurora/Mesos to have different views of the task state
+or to lose track of the task's existence. In such cases, the service task is marked as LOST and
+rescheduled by Aurora. For example, this may happen when the task stays in ASSIGNED or STARTING
+for too long or the Mesos agent becomes unhealthy (or disappears completely). The time between the
+task entering LOST and its replacement reaching RUNNING state is counted towards platform downtime.
+
+Another example of a platform downtime event is the administrator-requested task rescheduling. This
+happens during planned Mesos agent maintenance when all agent tasks are marked as DRAINED and
+rescheduled elsewhere.
+
+To accurately calculate Platform Uptime, we must separate platform incurred downtime from user
+actions that put a service instance in a non-operational state. It is simpler to isolate
+user-incurred downtime and treat all other downtime as platform incurred.
+
+Currently, a user can cause a healthy service (task) downtime in only two ways: via `killTasks`
+or `restartShards` RPCs. For both, their affected tasks leave an audit state transition trail
+relevant to uptime calculations. By applying a special "SLA meaning" to exposed task state
+transition records, we can build a deterministic downtime trace for every given service instance.
+
+A task going through a state transition carries one of three possible SLA meanings
+(see [SlaAlgorithm.java](https://github.com/apache/aurora/blob/rel/0.20.0/src/main/java/org/apache/aurora/scheduler/sla/SlaAlgorithm.java) for
+sla-to-task-state mapping):
+
+* Task is UP: starts a period where the task is considered to be up and running from the Aurora
+  platform standpoint.
+
+* Task is DOWN: starts a period where the task cannot reach the UP state for some
+  non-user-related reason. Counts towards instance downtime.
+
+* Task is REMOVED from SLA: starts a period where the task is not expected to be UP due to
+  user initiated action or failure. We ignore this period for the uptime calculation purposes.
+
+This metric is recalculated over the last sampling period (last minute) to account for
+any UP/DOWN/REMOVED events. It ignores any UP/DOWN events not immediately adjacent to the
+sampling interval as well as adjacent REMOVED events.
+
+### Job Uptime
+
+*Percentage of the job instances considered to be in RUNNING state for the specified duration
+relative to request time. This is a purely application-side metric that considers the aggregate
+uptime of all RUNNING instances. Any user- or platform-initiated restarts directly affect
+this metric.*
+
+**Collection scope:** We currently expose job uptime values at 5 pre-defined
+percentiles (50th, 75th, 90th, 95th, and 99th):
+
+* `sla_<job_key>_job_uptime_50_00_sec`
+* `sla_<job_key>_job_uptime_75_00_sec`
+* `sla_<job_key>_job_uptime_90_00_sec`
+* `sla_<job_key>_job_uptime_95_00_sec`
+* `sla_<job_key>_job_uptime_99_00_sec`
+
+**Units:** seconds
+
+You can also get customized real-time stats from the Aurora client. See `aurora sla -h` for
+more details.
+
+### Median Time To Assigned (MTTA)
+
+*Median time a job spends waiting for its tasks to be assigned to a host. This is a combined
+metric that helps track the dependency of scheduling performance on the requested resources
+(user scope) as well as the internal scheduler bin-packing algorithm efficiency (platform scope).*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtta_ms`
+* Per cluster - `sla_cluster_mtta_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.20.0/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mtta_ms`
+    * `sla_cpu_medium_mtta_ms`
+    * `sla_cpu_large_mtta_ms`
+    * `sla_cpu_xlarge_mtta_ms`
+    * `sla_cpu_xxlarge_mtta_ms`
+  * By RAM:
+    * `sla_ram_small_mtta_ms`
+    * `sla_ram_medium_mtta_ms`
+    * `sla_ram_large_mtta_ms`
+    * `sla_ram_xlarge_mtta_ms`
+    * `sla_ram_xxlarge_mtta_ms`
+  * By DISK:
+    * `sla_disk_small_mtta_ms`
+    * `sla_disk_medium_mtta_ms`
+    * `sla_disk_large_mtta_ms`
+    * `sla_disk_xlarge_mtta_ms`
+    * `sla_disk_xxlarge_mtta_ms`
+
+**Units:** milliseconds
+
+MTTA only considers instances that have already reached ASSIGNED state and ignores those
+that are still PENDING. This ensures straggler instances (e.g. with unreasonable resource
+constraints) do not affect metric curves.
+
+### Median Time To Starting (MTTS)
+
+*Median time a job waits for its tasks to reach STARTING state. This is a comprehensive metric
+reflecting the overall time it takes for Aurora/Mesos to start initializing the sandbox
+for a task.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtts_ms`
+* Per cluster - `sla_cluster_mtts_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.20.0/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mtts_ms`
+    * `sla_cpu_medium_mtts_ms`
+    * `sla_cpu_large_mtts_ms`
+    * `sla_cpu_xlarge_mtts_ms`
+    * `sla_cpu_xxlarge_mtts_ms`
+  * By RAM:
+    * `sla_ram_small_mtts_ms`
+    * `sla_ram_medium_mtts_ms`
+    * `sla_ram_large_mtts_ms`
+    * `sla_ram_xlarge_mtts_ms`
+    * `sla_ram_xxlarge_mtts_ms`
+  * By DISK:
+    * `sla_disk_small_mtts_ms`
+    * `sla_disk_medium_mtts_ms`
+    * `sla_disk_large_mtts_ms`
+    * `sla_disk_xlarge_mtts_ms`
+    * `sla_disk_xxlarge_mtts_ms`
+
+**Units:** milliseconds
+
+MTTS only considers instances in STARTING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+### Median Time To Running (MTTR)
+
+*Median time a job waits for its tasks to reach RUNNING state. This is a comprehensive metric
+reflecting the overall time it takes Aurora/Mesos to start executing user content.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mttr_ms`
+* Per cluster - `sla_cluster_mttr_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.20.0/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mttr_ms`
+    * `sla_cpu_medium_mttr_ms`
+    * `sla_cpu_large_mttr_ms`
+    * `sla_cpu_xlarge_mttr_ms`
+    * `sla_cpu_xxlarge_mttr_ms`
+  * By RAM:
+    * `sla_ram_small_mttr_ms`
+    * `sla_ram_medium_mttr_ms`
+    * `sla_ram_large_mttr_ms`
+    * `sla_ram_xlarge_mttr_ms`
+    * `sla_ram_xxlarge_mttr_ms`
+  * By DISK:
+    * `sla_disk_small_mttr_ms`
+    * `sla_disk_medium_mttr_ms`
+    * `sla_disk_large_mttr_ms`
+    * `sla_disk_xlarge_mttr_ms`
+    * `sla_disk_xxlarge_mttr_ms`
+
+**Units:** milliseconds
+
+MTTR only considers instances in RUNNING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+## Limitations
+
+* The availability of Aurora SLA metrics is bound by the scheduler availability.
+
+* All metrics are calculated at a pre-defined interval (currently set at 1 minute).
+  Scheduler restarts may result in missed collections.
diff --git a/source/documentation/0.20.0/features/webhooks.md b/source/documentation/0.20.0/features/webhooks.md
new file mode 100644
index 0000000..286746e
--- /dev/null
+++ b/source/documentation/0.20.0/features/webhooks.md
@@ -0,0 +1,112 @@
+Webhooks
+========
+
+Aurora has an optional feature which allows an operator to specify a file to configure an HTTP webhook
+to receive task state change events. It can be enabled with a scheduler flag, e.g.
+`-webhook_config=/path/to/webhook.json`. At this point, webhooks are still considered *experimental*.
+
+Below is a sample configuration:
+
+```json
+{
+  "headers": {
+    "Content-Type": "application/vnd.kafka.json.v1+json",
+    "Producer-Type": "reliable"
+  },
+  "targetURL": "http://localhost:5000/",
+  "timeoutMsec": 5
+}
+```
+
+And here is an example of the event payload that your endpoint will receive:
+
+```json
+{
+    "task":
+    {
+        "cachedHashCode":0,
+        "assignedTask": {
+            "cachedHashCode":0,
+            "taskId":"vagrant-test-http_example-8-a6cf7ec5-d793-49c7-b10f-0e14ab80bfff",
+            "task": {
+                "cachedHashCode":-1819348376,
+                "job": {
+                    "cachedHashCode":803049425,
+                    "role":"vagrant",
+                    "environment":"test",
+                    "name":"http_example"
+                    },
+                "owner": {
+                    "cachedHashCode":226895216,
+                    "user":"vagrant"
+                    },
+                "isService":true,
+                "numCpus":0.1,
+                "ramMb":16,
+                "diskMb":8,
+                "priority":0,
+                "maxTaskFailures":1,
+                "production":false,
+                "resources":[
+                    {"cachedHashCode":729800451,"setField":"NUM_CPUS","value":0.1},
+                    {"cachedHashCode":552899914,"setField":"RAM_MB","value":16},
+                    {"cachedHashCode":-1547868317,"setField":"DISK_MB","value":8},
+                    {"cachedHashCode":1957328227,"setField":"NAMED_PORT","value":"http"},
+                    {"cachedHashCode":1954229436,"setField":"NAMED_PORT","value":"tcp"}
+                    ],
+                "constraints":[],
+                "requestedPorts":["http","tcp"],
+                "taskLinks":{"http":"http://%host%:%port:http%"},
+                "contactEmail":"vagrant@localhost",
+                "executorConfig": {
+                    "cachedHashCode":-1194797325,
+                    "name":"AuroraExecutor",
+                    "data": "{\"environment\": \"test\", \"health_check_config\": {\"initial_interval_secs\": 5.0, \"health_checker\": { \"http\": {\"expected_response_code\": 0, \"endpoint\": \"/health\", \"expected_response\": \"ok\"}}, \"max_consecutive_failures\": 0, \"timeout_secs\": 1.0, \"interval_secs\": 1.0}, \"name\": \"http_example\", \"service\": true, \"max_task_failures\": 1, \"cron_collision_policy\": \"KILL_EXISTING\", \"enable_hooks\": false, \"cluster\": \"devcluster\", \"task\": {\"processes\": [{\"daemon\": false, \"name\": \"echo_ports\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"echo \\\"tcp port: {{thermos.ports[tcp]}}; http port: {{thermos.ports[http]}}; alias: {{thermos.ports[alias]}}\\\"\", \"final\": false}, {\"daemon\": false, \"name\": \"stage_server\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"cp /vagrant/src/test/sh/org/apache/aurora/e2e/http_example.py .\", \"final\": false}, {\"daemon\": false, \"name\": \"run_server\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"python http_example.py {{thermos.ports[http]}}\", \"final\": false}], \"name\": \"http_example\", \"finalization_wait\": 30, \"max_failures\": 1, \"max_concurrency\": 0, \"resources\": {\"disk\": 8388608, \"ram\": 16777216, \"cpu\": 0.1}, \"constraints\": [{\"order\": [\"echo_ports\", \"stage_server\", \"run_server\"]}]}, \"production\": false, \"role\": \"vagrant\", \"contact\": \"vagrant@localhost\", \"announce\": {\"primary_port\": \"http\", \"portmap\": {\"alias\": \"http\"}}, \"lifecycle\": {\"http\": {\"graceful_shutdown_endpoint\": \"/quitquitquit\", \"port\": \"health\", \"shutdown_endpoint\": \"/abortabortabort\"}}, \"priority\": 0}"},
+                    "metadata":[],
+                    "container":{
+                        "cachedHashCode":-1955376216,
+                        "setField":"MESOS",
+                        "value":{"cachedHashCode":31}}
+                    },
+                    "assignedPorts":{},
+                    "instanceId":8
+        },
+        "status":"PENDING",
+        "failureCount":0,
+        "taskEvents":[
+            {"cachedHashCode":0,"timestamp":1464992060258,"status":"PENDING","scheduler":"aurora"}]
+        },
+        "oldState":{}}
+```
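+
+If you would like to experiment with these payloads locally, below is a minimal, hypothetical
+receiver for the `targetURL` used in these examples (`http://localhost:5000/`). It simply prints
+a short summary of each event and is not part of Aurora.
+
+```python
+import json
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+class WebhookHandler(BaseHTTPRequestHandler):
+  def do_POST(self):
+    # Read and decode the JSON event posted by the Aurora webhook.
+    length = int(self.headers.get('Content-Length', 0))
+    event = json.loads(self.rfile.read(length) or b'{}')
+    task = event.get('task', {})
+    print('status=%s taskId=%s' % (
+        task.get('status'),
+        task.get('assignedTask', {}).get('taskId')))
+    self.send_response(200)
+    self.end_headers()
+
+if __name__ == '__main__':
+  HTTPServer(('localhost', 5000), WebhookHandler).serve_forever()
+```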
+
+By default, the webhook watches all TaskStateChanges and sends events to the configured endpoint. If you
+are only interested in certain types of TaskStateChange (e.g. transitions to the `LOST` or `FAILED` statuses),
+you can specify a whitelist of the desired task statuses in webhook.json. The webhook will then only send
+events for the whitelisted statuses to the configured endpoint.
+
+```json
+{
+  "headers": {
+    "Content-Type": "application/vnd.kafka.json.v1+json",
+    "Producer-Type": "reliable"
+  },
+  "targetURL": "http://localhost:5000/",
+  "timeoutMsec": 50,
+  "statuses": ["LOST", "FAILED"]
+}
+```
+
+If you want to whitelist all TaskStateChanges, you can add a wildcard character `*` to your whitelist
+as shown below, or simply leave out the `statuses` field in webhook.json.
+
+```json
+{
+  "headers": {
+    "Content-Type": "application/vnd.kafka.json.v1+json",
+    "Producer-Type": "reliable"
+  },
+  "targetURL": "http://localhost:5000/",
+  "timeoutMsec": 50,
+  "statuses": ["*"]
+}
+```
diff --git a/source/documentation/0.20.0/getting-started/overview.md b/source/documentation/0.20.0/getting-started/overview.md
new file mode 100644
index 0000000..84c5916
--- /dev/null
+++ b/source/documentation/0.20.0/getting-started/overview.md
@@ -0,0 +1,112 @@
+Aurora System Overview
+======================
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+
+Components
+----------
+
+It is important to have an understanding of the components that make up
+a functioning Aurora cluster.
+
+![Aurora Components](../images/components.png)
+
+* **Aurora scheduler**
+  The scheduler is your primary interface to the work you run in your cluster.  You will
+  instruct it to run jobs, and it will manage them in Mesos for you.  You will also frequently use
+  the scheduler's read-only web interface as a heads-up display for what's running in your cluster.
+
+* **Aurora client**
+  The client (`aurora` command) is a command line tool that exposes primitives that you can use to
+  interact with the scheduler. The client operates on jobs, which it identifies by their job keys.
+
+  Aurora also provides an admin client (`aurora_admin` command) that contains commands built for
+  cluster administrators.  You can use this tool to do things like manage user quotas and manage
+  graceful maintenance on machines in the cluster.
+
+* **Aurora executor**
+  The executor (a.k.a. Thermos executor) is responsible for carrying out the workloads described in
+  the Aurora DSL (`.aurora` files).  The executor is what actually executes user processes.  It will
+  also perform health checking of tasks and register tasks in ZooKeeper for the purposes of dynamic
+  service discovery.
+
+* **Aurora observer**
+  The observer provides browser-based access to the status of individual tasks executing on worker
+  machines.  It gives insight into the processes executing, and facilitates browsing of task sandbox
+  directories.
+
+* **ZooKeeper**
+  [ZooKeeper](http://zookeeper.apache.org) is a distributed consensus system.  In an Aurora cluster
+  it is used for reliable election of the leading Aurora scheduler and Mesos master.  It is also
+  used as a vehicle for service discovery, see [Service Discovery](../../features/service-discovery/)
+
+* **Mesos master**
+  The master is responsible for tracking worker machines and performing accounting of their
+  resources.  The scheduler interfaces with the master to control the cluster.
+
+* **Mesos agent**
+  The agent receives work assigned by the scheduler and executes them.  It interfaces with Linux
+  isolation systems like cgroups, namespaces and Docker to manage the resource consumption of tasks.
+  When a user task is launched, the agent will launch the executor (in the context of a Linux cgroup
+  or Docker container depending upon the environment), which will in turn fork user processes.
+
+  In earlier versions of Mesos and Aurora, the Mesos agent was known as the Mesos slave.
+
+
+Jobs, Tasks and Processes
+--------------------------
+
+Aurora is a Mesos framework used to schedule *jobs* onto Mesos. Mesos
+cares about individual *tasks*, but typical jobs consist of dozens or
+hundreds of task replicas. Aurora provides a layer on top of Mesos with
+its `Job` abstraction. An Aurora `Job` consists of a task template and
+instructions for creating near-identical replicas of that task (modulo
+things like "instance id" or specific port numbers which may differ from
+machine to machine).
+
+How many tasks make up a Job can vary. At its most basic, a Job consists of
+one task template and instructions for creating near-identical replicas of that task
+(otherwise referred to as "instances" or "shards").
+
+A task can merely be a single *process* corresponding to a single
+command line, such as `python2.7 my_script.py`. However, a task can also
+consist of many separate processes, which all run within a single
+sandbox. For example, running multiple cooperating agents together,
+such as `logrotate`, `installer`, master, or agent processes. This is
+where Thermos comes in. While Aurora provides a `Job` abstraction on
+top of Mesos `Tasks`, Thermos provides a `Process` abstraction
+underneath Mesos `Task`s and serves as part of the Aurora framework's
+executor.
+
+You define `Job`s, `Task`s, and `Process`es in a configuration file.
+Configuration files are written in Python, and make use of the
+[Pystachio](https://github.com/wickman/pystachio) templating language,
+along with specific Aurora, Mesos, and Thermos commands and methods.
+The configuration files typically end with a `.aurora` extension.
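+
+To make the nesting concrete, here is a deliberately minimal `.aurora` sketch in the style of the
+[tutorial](../tutorial/): one `Process` wrapped in a `Task`, wrapped in a `Job`. The names and
+resource values are placeholders for illustration, not recommendations.
+
+```python
+# A minimal illustrative configuration: one Process, one Task, one Job.
+hello = Process(
+  name = 'hello',
+  cmdline = 'echo "hello from instance {{mesos.instance}}" && sleep 60')
+
+hello_task = Task(
+  name = 'hello_task',
+  processes = [hello],
+  resources = Resources(cpu = 0.1, ram = 16*MB, disk = 8*MB))
+
+jobs = [
+  Job(cluster = 'devcluster',
+      environment = 'devel',
+      role = 'www-data',
+      name = 'hello',
+      instances = 1,
+      task = hello_task)
+]
+```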
+
+Summary:
+
+* Aurora manages jobs made of tasks.
+* Mesos manages tasks made of processes.
+* Thermos manages processes.
+* All of this is defined in `.aurora` configuration files.
+
+![Aurora hierarchy](../images/aurora_hierarchy.png)
+
+Each `Task` has a *sandbox* created when the `Task` starts and garbage
+collected when it finishes. All of a `Task`'s processes run in its
+sandbox, so processes can share state by using a shared current working
+directory.
+
+The sandbox garbage collection policy considers many factors, most
+importantly age and size. It makes a best-effort attempt to keep
+sandboxes around as long as possible post-task in order for service
+owners to inspect data and logs, should the `Task` have completed
+abnormally. However, you should not design your applications assuming sandboxes
+will be around forever; instead, build log saving or other
+checkpointing mechanisms directly into your application or into your
+`Job` description.
+
diff --git a/source/documentation/0.20.0/getting-started/tutorial.md b/source/documentation/0.20.0/getting-started/tutorial.md
new file mode 100644
index 0000000..1f037fc
--- /dev/null
+++ b/source/documentation/0.20.0/getting-started/tutorial.md
@@ -0,0 +1,258 @@
+# Aurora Tutorial
+
+This tutorial shows how to use the Aurora scheduler to run (and "`printf-debug`")
+a hello world program on Mesos. This is the recommended document for new Aurora users
+to start getting up to speed on the system.
+
+- [Prerequisite](#setup-install-aurora)
+- [The Script](#the-script)
+- [Aurora Configuration](#aurora-configuration)
+- [Creating the Job](#creating-the-job)
+- [Watching the Job Run](#watching-the-job-run)
+- [Cleanup](#cleanup)
+- [Next Steps](#next-steps)
+
+
+## Prerequisite
+
+This tutorial assumes you are running [Aurora locally using Vagrant](../vagrant/).
+However, in general the instructions are also applicable to any other
+[Aurora installation](../../operations/installation/).
+
+Unless otherwise stated, all commands are to be run from the root of the aurora
+repository clone.
+
+
+## The Script
+
+Our "hello world" application is a simple Python script that loops
+forever, displaying the time every few seconds. Copy the code below and
+put it in a file named `hello_world.py` in the root of your Aurora repository clone
+(Note: this directory is the same as `/vagrant` inside the Vagrant VMs).
+
+The script has an intentional bug, which we will explain later on.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+import time
+
+def main():
+  SLEEP_DELAY = 10
+  # Python experts - ignore this blatant bug.
+  for i in xrang(100):
+    print("Hello world! The time is now: %s. Sleeping for %d secs" % (
+      time.asctime(), SLEEP_DELAY))
+    time.sleep(SLEEP_DELAY)
+
+if __name__ == "__main__":
+  main()
+```
+
+## Aurora Configuration
+
+Once we have our script/program, we need to create a *configuration
+file* that tells Aurora how to manage and launch our Job. Save the below
+code in the file `hello_world.aurora`.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+pkg_path = '/vagrant/hello_world.py'
+
+# we use a trick here to make the configuration change with
+# the contents of the file, for simplicity.  in a normal setting, packages would be
+# versioned, and the version number would be changed in the configuration.
+import hashlib
+with open(pkg_path, 'rb') as f:
+  pkg_checksum = hashlib.md5(f.read()).hexdigest()
+
+# copy hello_world.py into the local sandbox
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s . && echo %s && chmod +x hello_world.py' % (pkg_path, pkg_checksum))
+
+# run the script
+hello_world = Process(
+  name = 'hello_world',
+  cmdline = 'python -u hello_world.py')
+
+# describe the task
+hello_world_task = SequentialTask(
+  processes = [install, hello_world],
+  resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB))
+
+jobs = [
+  Service(cluster = 'devcluster',
+          environment = 'devel',
+          role = 'www-data',
+          name = 'hello_world',
+          task = hello_world_task)
+]
+```
+
+There is a lot going on in that configuration file:
+
+1. From a "big picture" viewpoint, it first defines two
+Processes. Then it defines a Task that runs the two Processes in the
+order specified in the Task definition, as well as specifying what
+computational and memory resources are available for them.  Finally,
+it defines a Job that will schedule the Task on available and suitable
+machines. This Job is the sole member of a list of Jobs; you can
+specify more than one Job in a config file.
+
+2. At the Process level, it specifies how to get your code into the
+local sandbox in which it will run. It then specifies how the code is
+actually run once the second Process starts.
+
+For more about Aurora configuration files, see the [Configuration
+Tutorial](../../reference/configuration-tutorial/) and the [Configuration
+Reference](../../reference/configuration/) (preferably after finishing this
+tutorial).
+
+
+## Creating the Job
+
+We're ready to launch our job! To do so, we use the Aurora Client to
+issue a Job creation request to the Aurora scheduler.
+
+Many Aurora Client commands take a *job key* argument, which uniquely
+identifies a Job. A job key consists of four parts, each separated by a
+"/". The four parts are  `<cluster>/<role>/<environment>/<jobname>`
+in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts existing on the agent machines. If you
+don't know what accounts are available, contact your sysadmin.
+* Environment names are namespaces; you can count on `test`, `devel`,
+`staging` and `prod` existing.
+* Jobname is the custom name of your job.
+
+When comparing two job keys, if any of the four parts is different from
+its counterpart in the other key, then the two job keys identify two separate
+jobs. If all four values are identical, the job keys identify the same job.
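+
+As a quick illustration of the format (purely for exposition; the real client performs stricter
+validation), a job key can be split into its four parts like this:
+
+```python
+def parse_job_key(job_key):
+  # 'devcluster/www-data/devel/hello_world' -> its four components.
+  cluster, role, environment, jobname = job_key.split('/')
+  return {'cluster': cluster, 'role': role,
+          'environment': environment, 'jobname': jobname}
+
+print(parse_job_key('devcluster/www-data/devel/hello_world'))
+```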
+
+The `clusters.json` [client configuration](../../reference/client-cluster-configuration/)
+for the Aurora scheduler defines the available cluster names.
+For Vagrant, from the top-level of your Aurora repository clone, do:
+
+    $ vagrant ssh
+
+Followed by:
+
+    vagrant@aurora:~$ cat /etc/aurora/clusters.json
+
+You'll see something like the following. The `name` value shown here corresponds to a job key's cluster value.
+
+```javascript
+[{
+  "name": "devcluster",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler",
+  "auth_mechanism": "UNAUTHENTICATED",
+  "slave_run_directory": "latest",
+  "slave_root": "/var/lib/mesos"
+}]
+```
+
+The Aurora Client command that actually runs our Job is `aurora job create`. It creates a Job as
+specified by its job key and configuration file arguments and runs it.
+
+    aurora job create <cluster>/<role>/<environment>/<jobname> <config_file>
+
+Or for our example:
+
+    aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+After entering our virtual machine using `vagrant ssh`, this returns:
+
+    vagrant@aurora:~$ aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Creating job hello_world
+     INFO] Checking status of devcluster/www-data/devel/hello_world
+    Job create succeeded: job url=http://aurora.local:8081/scheduler/www-data/devel/hello_world
+
+
+## Watching the Job Run
+
+Now that our job is running, let's see what it's doing. Access the
+scheduler web interface at `http://$scheduler_hostname:$scheduler_port/scheduler`,
+or, when using Vagrant, at `http://192.168.33.7:8081/scheduler`.
+First we see what Jobs are scheduled:
+
+![Scheduled Jobs](../images/ScheduledJobs.png)
+
+Click on your user name, which in this case was `www-data`, and we see the Jobs associated
+with that role:
+
+![Role Jobs](../images/RoleJobs.png)
+
+If you click on your `hello_world` Job, you'll see:
+
+![hello_world Job](../images/HelloWorldJob.png)
+
+Oops, looks like our first job didn't quite work! The task is temporarily throttled for
+having failed on every attempt of the Aurora scheduler to run it. We have to figure out
+what is going wrong.
+
+On the Completed tasks tab, we see all past attempts of the Aurora scheduler to run our job.
+
+![Completed tasks tab](../images/CompletedTasks.png)
+
+We can navigate to the Task page of a failed run by clicking on the host link.
+
+![Task page](../images/TaskBreakdown.png)
+
+Once there, we see that the `hello_world` process failed. The Task page
+captures the standard error and standard output streams and makes them available.
+Clicking through to `stderr` on the failed `hello_world` process, we see what happened.
+
+![stderr page](../images/stderr.png)
+
+It looks like we made a typo in our Python script. We wanted `xrange`,
+not `xrang`. Edit the `hello_world.py` script to use the correct function
+and save it. Because the configuration computes a checksum of the script's
+contents, the job configuration changes whenever the script does.
+
+In order to try again, we can now instruct the scheduler to update our job:
+
+    vagrant@aurora:~$ aurora update start devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Starting update for: hello_world
+    Job update has started. View your update progress at http://aurora.local:8081/scheduler/www-data/devel/hello_world/update/8ef38017-e60f-400d-a2f2-b5a8b724e95b
+
+This time, the task comes up.
+
+![Running Job](../images/RunningJob.png)
+
+Clicking on the host again, we inspect the Task page and see that the
+`hello_world` process is running.
+
+![Running Task page](../images/runningtask.png)
+
+We then inspect the output by clicking on `stdout` and see our process'
+output:
+
+![stdout page](../images/stdout.png)
+
+## Cleanup
+
+Now that we're done, we kill the job using the Aurora client:
+
+    vagrant@aurora:~$ aurora job killall devcluster/www-data/devel/hello_world
+     INFO] Killing tasks for job: devcluster/www-data/devel/hello_world
+     INFO] Instances to be killed: [0]
+    Successfully killed instances [0]
+    Job killall succeeded
+
+The job page now shows the `hello_world` tasks as completed.
+
+![Killed Task page](../images/killedtask.png)
+
+## Next Steps
+
+Now that you've finished this Tutorial, you should read or do the following:
+
+- [The Aurora Configuration Tutorial](../../reference/configuration-tutorial/), which provides more examples
+  and best practices for writing Aurora configurations. You should also look at
+  the [Aurora Configuration Reference](../../reference/configuration/).
+- Explore the Aurora Client - use `aurora -h`, and read the
+  [Aurora Client Commands](../../reference/client-commands/) document.
diff --git a/source/documentation/0.20.0/getting-started/vagrant.md b/source/documentation/0.20.0/getting-started/vagrant.md
new file mode 100644
index 0000000..e4d4168
--- /dev/null
+++ b/source/documentation/0.20.0/getting-started/vagrant.md
@@ -0,0 +1,154 @@
+A local Cluster with Vagrant
+============================
+
+This document shows you how to configure a complete cluster using a virtual machine. This setup
+replicates a real cluster in your development machine as closely as possible. After you complete
+the steps outlined here, you will be ready to create and run your first Aurora job.
+
+The following sections describe these steps in detail:
+
+1. [Overview](#overview)
+1. [Install VirtualBox and Vagrant](#install-virtualbox-and-vagrant)
+1. [Clone the Aurora repository](#clone-the-aurora-repository)
+1. [Start the local cluster](#start-the-local-cluster)
+1. [Log onto the VM](#log-onto-the-vm)
+1. [Run your first job](#run-your-first-job)
+1. [Rebuild components](#rebuild-components)
+1. [Shut down or delete your local cluster](#shut-down-or-delete-your-local-cluster)
+1. [Troubleshooting](#troubleshooting)
+
+
+Overview
+--------
+
+The Aurora distribution includes a set of scripts that enable you to create a local cluster in
+your development machine. These scripts use [Vagrant](https://www.vagrantup.com/) and
+[VirtualBox](https://www.virtualbox.org/) to run and configure a virtual machine. Once the
+virtual machine is running, the scripts install and initialize Aurora and any required components
+to create the local cluster.
+
+
+Install VirtualBox and Vagrant
+------------------------------
+
+First, download and install [VirtualBox](https://www.virtualbox.org/) on your development machine.
+
+Then download and install [Vagrant](https://www.vagrantup.com/). To verify that the installation
+was successful, open a terminal window and type the `vagrant` command. You should see a list of
+common commands for this tool.
+
+
+Clone the Aurora repository
+---------------------------
+
+To obtain the Aurora source distribution, clone its Git repository using the following command:
+
+     git clone git://git.apache.org/aurora.git
+
+
+Start the local cluster
+-----------------------
+
+Now change into the `aurora/` directory, which contains the Aurora source code and
+other scripts and tools:
+
+     cd aurora/
+
+To start the local cluster, type the following command:
+
+     vagrant up
+
+This command uses the configuration scripts in the Aurora distribution to:
+
+* Download a Linux system image.
+* Start a virtual machine (VM) and configure it.
+* Install the required build tools on the VM.
+* Install Aurora's requirements (like [Mesos](http://mesos.apache.org/) and
+[Zookeeper](http://zookeeper.apache.org/)) on the VM.
+* Build and install Aurora from source on the VM.
+* Start Aurora's services on the VM.
+
+This process takes several minutes to complete.
+
+You may notice a warning that guest additions in the VM don't match your version of VirtualBox.
+This should generally be harmless, but you may wish to install a vagrant plugin to take care of
+mismatches like this for you:
+
+     vagrant plugin install vagrant-vbguest
+
+With this plugin installed, whenever you run `vagrant up` the plugin will upgrade the guest additions
+for you when a version mismatch is detected. You can read more about the plugin
+[here](https://github.com/dotless-de/vagrant-vbguest).
+
+To verify that Aurora is running on the cluster, visit the following URLs:
+
+* Scheduler - http://192.168.33.7:8081
+* Observer - http://192.168.33.7:1338
+* Mesos Master - http://192.168.33.7:5050
+* Mesos Agent - http://192.168.33.7:5051
+
+
+Log onto the VM
+---------------
+
+To SSH into the VM, run the following command in your development machine:
+
+     vagrant ssh
+
+To verify that Aurora is installed in the VM, type the `aurora` command. You should see a list
+of arguments and possible commands.
+
+The `/vagrant` directory on the VM is mapped to the `aurora/` local directory
+from which you started the cluster. You can edit files inside this directory in your development
+machine and access them from the VM under `/vagrant`.
+
+A pre-installed `clusters.json` file refers to your local cluster as `devcluster`, which you
+will use in client commands.
+
+
+Run your first job
+------------------
+
+Now that your cluster is up and running, you are ready to define and run your first job in Aurora.
+For more information, see the [Aurora Tutorial](../tutorial/).
+
+
+Rebuild components
+------------------
+
+If you are changing Aurora code and would like to rebuild a component, you can use the `aurorabuild`
+command on the VM to build and restart a component.  This is considerably faster than destroying
+and rebuilding your VM.
+
+`aurorabuild` accepts a list of components to build and update. To get a list of supported
+components, invoke the `aurorabuild` command with no arguments. For example, to rebuild and
+restart the client:
+
+     vagrant ssh -c 'aurorabuild client'
+
+
+Shut down or delete your local cluster
+--------------------------------------
+
+To shut down your local cluster, run the `vagrant halt` command in your development machine. To
+start it again, run the `vagrant up` command.
+
+Once you are finished with your local cluster, or if you would otherwise like to start from scratch,
+you can use the command `vagrant destroy` to shut down and delete the virtual machine.
+
+
+Troubleshooting
+---------------
+
+Most Vagrant-related problems can be fixed with the following steps:
+
+* Destroying the vagrant environment with `vagrant destroy`
+* Killing any orphaned VMs (see AURORA-499) with the VirtualBox UI or the `VBoxManage` command line tool
+* Cleaning the repository of build artifacts and other intermediate output with `git clean -fdx`
+* Bringing up the vagrant environment with `vagrant up`
+
+If that still doesn't solve your problem, make sure to inspect the log files:
+
+* Scheduler: `/var/log/aurora/scheduler.log` or `sudo journalctl -u aurora-scheduler`
+* Observer: `/var/log/thermos/observer.log` or `sudo journalctl -u thermos-observer`
+* Mesos Master: `/var/log/mesos/mesos-master.INFO` (also see `.WARNING` and `.ERROR`)
+* Mesos Agent: `/var/log/mesos/mesos-slave.INFO` (also see `.WARNING` and `.ERROR`)
diff --git a/publish/documentation/latest/images/CPUavailability.png b/source/documentation/0.20.0/images/CPUavailability.png
similarity index 100%
copy from publish/documentation/latest/images/CPUavailability.png
copy to source/documentation/0.20.0/images/CPUavailability.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/CompletedTasks.png b/source/documentation/0.20.0/images/CompletedTasks.png
new file mode 100644
index 0000000..343e111
--- /dev/null
+++ b/source/documentation/0.20.0/images/CompletedTasks.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/HelloWorldJob.png b/source/documentation/0.20.0/images/HelloWorldJob.png
new file mode 100644
index 0000000..4977227
--- /dev/null
+++ b/source/documentation/0.20.0/images/HelloWorldJob.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/RoleJobs.png b/source/documentation/0.20.0/images/RoleJobs.png
new file mode 100644
index 0000000..95ffa49
--- /dev/null
+++ b/source/documentation/0.20.0/images/RoleJobs.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/RunningJob.png b/source/documentation/0.20.0/images/RunningJob.png
new file mode 100644
index 0000000..4e7a045
--- /dev/null
+++ b/source/documentation/0.20.0/images/RunningJob.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/ScheduledJobs.png b/source/documentation/0.20.0/images/ScheduledJobs.png
new file mode 100644
index 0000000..0be4731
--- /dev/null
+++ b/source/documentation/0.20.0/images/ScheduledJobs.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/TaskBreakdown.png b/source/documentation/0.20.0/images/TaskBreakdown.png
new file mode 100644
index 0000000..fab702c
--- /dev/null
+++ b/source/documentation/0.20.0/images/TaskBreakdown.png
Binary files differ
diff --git a/publish/documentation/latest/images/aurora_hierarchy.png b/source/documentation/0.20.0/images/aurora_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/aurora_hierarchy.png
copy to source/documentation/0.20.0/images/aurora_hierarchy.png
Binary files differ
diff --git a/publish/assets/img/aurora_logo.png b/source/documentation/0.20.0/images/aurora_logo.png
similarity index 100%
copy from publish/assets/img/aurora_logo.png
copy to source/documentation/0.20.0/images/aurora_logo.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/components.odg b/source/documentation/0.20.0/images/components.odg
new file mode 100644
index 0000000..a0f3b28
--- /dev/null
+++ b/source/documentation/0.20.0/images/components.odg
Binary files differ
diff --git a/source/documentation/0.20.0/images/components.png b/source/documentation/0.20.0/images/components.png
new file mode 100644
index 0000000..e8228c0
--- /dev/null
+++ b/source/documentation/0.20.0/images/components.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/debug-client-test.png b/source/documentation/0.20.0/images/debug-client-test.png
new file mode 100644
index 0000000..8b67a10
--- /dev/null
+++ b/source/documentation/0.20.0/images/debug-client-test.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/debugging-client-test.png b/source/documentation/0.20.0/images/debugging-client-test.png
new file mode 100644
index 0000000..df7b7e3
--- /dev/null
+++ b/source/documentation/0.20.0/images/debugging-client-test.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/killedtask.png b/source/documentation/0.20.0/images/killedtask.png
new file mode 100644
index 0000000..bb1bc92
--- /dev/null
+++ b/source/documentation/0.20.0/images/killedtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/lifeofatask.png b/source/documentation/0.20.0/images/lifeofatask.png
similarity index 100%
copy from publish/documentation/latest/images/lifeofatask.png
copy to source/documentation/0.20.0/images/lifeofatask.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png b/source/documentation/0.20.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
new file mode 100644
index 0000000..ea9866e
--- /dev/null
+++ b/source/documentation/0.20.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png b/source/documentation/0.20.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
new file mode 100644
index 0000000..c9539df
--- /dev/null
+++ b/source/documentation/0.20.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png b/source/documentation/0.20.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
new file mode 100644
index 0000000..99674c7
--- /dev/null
+++ b/source/documentation/0.20.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/presentations/02_28_2015_apache_aurora_thumb.png b/source/documentation/0.20.0/images/presentations/02_28_2015_apache_aurora_thumb.png
new file mode 100644
index 0000000..6e0e1fb
--- /dev/null
+++ b/source/documentation/0.20.0/images/presentations/02_28_2015_apache_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png b/source/documentation/0.20.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
new file mode 100644
index 0000000..f81a128
--- /dev/null
+++ b/source/documentation/0.20.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png b/source/documentation/0.20.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
new file mode 100644
index 0000000..6ebce80
--- /dev/null
+++ b/source/documentation/0.20.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png b/source/documentation/0.20.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
new file mode 100644
index 0000000..f8a3db1
--- /dev/null
+++ b/source/documentation/0.20.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/presentations/08_21_2014_past_present_future_thumb.png b/source/documentation/0.20.0/images/presentations/08_21_2014_past_present_future_thumb.png
new file mode 100644
index 0000000..deaebe7
--- /dev/null
+++ b/source/documentation/0.20.0/images/presentations/08_21_2014_past_present_future_thumb.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png b/source/documentation/0.20.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
new file mode 100644
index 0000000..61ea6d4
--- /dev/null
+++ b/source/documentation/0.20.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png b/source/documentation/0.20.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png
new file mode 100644
index 0000000..9f1f4af
--- /dev/null
+++ b/source/documentation/0.20.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png b/source/documentation/0.20.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
new file mode 100644
index 0000000..a8bb199
--- /dev/null
+++ b/source/documentation/0.20.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png b/source/documentation/0.20.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
new file mode 100644
index 0000000..ac5c6dc
--- /dev/null
+++ b/source/documentation/0.20.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/runningtask.png b/source/documentation/0.20.0/images/runningtask.png
new file mode 100644
index 0000000..9e7e31a
--- /dev/null
+++ b/source/documentation/0.20.0/images/runningtask.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/stderr.png b/source/documentation/0.20.0/images/stderr.png
new file mode 100644
index 0000000..6076a84
--- /dev/null
+++ b/source/documentation/0.20.0/images/stderr.png
Binary files differ
diff --git a/source/documentation/0.20.0/images/stdout.png b/source/documentation/0.20.0/images/stdout.png
new file mode 100644
index 0000000..d614097
--- /dev/null
+++ b/source/documentation/0.20.0/images/stdout.png
Binary files differ
diff --git a/publish/documentation/latest/images/storage_hierarchy.png b/source/documentation/0.20.0/images/storage_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/storage_hierarchy.png
copy to source/documentation/0.20.0/images/storage_hierarchy.png
Binary files differ
diff --git a/source/documentation/0.20.0/index.html.md b/source/documentation/0.20.0/index.html.md
new file mode 100644
index 0000000..fc414c4
--- /dev/null
+++ b/source/documentation/0.20.0/index.html.md
@@ -0,0 +1,79 @@
+## Introduction
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+We encourage you to ask questions on the [Aurora user list](http://aurora.apache.org/community/) or
+the `#aurora` IRC channel on `irc.freenode.net`.
+
+
+## Getting Started
+Information for everyone new to Apache Aurora.
+
+ * [Aurora System Overview](getting-started/overview/)
+ * [Hello World Tutorial](getting-started/tutorial/)
+ * [Local cluster with Vagrant](getting-started/vagrant/)
+
+## Features
+Description of important Aurora features.
+
+ * [Containers](features/containers/)
+ * [Cron Jobs](features/cron-jobs/)
+ * [Custom Executors](features/custom-executors/)
+ * [Job Updates](features/job-updates/)
+ * [Multitenancy](features/multitenancy/)
+ * [Resource Isolation](features/resource-isolation/)
+ * [Scheduling Constraints](features/constraints/)
+ * [Services](features/services/)
+ * [Service Discovery](features/service-discovery/)
+ * [SLA Metrics](features/sla-metrics/)
+ * [Webhooks](features/webhooks/)
+
+## Operators
+For those that wish to manage and fine-tune an Aurora cluster.
+
+ * [Installation](operations/installation/)
+ * [Configuration](operations/configuration/)
+ * [Upgrades](operations/upgrades/)
+ * [Troubleshooting](operations/troubleshooting/)
+ * [Monitoring](operations/monitoring/)
+ * [Security](operations/security/)
+ * [Storage](operations/storage/)
+ * [Backup](operations/backup-restore/)
+
+## Reference
+The complete reference of commands, configuration options, and scheduler internals.
+
+ * [Task lifecycle](reference/task-lifecycle/)
+ * Configuration (`.aurora` files)
+    - [Configuration Reference](reference/configuration/)
+    - [Configuration Tutorial](reference/configuration-tutorial/)
+    - [Configuration Best Practices](reference/configuration-best-practices/)
+    - [Configuration Templating](reference/configuration-templating/)
+ * Aurora Client
+    - [Client Commands](reference/client-commands/)
+    - [Client Hooks](reference/client-hooks/)
+    - [Client Cluster Configuration](reference/client-cluster-configuration/)
+ * [Scheduler Configuration](reference/scheduler-configuration/)
+ * [Observer Configuration](reference/observer-configuration/)
+ * [Endpoints](reference/scheduler-endpoints/)
+
+## Additional Resources
+ * [Tools integrating with Aurora](additional-resources/tools/)
+ * [Presentation videos and slides](additional-resources/presentations/)
+
+## Developers
+All the information you need to start modifying Aurora and contributing back to the project.
+
+ * [Contributing to the project](contributing/)
+ * [Committer's Guide](development/committers-guide/)
+ * [Design Documents](development/design-documents/)
+ * Developing the Aurora components:
+     - [Client](development/client/)
+     - [Scheduler](development/scheduler/)
+     - [Scheduler UI](development/ui/)
+     - [Thermos](development/thermos/)
+     - [Thrift structures](development/thrift/)
+
+
diff --git a/source/documentation/0.20.0/operations/backup-restore.md b/source/documentation/0.20.0/operations/backup-restore.md
new file mode 100644
index 0000000..b9e7d3a
--- /dev/null
+++ b/source/documentation/0.20.0/operations/backup-restore.md
@@ -0,0 +1,91 @@
+# Recovering from a Scheduler Backup
+
+**Be sure to read the entire page before attempting to restore from a backup, as it may have
+unintended consequences.**
+
+## Summary
+
+The restoration procedure replaces the existing (possibly corrupted) Mesos replicated log with an
+earlier, backed up, version and requires all schedulers to be taken down temporarily while
+restoring. Once completed, the scheduler state resets to what it was when the backup was created.
+This means any jobs/tasks created or updated after the backup are unknown to the scheduler and will
+be killed shortly after the cluster restarts. All other tasks continue operating as normal.
+
+Usually, it is a bad idea to restore a backup that is not extremely recent (i.e. older than a few
+hours). This is because the scheduler will expect the cluster to look exactly as the backup does,
+so any tasks that have been rescheduled since the backup was taken will be killed.
+
+The instructions below have been verified in a [Vagrant environment](../../getting-started/vagrant/) and, with minor
+syntax/path changes, should be applicable to any Aurora cluster.
+
+## Preparation
+
+Follow these steps to prepare the cluster for restoring from a backup:
+
+* Stop all scheduler instances
+
+* Consider blocking external traffic on the port defined in `-http_port` for all schedulers to
+prevent users from interacting with the scheduler during the restoration process. This will help
+troubleshooting by reducing the scheduler log noise and prevent users from making changes that will
+be erased after the backup snapshot is restored.
+
+* Configure `aurora_admin` access to run all commands listed in
+  [Restore from backup](#restore-from-backup) section locally on the leading scheduler:
+  * Make sure the [clusters.json](../../reference/client-cluster-configuration/) file is configured to
+    access the scheduler directly: set the `scheduler_uri` setting and remove `zk`. Since the leader can get
+    re-elected during the restore steps, consider doing this on all scheduler replicas.
+  * Depending on your particular security approach, you will need to either turn off scheduler
+    authorization by removing the scheduler's `-http_authentication_mechanism` flag or make sure
+    direct scheduler access is properly authorized. E.g. in the case of Kerberos, you will need to make
+    a `/etc/hosts` file change to map your local IP to the scheduler URL configured in the keytabs:
+
+        <local_ip> <scheduler_domain_in_keytabs>
+
+* The next steps are required to put the scheduler into a partially disabled state where it is still
+able to accept storage recovery requests but unable to schedule or change task states. This may be
+accomplished by updating the following scheduler configuration options:
+  * Set `-mesos_master_address` to a non-existent ZooKeeper address. This will prevent the scheduler
+    from registering with Mesos. E.g.: `-mesos_master_address=zk://localhost:1111/mesos/master`
+  * `-max_registration_delay` - set to sufficiently long interval to prevent registration timeout
+    and as a result scheduler suicide. E.g: `-max_registration_delay=360mins`
+  * Make sure the `-reconciliation_initial_delay` option is set high enough (e.g. `365days`) to
+    prevent accidental task GC. This is important as the scheduler will attempt to reconcile the cluster
+    state and will kill all tasks when restarted with an empty Mesos replicated log.
+
+* Restart all schedulers
+
+## Cleanup and re-initialize Mesos replicated log
+
+Get rid of the corrupted files and re-initialize Mesos replicated log:
+
+* Stop schedulers
+* Delete all files under `-native_log_file_path` on all schedulers
+* Initialize Mesos replica's log file: `sudo mesos-log initialize --path=<-native_log_file_path>`
+* Start schedulers
+
+## Restore from backup
+
+At this point the scheduler is ready to rehydrate from the backup:
+
+* Identify the leading scheduler by:
+  * examining the `scheduler_lifecycle_LEADER_AWAITING_REGISTRATION` metric at the scheduler
+    `/vars` endpoint. The leader will report 1; all other replicas report 0. (An illustrative
+    helper for this check is sketched after this list.)
+  * examining scheduler logs
+  * or examining the ZooKeeper registration under the path defined by `-zk_endpoints`
+    and `-serverset_path`
+
+* Locate the desired backup file, copy it to the leading scheduler's `-backup_dir` folder and stage
+recovery by running the following command on the leader:
+`aurora_admin scheduler_stage_recovery --bypass-leader-redirect <cluster> scheduler-backup-<yyyy-MM-dd-HH-mm>`
+
+* At this point, the recovery snapshot is staged and available for manual verification/modification
+via `aurora_admin scheduler_print_recovery_tasks --bypass-leader-redirect` and
+`scheduler_delete_recovery_tasks --bypass-leader-redirect` commands.
+See `aurora_admin help <command>` for usage details.
+
+* Commit recovery. This instructs the scheduler to overwrite the existing Mesos replicated log with
+the provided backup snapshot and initiate a mandatory failover:
+`aurora_admin scheduler_commit_recovery --bypass-leader-redirect <cluster>`
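+
+For the leader-identification step above, the following is a small, hypothetical helper that polls
+each replica's `/vars` endpoint. The hostnames are placeholders, and it assumes the plain-text
+`name value` format of `/vars`; it is only a convenience, not part of the procedure.
+
+```python
+from urllib.request import urlopen
+
+# Placeholder hostnames for your scheduler replicas.
+SCHEDULERS = ['scheduler1.example.org', 'scheduler2.example.org', 'scheduler3.example.org']
+
+def find_leader(hosts, port=8081):
+  metric = 'scheduler_lifecycle_LEADER_AWAITING_REGISTRATION'
+  for host in hosts:
+    body = urlopen('http://%s:%d/vars' % (host, port), timeout=5).read().decode()
+    for line in body.splitlines():
+      # Each /vars line is assumed to be "<metric_name> <value>".
+      if line.startswith(metric) and line.split()[-1] == '1':
+        return host
+  return None
+
+print(find_leader(SCHEDULERS))
+```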
+
+## Cleanup
+Undo any modification done during [Preparation](#preparation) sequence.
diff --git a/source/documentation/0.20.0/operations/configuration.md b/source/documentation/0.20.0/operations/configuration.md
new file mode 100644
index 0000000..f0afbfe
--- /dev/null
+++ b/source/documentation/0.20.0/operations/configuration.md
@@ -0,0 +1,339 @@
+# Scheduler Configuration
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+Examples are available under `examples/scheduler/`. For a list of available Aurora flags and their
+documentation, see [Scheduler Configuration Reference](../../reference/scheduler-configuration/).
+
+
+## A Note on Configuration
+
+Like Mesos, Aurora uses command-line flags for runtime configuration. As such, the Aurora
+"configuration file" is typically a `scheduler.sh` shell script of the following form:
+
+    #!/bin/bash
+    AURORA_HOME=/usr/local/aurora-scheduler
+
+    # Flags controlling the JVM.
+    JAVA_OPTS=(
+      -Xmx2g
+      -Xms2g
+      # GC tuning, etc.
+    )
+
+    # Flags controlling the scheduler.
+    AURORA_FLAGS=(
+      # Port for client RPCs and the web UI
+      -http_port=8081
+      # Log configuration, etc.
+    )
+
+    # Environment variables controlling libmesos
+    export JAVA_HOME=...
+    export GLOG_v=1
+    export LIBPROCESS_PORT=8083
+    export LIBPROCESS_IP=192.168.33.7
+
+    JAVA_OPTS="${JAVA_OPTS[*]}" exec "$AURORA_HOME/bin/aurora-scheduler" "${AURORA_FLAGS[@]}"
+
+That way Aurora's current flags are visible in `ps` and in the `/vars` admin endpoint.
+
+
+## JVM Configuration
+
+JVM settings are dependent on your environment and cluster size. They might require
+custom tuning. As a starting point, we recommend:
+
+* Ensure the initial (`-Xms`) and maximum (`-Xmx`) heap sizes are identical to prevent heap resizing
+  at runtime.
+* Either `-XX:+UseConcMarkSweepGC` or `-XX:+UseG1GC -XX:+UseStringDeduplication` are
+  sane defaults for the garbage collector.
+* `-Djava.net.preferIPv4Stack=true` makes sense in most cases as well.
+
+
+## Network Configuration
+
+By default, Aurora binds to all interfaces and auto-discovers its hostname. To reduce ambiguity,
+it helps to hardcode these values:
+
+    -http_port=8081
+    -ip=192.168.33.7
+    -hostname="aurora1.us-east1.example.org"
+
+Two environment variables control the IP and port used for communication with the Mesos master
+and for the replicated log used by Aurora:
+
+    export LIBPROCESS_PORT=8083
+    export LIBPROCESS_IP=192.168.33.7
+
+It is important that those can be reached from all Mesos master and Aurora scheduler instances.
+
+
+## Replicated Log Configuration
+
+Aurora schedulers use ZooKeeper to discover log replicas and elect a leader. Only one scheduler is
+leader at a given time - the other schedulers follow log writes and prepare to take over as leader
+but do not communicate with the Mesos master. Either 3 or 5 schedulers are recommended in a
+production deployment depending on failure tolerance and they must have persistent storage.
+
+Below is a summary of scheduler storage configuration flags that either don't have default values
+or require attention before deploying in a production environment.
+
+### `-native_log_quorum_size`
+Defines the Mesos replicated log quorum size. In a cluster with `N` schedulers, the flag
+`-native_log_quorum_size` should be set to `floor(N/2) + 1`. So in a cluster with 1 scheduler
+it should be set to `1`, in a cluster with 3 it should be set to `2`, and in a cluster of 5 it
+should be set to `3`.
+
+  Number of schedulers (N) | ```-native_log_quorum_size``` setting (```floor(N/2) + 1```)
+  ------------------------ | -------------------------------------------------------------
+  1                        | 1
+  3                        | 2
+  5                        | 3
+  7                        | 4
+
+*Incorrectly setting this flag will cause data corruption to occur!*
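+
+If you script your scheduler deployment, the same relationship can be computed directly; a trivial
+illustrative sketch:
+
+```python
+def quorum_size(scheduler_count):
+  # floor(N/2) + 1, the required -native_log_quorum_size for N schedulers.
+  return scheduler_count // 2 + 1
+
+print([(n, quorum_size(n)) for n in (1, 3, 5, 7)])  # [(1, 1), (3, 2), (5, 3), (7, 4)]
+```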
+
+### `-native_log_file_path`
+Location of the Mesos replicated log files. For optimal and consistent performance, consider
+allocating a dedicated disk (preferably SSD) for the replicated log. Ensure that this disk is not
+used by anything else (e.g. no process logging) and in particular that it is a real disk
+and not just a partition.
+
+Even when a dedicated disk is used, switching the Linux kernel I/O scheduler from `CFQ` to `deadline`
+can further help with storage performance in Aurora ([see this ticket for details](https://issues.apache.org/jira/browse/AURORA-1211)).
+
+### `-native_log_zk_group_path`
+ZooKeeper path used for Mesos replicated log quorum discovery.
+
+See [code](https://github.com/apache/aurora/blob/rel/0.20.0/src/main/java/org/apache/aurora/scheduler/log/mesos/MesosLogStreamModule.java) for
+other available Mesos replicated log configuration options and default values.
+
+### Changing the Quorum Size
+Special care needs to be taken when changing the size of the Aurora scheduler quorum.
+Since Aurora uses a Mesos replicated log, similar steps need to be followed as when
+[changing the Mesos quorum size](http://mesos.apache.org/documentation/latest/operational-guide).
+
+As a preparation, increase `-native_log_quorum_size` on each existing scheduler and restart them.
+When updating from 3 to 5 schedulers, the quorum size would grow from 2 to 3.
+
+When starting the new schedulers, use the `-native_log_quorum_size` set to the new value. Failing to
+first increase the quorum size on running schedulers can in some cases result in corruption
+or truncating of the replicated log used by Aurora. In that case, see the documentation on
+[recovering from backup](../backup-restore/).
+
+
+## Backup Configuration
+
+Configuration options for the Aurora scheduler backup manager.
+
+* `-backup_interval`: The interval on which the scheduler writes local storage backups.
+   The default is every hour.
+* `-backup_dir`: Directory to write backups to. As stated above, this should not be co-located on the
+   same disk as the replicated log.
+* `-max_saved_backups`: Maximum number of backups to retain before deleting the oldest backup(s).
+
+
+## Resource Isolation
+
+For proper CPU, memory, and disk isolation as mentioned in our [enduser documentation](../../features/resource-isolation/),
+we recommend adding the following isolators to the `--isolation` flag of the Mesos agent:
+
+* `cgroups/cpu`
+* `cgroups/mem`
+* `disk/du`
+
+In addition, we recommend setting the following [agent flags](http://mesos.apache.org/documentation/latest/configuration/):
+
+* `--cgroups_limit_swap` to enable memory limits on both memory and swap instead of just memory.
+  Alternatively, you could disable swap on your agent hosts.
+* `--cgroups_enable_cfs` to enable hard limits on CPU resources via the CFS bandwidth limiting
+  feature.
+* `--enforce_container_disk_quota` to enable disk quota enforcement for containers.
+
+To enable the optional GPU support in Mesos, please see the GPU related flags in the
+[Mesos configuration](http://mesos.apache.org/documentation/latest/configuration/).
+To enable the corresponding feature in Aurora, you have to start the scheduler with the
+flag
+
+    -allow_gpu_resource=true
+
+If you want to use revocable resources, first follow the
+[Mesos oversubscription documentation](http://mesos.apache.org/documentation/latest/oversubscription/)
+and then set this Aurora scheduler flag to allow receiving revocable Mesos offers:
+
+    -receive_revocable_resources=true
+
+Both CPUs and RAM are supported as revocable resources. The former is enabled by default,
+while the latter needs to be enabled via:
+
+    -enable_revocable_ram=true
+
+Unless you want to use the [default](https://github.com/apache/aurora/blob/rel/0.20.0/src/main/resources/org/apache/aurora/scheduler/tiers.json)
+tier configuration, you will also have to specify a file path:
+
+    -tier_config=path/to/tiers/config.json
+
+
+## Multi-Framework Setup
+
+Aurora holds onto Mesos offers in order to provide efficient scheduling and
+[preemption](../../features/multitenancy/#preemption). This is problematic in multi-framework
+environments as Aurora might starve other frameworks.
+
+At the cost of increased scheduling latency, Aurora can be configured to be more cooperative:
+
+* Lowering `-min_offer_hold_time` (e.g. to `1mins`) can ensure unused offers are returned back to
+  Mesos more frequently.
+* Increasing `-offer_filter_duration` (e.g. to `30secs`) will instruct Mesos
+  not to re-offer rejected resources for the given duration.
+
+Setting a [minimum amount of resources](http://mesos.apache.org/documentation/latest/quota/) for
+each Mesos role can furthermore help to ensure no framework is starved entirely.
+
+
+## Containers
+
+Both the Mesos and Docker containerizers require configuration of the Mesos agent.
+
+### Mesos Containerizer
+
+The minimal agent configuration requires enabling Docker and Appc image support for the Mesos
+containerizer:
+
+    --containerizers=mesos
+    --image_providers=appc,docker
+    --isolation=filesystem/linux,docker/runtime  # as an addition to your other isolators
+
+Further details can be found in the corresponding [Mesos documentation](http://mesos.apache.org/documentation/latest/container-image/).
+
+### Docker Containerizer
+
+The [Docker containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/)
+requires that the Docker engine is installed on each agent host. In addition, it must be enabled on the
+Mesos agents by launching them with the option:
+
+    --containerizers=mesos,docker
+
+If you would like to run a container with a read-only filesystem, it may also be necessary to use
+the scheduler flag `-thermos_home_in_sandbox` in order to set HOME to the sandbox
+before the executor runs. This will make sure that the executor/runner PEX extraction happens
+inside of the sandbox instead of the container filesystem root.
+
+If you would like to supply your own parameters to `docker run` when launching jobs in docker
+containers, you may use the following flags:
+
+    -allow_docker_parameters
+    -default_docker_parameters
+
+`-allow_docker_parameters` controls whether or not users may pass their own configuration parameters
+through the job configuration files. If set to `false` (the default), the scheduler will reject
+jobs with custom parameters. *NOTE*: this setting should be used with caution as it allows any job
+owner to specify any parameters they wish, including those that may introduce security concerns
+(`privileged=true`, for example).
+
+`-default_docker_parameters` allows a cluster operator to specify a universal set of parameters that
+should be used for every container that does not have parameters explicitly configured at the job
+level. The argument accepts a multimap format:
+
+    -default_docker_parameters="read-only=true,tmpfs=/tmp,tmpfs=/run"
+
+### Common Options
+
+The following Aurora options work for both containerizers.
+
+The scheduler flag `-global_container_mounts` allows mounting paths from the host (i.e. the agent machine)
+into all containers on that host. The format is a comma-separated list of `host_path:container_path[:mode]`
+tuples. For example, `-global_container_mounts=/opt/secret_keys_dir:/mnt/secret_keys_dir:ro` mounts
+`/opt/secret_keys_dir` from the agents into all launched containers. Valid modes are `ro` and `rw`.
+
+
+## Thermos Process Logs
+
+### Log destination
+By default, Thermos will write process stdout/stderr to log files in the sandbox. Process object
+configuration allows specifying alternate log file destinations like streamed stdout/stderr or
+suppression of all log output. Default behavior can be configured for the entire cluster with the
+following flag (through the `-thermos_executor_flags` argument to the Aurora scheduler):
+
+    --runner-logger-destination=both
+
+The `both` configuration will write logs to files and also stream them to the parent stdout/stderr.
+
+See [Configuration Reference](../../reference/configuration/#logger) for all destination options.
+
+### Log rotation
+By default, Thermos will not rotate the stdout/stderr logs from child processes and they will grow
+without bound. An individual user may change this behavior via configuration on the Process object,
+but it may also be desirable to change the default configuration for the entire cluster.
+In order to enable rotation by default, the following flags can be applied to Thermos (through the
+`-thermos_executor_flags` argument to the Aurora scheduler):
+
+    --runner-logger-mode=rotate
+    --runner-rotate-log-size-mb=100
+    --runner-rotate-log-backups=10
+
+In the above example, each instance of the Thermos runner will rotate stderr/stdout logs once they
+reach 100 MiB in size and keep a maximum of 10 backups. If a user has provided a custom setting for
+their process, it will override these default settings.
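+
+Applied cluster-wide, these runner flags would be wrapped in the scheduler's
+`-thermos_executor_flags` argument, e.g. (quoting may vary with your init system):
+
+    -thermos_executor_flags="--runner-logger-mode=rotate --runner-rotate-log-size-mb=100 --runner-rotate-log-backups=10"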
+
+
+## Thermos Executor Wrapper
+
+If you need to do computation before starting the Thermos executor (for example, setting a different
+`--announcer-hostname` parameter for every executor), then the Thermos executor should be invoked
+inside a wrapper script. In such a case, the Aurora scheduler should be started with
+`-thermos_executor_path` pointing to the wrapper script and `-thermos_executor_resources` set to a
+comma separated string of all the resources that should be copied into the sandbox (including the
+original Thermos executor). Ensure the wrapper script does not access resources outside of the
+sandbox, as when the script is run from within a Docker container those resources may not exist.
+
+For example, to wrap the executor inside a simple wrapper, the scheduler would be started with
+`-thermos_executor_path=/path/to/wrapper.sh -thermos_executor_resources=/usr/share/aurora/bin/thermos_executor.pex`.
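+
+A minimal wrapper might look like the following sketch; the relative executor path and the
+hostname logic are illustrative assumptions, with the executor itself shipped into the sandbox via
+`-thermos_executor_resources`:
+
+    #!/bin/bash
+    # Illustrative sketch: derive a per-host announcer hostname, then hand off to the
+    # Thermos executor that was copied into the sandbox alongside this wrapper.
+    exec ./thermos_executor.pex \
+      --announcer-hostname="$(hostname -f)" \
+      "$@"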
+
+## Custom Executors
+
+The scheduler can be configured to utilize a custom executor by specifying the `-custom_executor_config` flag.
+The flag must be set to the path of a valid executor configuration file.
+
+For more information on this feature please see the custom executors [documentation](../../features/custom-executors/).
+
+## A note on increasing executor overhead
+
+Increasing executor overhead on an existing cluster, whether it be for custom executors or for Thermos,
+will result in degraded preemption performance until all tasks that began life with the previous,
+lower-overhead executor configuration are preempted/restarted.
+
+## Controlling MTTA via Update Affinity
+
+When there is high resource contention in your cluster you may experience noticeably elevated job update
+times, as well as high task churn across the cluster. This is due to Aurora's first-fit scheduling
+algorithm. To alleviate this, you can enable update affinity where the Scheduler will make a best-effort
+attempt to reuse the same agent for the updated task (so long as the resources for the job are not being
+increased).
+
+To enable this in the Scheduler, you can set the following options:
+
+    --enable_update_affinity=true
+    --update_affinity_reservation_hold_time=3mins
+
+You will need to tune the hold time to match the behavior you see in your cluster. If you have extremely
+high update throughput, you might have to extend it as processing updates could easily add significant
+delays between scheduling attempts. You may also have to tune scheduling parameters to achieve the
+throughput you need in your cluster. Some relevant settings (with defaults) are:
+
+    --max_schedule_attempts_per_sec=40
+    --initial_schedule_penalty=1secs
+    --max_schedule_penalty=1mins
+    --scheduling_max_batch_size=3
+    --max_tasks_per_schedule_attempt=5
+
+There are metrics exposed by the Scheduler which can provide guidance on where the bottleneck is.
+Example metrics to look at:
+
+    - schedule_attempts_blocks (if this number is greater than 0, then task throughput is hitting
+                                limits controlled by --max_schedule_attempts_per_sec)
+    - scheduled_task_penalty_* (metrics around scheduling penalties for tasks, if the numbers here are high
+                                then you could have high contention for resources)
+
+Most likely you'll hit the limit on the number of update instances that can be processed per minute
+before you run into any other limits. So if your total work done per minute starts to exceed 2k instances,
+you may need to extend `update_affinity_reservation_hold_time`.
diff --git a/source/documentation/0.20.0/operations/installation.md b/source/documentation/0.20.0/operations/installation.md
new file mode 100644
index 0000000..12db004
--- /dev/null
+++ b/source/documentation/0.20.0/operations/installation.md
@@ -0,0 +1,256 @@
+# Installing Aurora
+
+Source and binary distributions can be found on our
+[downloads](https://aurora.apache.org/downloads/) page.  Installing from binary packages is
+recommended for most.
+
+- [Installing the scheduler](#installing-the-scheduler)
+- [Installing worker components](#installing-worker-components)
+- [Installing the client](#installing-the-client)
+- [Installing Mesos](#installing-mesos)
+- [Troubleshooting](#troubleshooting)
+
+If our binary packages don't suit you, our package build toolchain makes it easy to build your
+own packages. See the [instructions](https://github.com/apache/aurora-packaging) to learn how.
+
+
+## Machine profiles
+
+Given that many of these components communicate over the network, there are numerous ways you could
+assemble them to create an Aurora cluster.  The simplest way is to think in terms of three machine
+profiles:
+
+### Coordinator
+**Components**: ZooKeeper, Aurora scheduler, Mesos master
+
+A small number of machines (typically 3 or 5) responsible for cluster orchestration.  In most cases
+it is fine to co-locate these components in anything but very large clusters (> 1000 machines).
+Beyond that point, operators will likely want to manage these services on separate machines.
+In particular, you will want to use separate ZooKeeper ensembles for leader election and
+service discovery. Otherwise a service discovery error or outage can take down the entire cluster.
+
+In practice, 5 coordinators have been shown to reliably manage clusters with tens of thousands of
+machines.
+
+### Worker
+**Components**: Aurora executor, Aurora observer, Mesos agent
+
+The bulk of the cluster, where services will actually run.
+
+### Client
+**Components**: Aurora client, Aurora admin client
+
+Any machines that users submit jobs from.
+
+
+## Installing the scheduler
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        sudo start mesos-master
+
+2. Install ZooKeeper
+
+        sudo apt-get install -y zookeeperd
+
+3. Install the Aurora scheduler
+
+        sudo add-apt-repository -y ppa:openjdk-r/ppa
+        sudo apt-get update
+        sudo apt-get install -y openjdk-8-jre-headless wget
+
+        sudo update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-scheduler_0.17.0_amd64.deb
+        sudo dpkg -i aurora-scheduler_0.17.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-master
+
+2. Install ZooKeeper
+
+        sudo rpm -Uvh https://archive.cloudera.com/cdh4/one-click-install/redhat/6/x86_64/cloudera-cdh-4-0.x86_64.rpm
+        sudo yum install -y java-1.8.0-openjdk-headless zookeeper-server
+
+        sudo service zookeeper-server init
+        sudo systemctl start zookeeper-server
+
+3. Install the Aurora scheduler
+
+        sudo yum install -y wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-scheduler-0.17.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-scheduler-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Finalizing
+By default, the scheduler will start in an uninitialized mode.  This is because external
+coordination is necessary to be certain operator error does not result in a quorum of schedulers
+starting up and believing their databases are empty when in fact they should be re-joining a
+cluster.
+
+Because of this, a fresh install of the scheduler will need intervention to start up.  First,
+stop the scheduler service.
+Ubuntu: `sudo stop aurora-scheduler`
+CentOS: `sudo systemctl stop aurora`
+
+Now initialize the database:
+
+    sudo -u aurora mkdir -p /var/lib/aurora/scheduler/db
+    sudo -u aurora mesos-log initialize --path=/var/lib/aurora/scheduler/db
+
+Now you can start the scheduler back up.
+Ubuntu: `sudo start aurora-scheduler`
+CentOS: `sudo systemctl start aurora`
+
+
+## Installing worker components
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo apt-get install -y python2.7 wget
+
+        # NOTE: This appears to be a missing dependency of the mesos deb package and is needed
+        # for the python mesos native bindings.
+        sudo apt-get -y install libcurl4-nss-dev
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-executor_0.17.0_amd64.deb
+        sudo dpkg -i aurora-executor_0.17.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo yum install -y python2 wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-executor-0.17.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-executor-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Worker Configuration
+The executor typically does not require configuration.  Command line arguments can
+be passed to the executor via the `-thermos_executor_flags` argument on the scheduler.
+
+The observer needs to be configured to look at the correct Mesos directory in order to find task
+sandboxes. You should first find the Mesos working directory by looking for the Mesos agent
+`--work_dir` flag. You should see something like:
+
+        ps -eocmd | grep "mesos-slave" | grep -v grep | tr ' ' '\n' | grep "\--work_dir"
+        --work_dir=/var/lib/mesos
+
+If the flag is not set, you can view the default value like so:
+
+        mesos-slave --help
+        Usage: mesos-slave [options]
+
+          ...
+          --work_dir=VALUE      Directory path to place framework work directories
+                                (default: /tmp/mesos)
+          ...
+
+The value you find for `--work_dir`, `/var/lib/mesos` in this example, should match the Aurora
+observer value for `--mesos-root`.  You can look for that setting in a similar way on a worker
+node by grepping for `thermos_observer` and `--mesos-root`.  If the flag is not set, you can view
+the default value like so:
+
+        thermos_observer -h
+        Options:
+          ...
+          --mesos-root=MESOS_ROOT
+                                The mesos root directory to search for Thermos
+                                executor sandboxes [default: /var/lib/mesos]
+          ...
+
+In this case the default is `/var/lib/mesos` and we have a match. If there is no match, you can
+either adjust the Mesos agent start script(s) and restart the agent(s), or else adjust the
+Aurora observer start scripts and restart the observers.  To adjust the Aurora observer:
+
+#### Ubuntu Trusty
+
+    sudo sh -c 'echo "MESOS_ROOT=/tmp/mesos" >> /etc/default/thermos'
+
+#### CentOS 7
+
+Make an edit to add the `--mesos-root` flag resulting in something like:
+
+    grep -A5 OBSERVER_ARGS /etc/sysconfig/thermos
+    OBSERVER_ARGS=(
+      --port=1338
+      --mesos-root=/tmp/mesos
+      --log_to_disk=NONE
+      --log_to_stderr=google:INFO
+    )
+
+
+## Installing the client
+### Ubuntu Trusty
+
+    sudo apt-get install -y python2.7 wget
+
+    wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-tools_0.17.0_amd64.deb
+    sudo dpkg -i aurora-tools_0.17.0_amd64.deb
+
+### CentOS 7
+
+    sudo yum install -y python2 wget
+
+    wget -c https://apache.bintray.com/aurora/centos-7/aurora-tools-0.17.0-1.el7.centos.aurora.x86_64.rpm
+    sudo yum install -y aurora-tools-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Mac OS X
+
+    brew upgrade
+    brew install aurora-cli
+
+### Client Configuration
+Client configuration lives in a json file that describes the clusters available and how to reach
+them.  By default this file is at `/etc/aurora/clusters.json`.
+
+Jobs may be submitted to the scheduler using the client, and are described with
+[job configurations](../../reference/configuration/) expressed in `.aurora` files.  Typically you will
+maintain a single job configuration file to describe one or more deployment environments (e.g.
+dev, test, prod) for a production job.
+
+
+## Installing Mesos
+Mesos uses a single package for the Mesos master and agent.  As a result, the package dependencies
+are identical for both.
+
+### Mesos on Ubuntu Trusty
+
+    sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF
+    DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]')
+    CODENAME=$(lsb_release -cs)
+
+    echo "deb http://repos.mesosphere.io/${DISTRO} ${CODENAME} main" | \
+      sudo tee /etc/apt/sources.list.d/mesosphere.list
+    sudo apt-get -y update
+
+    # Use `apt-cache showpkg mesos | grep [version]` to find the exact version.
+    sudo apt-get -y install mesos=1.1.0-2.0.107.ubuntu1404
+
+### Mesos on CentOS 7
+
+    sudo rpm -Uvh https://repos.mesosphere.io/el/7/noarch/RPMS/mesosphere-el-repo-7-1.noarch.rpm
+    sudo yum -y install mesos-1.1.0
+
+
+## Troubleshooting
+
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions in our [Troubleshooting guide](../troubleshooting/) to help get you moving.
diff --git a/source/documentation/0.20.0/operations/monitoring.md b/source/documentation/0.20.0/operations/monitoring.md
new file mode 100644
index 0000000..2ceda62
--- /dev/null
+++ b/source/documentation/0.20.0/operations/monitoring.md
@@ -0,0 +1,181 @@
+# Monitoring your Aurora cluster
+
+Before you start running important services in your Aurora cluster, it's important to set up
+monitoring and alerting of Aurora itself.  Most of your monitoring can be against the scheduler,
+since it will give you a global view of what's going on.
+
+## Reading stats
+The scheduler exposes a *lot* of instrumentation data via its HTTP interface. You can get a quick
+peek at the first few of these in our vagrant image:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars | head'
+    async_tasks_completed 1004
+    attribute_store_fetch_all_events 15
+    attribute_store_fetch_all_events_per_sec 0.0
+    attribute_store_fetch_all_nanos_per_event 0.0
+    attribute_store_fetch_all_nanos_total 3048285
+    attribute_store_fetch_all_nanos_total_per_sec 0.0
+    attribute_store_fetch_one_events 3391
+    attribute_store_fetch_one_events_per_sec 0.0
+    attribute_store_fetch_one_nanos_per_event 0.0
+    attribute_store_fetch_one_nanos_total 454690753
+
+These values are served as `Content-Type: text/plain`, with each line containing a space-separated metric
+name and value. Values may be integers, doubles, or strings (note: strings are static, others
+may be dynamic).
+
+If your monitoring infrastructure prefers JSON, the scheduler exports that as well:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars.json | python -mjson.tool | head'
+    {
+        "async_tasks_completed": 1009,
+        "attribute_store_fetch_all_events": 15,
+        "attribute_store_fetch_all_events_per_sec": 0.0,
+        "attribute_store_fetch_all_nanos_per_event": 0.0,
+        "attribute_store_fetch_all_nanos_total": 3048285,
+        "attribute_store_fetch_all_nanos_total_per_sec": 0.0,
+        "attribute_store_fetch_one_events": 3409,
+        "attribute_store_fetch_one_events_per_sec": 0.0,
+        "attribute_store_fetch_one_nanos_per_event": 0.0,
+
+This will be the same data as above, served with `Content-Type: application/json`.
+
+## Viewing live stat samples on the scheduler
+The scheduler uses the Twitter commons stats library, which keeps an internal time-series database
+of exported variables - nearly everything in `/vars` is available for instant graphing.  This is
+useful for debugging, but is not a replacement for an external monitoring system.
+
+You can view these graphs on a scheduler at `/graphview`.  It supports some composition and
+aggregation of values, which can be invaluable when triaging a problem.  For example, if you have
+the scheduler running in vagrant, check out these links:
+[simple graph](http://192.168.33.7:8081/graphview?query=jvm_uptime_secs)
+[complex composition](http://192.168.33.7:8081/graphview?query=rate\(scheduler_log_native_append_nanos_total\)%2Frate\(scheduler_log_native_append_events\)%2F1e6)
+
+### Counters and gauges
+Among numeric stats, there are two fundamental types of stats exported: _counters_ and _gauges_.
+Counters are guaranteed to be monotonically-increasing for the lifetime of a process, while gauges
+may decrease in value.  Aurora uses counters to represent things like the number of times an event
+has occurred, and gauges to capture things like the current length of a queue.  Counters are a
+natural fit for accurate composition into [rate ratios](http://en.wikipedia.org/wiki/Rate_ratio)
+(useful for sample-resistant latency calculation), while gauges are not.
+
+# Alerting
+
+## Quickstart
+If you are looking for just bare-minimum alerting to get something in place quickly, set up alerting
+on `framework_registered` and `task_store_LOST`. These will give you a decent picture of overall
+health.
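+
+For a quick manual check, both stats can be read straight off the scheduler's `/vars` endpoint
+(host and port are illustrative):
+
+    curl -s localhost:8081/vars | grep -E '^(framework_registered|task_store_LOST) '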
+
+## A note on thresholds
+One of the most difficult things in monitoring is choosing alert thresholds. With many of these
+stats, there is no value we can offer as a threshold that will be guaranteed to work for you. It
+will depend on the size of your cluster, number of jobs, churn of tasks in the cluster, etc. We
+recommend you start with a strict value after viewing a small amount of collected data, and then
+adjust thresholds as you see fit. Feel free to ask us if you would like to validate that your alerts
+and thresholds make sense.
+
+## Important stats
+
+### `jvm_uptime_secs`
+Type: integer counter
+
+The number of seconds the JVM process has been running. Comes from
+[RuntimeMXBean#getUptime()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/RuntimeMXBean.html#getUptime\(\))
+
+Detecting resets (decreasing values) on this stat will tell you that the scheduler is failing to
+stay alive.
+
+Look at the scheduler logs to identify the reason the scheduler is exiting.
+
+### `system_load_avg`
+Type: double gauge
+
+The current load average of the system for the last minute. Comes from
+[OperatingSystemMXBean#getSystemLoadAverage()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/OperatingSystemMXBean.html?is-external=true#getSystemLoadAverage\(\)).
+
+A high sustained value suggests that the scheduler machine may be over-utilized.
+
+Use standard unix tools like `top` and `ps` to track down the offending process(es).
+
+### `process_cpu_cores_utilized`
+Type: double gauge
+
+The current number of CPU cores in use by the JVM process. This should not exceed the number of
+logical CPU cores on the machine. Derived from
+[OperatingSystemMXBean#getProcessCpuTime()](http://docs.oracle.com/javase/7/docs/jre/api/management/extension/com/sun/management/OperatingSystemMXBean.html)
+
+A high sustained value indicates that the scheduler is overworked. Due to current internal design
+limitations, if this value is sustained at `1`, there is a good chance the scheduler is under water.
+
+There are two main inputs that tend to drive this figure: task scheduling attempts and status
+updates from Mesos.  You may see activity in the scheduler logs to give an indication of where
+time is being spent.  Beyond that, it really takes good familiarity with the code to effectively
+triage this.  We suggest engaging with an Aurora developer.
+
+### `task_store_LOST`
+Type: integer gauge
+
+The number of tasks stored in the scheduler that are in the `LOST` state, and have been rescheduled.
+
+If this value is increasing at a high rate, it is a sign of trouble.
+
+There are many sources of `LOST` tasks in Mesos: the scheduler, master, agent, and executor can all
+trigger this.  The first step is to look in the scheduler logs for `LOST` to identify where the
+state changes are originating.
+
+### `scheduler_resource_offers`
+Type: integer counter
+
+The number of resource offers that the scheduler has received.
+
+For a healthy scheduler, this value must be increasing over time.
+
+Assuming the scheduler is up and otherwise healthy, you will want to check if the master thinks it
+is sending offers. You should also look at the master's web interface to see if it has a large
+number of outstanding offers that it is waiting to have returned.
+
+### `framework_registered`
+Type: binary integer counter
+
+Will be `1` for the leading scheduler that is registered with the Mesos master, `0` for passive
+schedulers.
+
+A sustained period without a `1` (or where `sum() != 1`) warrants investigation.
+
+If there is no leading scheduler, look in the scheduler and master logs for why.  If there are
+multiple schedulers claiming leadership, this suggests a split brain and warrants filing a critical
+bug.
+
+### `rate(scheduler_log_native_append_nanos_total)/rate(scheduler_log_native_append_events)`
+Type: rate ratio of integer counters
+
+This composes two counters to compute a windowed figure for the latency of replicated log writes.
+
+A hike in this value suggests disk bandwidth contention.
+
+Look in scheduler logs for any reported oddness with saving to the replicated log. Also use
+standard tools like `vmstat` and `iotop` to identify whether the disk has become slow or
+over-utilized. We suggest using a dedicated disk for the replicated log to mitigate this.
+
+### `timed_out_tasks`
+Type: integer counter
+
+Tracks the number of times the scheduler has given up while waiting
+(for `-transient_task_state_timeout`) to hear back about a task that is in a transient state
+(e.g. `ASSIGNED`, `KILLING`), and has moved to `LOST` before rescheduling.
+
+This value is currently known to increase occasionally when the scheduler fails over
+([AURORA-740](https://issues.apache.org/jira/browse/AURORA-740)). However, any large spike in this
+value warrants investigation.
+
+The scheduler will log when it times out a task. You should trace the task ID of the timed out
+task into the master, agent, and/or executors to determine where the message was dropped.
+
+### `http_500_responses_events`
+Type: integer counter
+
+The total number of HTTP 500 status responses sent by the scheduler. Includes API and asset serving.
+
+An increase warrants investigation.
+
+Look in the scheduler logs to identify why the scheduler returned a 500; there should be a stack trace.
diff --git a/source/documentation/0.20.0/operations/security.md b/source/documentation/0.20.0/operations/security.md
new file mode 100644
index 0000000..1fdf10f
--- /dev/null
+++ b/source/documentation/0.20.0/operations/security.md
@@ -0,0 +1,362 @@
+Securing your Aurora Cluster
+============================
+
+Aurora integrates with [Apache Shiro](http://shiro.apache.org/) to provide security
+controls for its API. In addition to providing some useful features out of the box, Shiro
+also allows Aurora cluster administrators to adapt the security system to their organization’s
+existing infrastructure. The announcer in the Aurora thermos executor also supports security
+controls for talking to ZooKeeper.
+
+
+- [Enabling Security](#enabling-security)
+- [Authentication](#authentication)
+	- [HTTP Basic Authentication](#http-basic-authentication)
+		- [Server Configuration](#server-configuration)
+		- [Client Configuration](#client-configuration)
+	- [HTTP SPNEGO Authentication (Kerberos)](#http-spnego-authentication-kerberos)
+		- [Server Configuration](#server-configuration-1)
+		- [Client Configuration](#client-configuration-1)
+- [Authorization](#authorization)
+	- [Using an INI file to define security controls](#using-an-ini-file-to-define-security-controls)
+		- [Caveats](#caveats)
+- [Implementing a Custom Realm](#implementing-a-custom-realm)
+	- [Packaging a realm module](#packaging-a-realm-module)
+- [Announcer Authentication](#announcer-authentication)
+    - [ZooKeeper authentication configuration](#zookeeper-authentication-configuration)
+    - [Executor settings](#executor-settings)
+- [Scheduler HTTPS](#scheduler-https)
+- [Known Issues](#known-issues)
+
+# Enabling Security
+
+There are two major components of security:
+[authentication and authorization](http://en.wikipedia.org/wiki/Authentication#Authorization).  A
+cluster administrator may choose the approach used for each, and may also implement custom
+mechanisms for either.  Later sections describe the options available. To enable authentication
+for the announcer, see [Announcer Authentication](#announcer-authentication).
+
+
+# Authentication
+
+At a minimum, the scheduler must be configured with instructions for how to process authentication
+credentials.  There are currently two built-in authentication schemes -
+[HTTP Basic Authentication](http://en.wikipedia.org/wiki/Basic_access_authentication), and
+[SPNEGO](http://en.wikipedia.org/wiki/SPNEGO) (Kerberos).
+
+## HTTP Basic Authentication
+
+Basic Authentication is a very quick way to add *some* security.  It is supported
+by all major browsers and HTTP client libraries with minimal work.  However,
+before relying on Basic Authentication you should be aware of the [security
+considerations](http://tools.ietf.org/html/rfc2617#section-4).
+
+### Server Configuration
+
+At a minimum you need to set the following command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_realm_modules=INI_AUTHNZ
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+[users]
+sally = apple, admin
+
+[roles]
+admin = *
+```
+
+The details of the security.ini file are explained below. Note that this file contains plaintext,
+unhashed passwords.
+
+### Client Configuration
+
+To configure the client for HTTP Basic authentication, add an entry to ~/.netrc with your credentials
+
+```
+% cat ~/.netrc
+# ...
+
+machine aurora.example.com
+login sally
+password apple
+
+# ...
+```
+
+No changes are required to `clusters.json`.
+
+## HTTP SPNEGO Authentication (Kerberos)
+
+### Server Configuration
+At a minimum you need to set the following command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=NEGOTIATE
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+% cat path/to/security.ini
+[users]
+sally = _, admin
+
+[roles]
+admin = *
+```
+
+What's going on here? First, Aurora must be configured to request Kerberos credentials when presented with an
+unauthenticated request. This is achieved by setting
+
+```
+-http_authentication_mechanism=NEGOTIATE
+```
+
+Next, a Realm module must be configured to **authenticate** the current request using the Kerberos
+credentials that were requested. Aurora ships with a realm module that can do this
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN[,...]
+```
+
+The Kerberos5Realm requires a keytab file and a server principal name. The principal name will usually
+be in the form `HTTP/aurora.example.com@EXAMPLE.COM`.
+
+```
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+```
+
+The Kerberos5 realm module is authentication-only. For scheduler security to work you must also
+enable a realm module that provides an Authorizer implementation. For example, to do this using the
+IniShiroRealmModule:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+```
+
+You can then configure authorization using a security.ini file as described below
+(the password field is ignored). You must configure the realm module with the path to this file:
+
+```
+-shiro_ini_path=path/to/security.ini
+```
+
+### Client Configuration
+To use Kerberos on the client-side you must build Kerberos-enabled client binaries. Do this with
+
+```
+./pants binary src/main/python/apache/aurora/kerberos:kaurora
+./pants binary src/main/python/apache/aurora/kerberos:kaurora_admin
+```
+
+You must also configure each cluster where you've enabled Kerberos on the scheduler
+to use Kerberos authentication. Do this by setting `auth_mechanism` to `KERBEROS`
+in `clusters.json`.
+
+```
+% cat ~/.aurora/clusters.json
+{
+    "devcluser": {
+        "auth_mechanism": "KERBEROS",
+        ...
+    },
+    ...
+}
+```
+
+# Authorization
+Given a means to authenticate the entity a client claims they are, we need to define what privileges they have.
+
+## Using an INI file to define security controls
+
+The simplest security configuration for Aurora is an INI file on the scheduler.  For small
+clusters, or clusters where the users and access controls change relatively infrequently, this is
+likely the preferred approach.  However you may want to avoid this approach if access permissions
+are rapidly changing, or if your access control information already exists in another system.
+
+You can enable INI-based configuration with following scheduler command line arguments:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_ini_path=path/to/security.ini
+```
+
+*note* As the argument name reveals, this is using Shiro’s
+[IniRealm](http://shiro.apache.org/configuration.html#Configuration-INIConfiguration) behind
+the scenes.
+
+The INI file will contain two sections - users and roles.  Here’s an example of what might
+be in security.ini:
+
+```
+[users]
+sally = apple, admin
+jim = 123456, accounting
+becky = letmein, webapp
+larry = 654321,accounting
+steve = password
+
+[roles]
+admin = *
+accounting = thrift.AuroraAdmin:setQuota
+webapp = thrift.AuroraSchedulerManager:*:webapp
+```
+
+The users section defines user credentials and the role(s) each user is a member of.  These lines
+are of the format `<user> = <password>[, <role>...]`.  As you probably noticed, the passwords are
+in plaintext and as a result read access to this file should be restricted.
+
+In this configuration, each user has different privileges for actions in the cluster because
+of the roles they are a part of:
+
+* admin is granted all privileges
+* accounting may adjust the amount of resource quota for any role
+* webapp represents a collection of jobs that represents a service, and its members may create and modify any jobs owned by it
+
+### Caveats
+You might find documentation on the Internet suggesting there are additional sections in `shiro.ini`,
+like `[main]` and `[urls]`. These are not supported by Aurora as it uses a different mechanism to configure
+those parts of Shiro. Think of Aurora's `security.ini` as a subset with only `[users]` and `[roles]` sections.
+
+## Implementing Delegated Authorization
+
+It is possible to leverage Shiro's `runAs` feature by implementing a custom Servlet Filter that provides
+the capability and passing its fully qualified class name to the command line argument
+`-shiro_after_auth_filter`. The filter is registered in the same filter chain as the Shiro auth filters,
+placed after them, which ensures that it is only invoked after the Shiro filters have had a chance to
+authenticate the request.
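+
+For example, assuming a hypothetical filter class `com.example.RunAsFilter` on the scheduler's
+classpath, the scheduler would be started with:
+
+```
+-shiro_after_auth_filter=com.example.RunAsFilter
+```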
+
+# Implementing a Custom Realm
+
+Since Aurora’s security is backed by [Apache Shiro](https://shiro.apache.org), you can implement a
+custom [Realm](http://shiro.apache.org/realm.html) to define organization-specific security behavior.
+
+In addition to using Shiro's standard APIs to implement a Realm you can link against Aurora to
+access the type-safe Permissions Aurora uses. See the Javadoc for `org.apache.aurora.scheduler.spi`
+for more information.
+
+## Packaging a realm module
+Package your custom Realm(s) with a Guice module that exposes a `Set<Realm>` multibinding.
+
+```java
+package com.example;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.multibindings.Multibinder;
+import org.apache.shiro.realm.Realm;
+
+public class MyRealmModule extends AbstractModule {
+  @Override
+  public void configure() {
+    Realm myRealm = new MyRealm();
+
+    Multibinder.newSetBinder(binder(), Realm.class).addBinding().toInstance(myRealm);
+  }
+
+  static class MyRealm implements Realm {
+    // Realm implementation.
+  }
+}
+```
+
+To use your module in the scheduler, include it as a realm module based on its fully-qualified
+class name:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ,com.example.MyRealmModule
+```
+
+
+# Announcer Authentication
+The Thermos executor can be configured to authenticate with ZooKeeper and include
+an [ACL](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl)
+on the nodes it creates, which will specify
+the privileges of clients to perform different actions on these nodes.  This
+feature is enabled by specifying an ACL configuration file to the executor with the
+`--announcer-zookeeper-auth-config` command line argument.
+
+When this feature is _not_ enabled, nodes created by the executor will have 'world/all' permission
+(`ZOO_OPEN_ACL_UNSAFE`).  In most production environments, operators should specify an ACL and
+limit access.
+
+## ZooKeeper Authentication Configuration
+The configuration file must be formatted as JSON with the following schema:
+
+```json
+{
+  "auth": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>"
+    }
+  ],
+  "acl": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>",
+      "permissions": {
+        "read": <bool>,
+        "write": <bool>,
+        "create": <bool>,
+        "delete": <bool>,
+        "admin": <bool>
+      }
+    }
+  ]
+}
+```
+
+The `scheme` defines the encoding of the credential field.  Note that these fields are passed
+directly to ZooKeeper (except in the case of _digest_ scheme, where the executor will hash and
+encode the credential appropriately before passing it to ZooKeeper). In addition to `acl`, a list
+of authentication credentials must be provided in `auth` to use for the connection.
+
+All properties of the `permissions` object will default to `false` if not provided.
+
+## Executor settings
+To enable the executor to authenticate against ZK, `--announcer-zookeeper-auth-config` should be
+set to the path of the configuration file.
+
+
+# Scheduler HTTPS
+
+The Aurora scheduler does not provide native HTTPS support ([AURORA-343](https://issues.apache.org/jira/browse/AURORA-343)).
+It is therefore recommended to deploy it behind an HTTPS capable reverse proxy such as nginx or Apache2.
+
+A simple setup is to launch both the reverse proxy and the Aurora scheduler on the same port, but
+bind the reverse proxy to the public IP of the host and the scheduler to localhost:
+
+    -ip=127.0.0.1
+    -http_port=8081
+
+If your clients connect to the scheduler via [`proxy_url`](../../reference/scheduler-configuration/),
+you can update it to `https`. If you use the ZooKeeper based discovery instead, the scheduler
+needs to be launched via
+
+    -serverset_endpoint_name=https
+
+in order to announce its HTTPS support within ZooKeeper.
+
+
+# Known Issues
+
+While the APIs and SPIs we ship with are stable as of 0.8.0, we are aware of several incremental
+improvements. Please follow, vote, or send patches.
+
+Relevant tickets:
+* [AURORA-1248](https://issues.apache.org/jira/browse/AURORA-1248): Client retries 4xx errors
+* [AURORA-1279](https://issues.apache.org/jira/browse/AURORA-1279): Remove kerberos-specific build targets
+* [AURORA-1291](https://issues.apache.org/jira/browse/AURORA-1291): Consider defining a JSON format in place of INI
+* [AURORA-1179](https://issues.apache.org/jira/browse/AURORA-1179): Supported hashed passwords in security.ini
+* [AURORA-1295](https://issues.apache.org/jira/browse/AURORA-1295): Support security for the ReadOnlyScheduler service
diff --git a/source/documentation/0.20.0/operations/storage.md b/source/documentation/0.20.0/operations/storage.md
new file mode 100644
index 0000000..fc3a1c3
--- /dev/null
+++ b/source/documentation/0.20.0/operations/storage.md
@@ -0,0 +1,96 @@
+# Aurora Scheduler Storage
+
+- [Overview](#overview)
+- [Storage Semantics](#storage-semantics)
+  - [Reads, writes, modifications](#reads-writes-modifications)
+    - [Read lifecycle](#read-lifecycle)
+    - [Write lifecycle](#write-lifecycle)
+  - [Atomicity, consistency and isolation](#atomicity-consistency-and-isolation)
+  - [Population on restart](#population-on-restart)
+
+
+## Overview
+
+The Aurora scheduler maintains data that needs to be persisted in order to survive failovers and restarts.
+For example:
+
+* Task configurations and scheduled task instances
+* Job update configurations and update progress
+* Production resource quotas
+* Mesos resource offer host attributes
+
+Aurora solves its persistence needs by leveraging the
+[Mesos implementation of a Paxos replicated log](http://mesos.apache.org/documentation/latest/replicated-log-internals/)
+[[1]](https://ramcloud.stanford.edu/~ongaro/userstudy/paxos.pdf)
+[[2]](http://en.wikipedia.org/wiki/State_machine_replication) with a key-value
+[LevelDB](https://github.com/google/leveldb) storage as persistence media.
+
+Conceptually, it can be represented by the following major components:
+
+* Volatile storage: in-memory cache of all available data. Implemented via in-memory
+[H2 Database](http://www.h2database.com/html/main.html) and accessed via
+[MyBatis](http://mybatis.github.io/mybatis-3/).
+* Log manager: interface between Aurora storage and Mesos replicated log. The default schema format
+is [thrift](https://github.com/apache/thrift). Data is stored in serialized binary form.
+* Snapshot manager: all data is periodically persisted in Mesos replicated log in a single snapshot.
+This helps establish periodic recovery checkpoints and speeds up volatile storage recovery on
+restart.
+* Backup manager: as a precaution, snapshots are periodically written out into backup files.
+This solves a [disaster recovery problem](../backup-restore/)
+in case of a complete loss or corruption of Mesos log files.
+
+![Storage hierarchy](../images/storage_hierarchy.png)
+
+
+## Storage Semantics
+
+This section covers implementation details of the Aurora storage system. Understanding them can
+sometimes be useful when investigating performance issues.
+
+### Reads, writes, modifications
+
+All services in Aurora access data via a set of predefined store interfaces (aka stores) logically
+grouped by the type of data they serve. Every interface defines a specific set of operations allowed
+on the data, thus abstracting out the storage access and the actual persistence implementation. The
+latter is especially important in view of the general immutability of persisted data. With the Mesos
+replicated log as the underlying persistence solution, data can be read and written easily but not
+modified. All modifications are simulated by saving new versions of modified objects. This feature
+and general performance considerations justify the existence of the volatile in-memory store.
+
+#### Read lifecycle
+
+There are two types of reads available in Aurora: consistent and weakly-consistent. The difference
+is explained [below](#atomicity-consistency-and-isolation).
+
+All reads are served from the volatile storage, making reads generally cheap operations
+from a performance standpoint. The majority of the volatile stores are represented by the
+in-memory H2 database. This allows for rich schema definitions, queries and relationships that
+key-value storage is unable to match.
+
+#### Write lifecycle
+
+Writes are more involved operations since, in addition to updating the volatile store, data has to be
+appended to the replicated log. Data is not available for reads until fully acknowledged by both the
+replicated log and the volatile storage.
+
+### Atomicity, consistency and isolation
+
+Aurora uses [write-ahead logging](http://en.wikipedia.org/wiki/Write-ahead_logging) to ensure
+consistency between replicated and volatile storage. In Aurora, data is first written into the
+replicated log and only then updated in the volatile store.
+
+Aurora storage uses read-write locks to serialize data mutations and provide consistent view of the
+available data. The available `Storage` interface exposes 3 major types of operations:
+* `consistentRead` - access is locked using reader's lock and provides consistent view on read
+* `weaklyConsistentRead` - access is lock-less. Delivers best contention performance but may result
+in stale reads
+* `write` - access is fully serialized by using writer's lock. Operation success requires both
+volatile and replicated writes to succeed.
+
+The consistency of the volatile store is enforced via H2 transactional isolation.
+
+### Population on restart
+
+Any time a scheduler restarts, it restores its volatile state from the most recent position recorded
+in the replicated log by restoring the snapshot and replaying individual log entries on top to fully
+recover the state up to the last write.
diff --git a/source/documentation/0.20.0/operations/troubleshooting.md b/source/documentation/0.20.0/operations/troubleshooting.md
new file mode 100644
index 0000000..a7d4ef2
--- /dev/null
+++ b/source/documentation/0.20.0/operations/troubleshooting.md
@@ -0,0 +1,106 @@
+# Troubleshooting
+
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions here to help get you moving.
+
+## Replicated log not initialized
+
+### Symptoms
+- Scheduler RPCs and web interface claim `Storage is not READY`
+- Scheduler log repeatedly prints messages like
+
+  ```
+  I1016 16:12:27.234133 26081 replica.cpp:638] Replica in EMPTY status
+  received a broadcasted recover request
+  I1016 16:12:27.234256 26084 recover.cpp:188] Received a recover response
+  from a replica in EMPTY status
+  ```
+
+### Solution
+When you create a new cluster, you need to inform a quorum of schedulers that they are safe to
+consider their database to be empty by [initializing](../installation/#finalizing) the
+replicated log. This is done to prevent the scheduler from modifying the cluster state in the event
+of multiple simultaneous disk failures or, more likely, misconfiguration of the replicated log path.
+
+
+## No distinct leader elected
+
+### Symptoms
+Either no scheduler or multiple schedulers believe they are leading.
+
+### Solution
+Verify the [network configuration](../configuration/#network-configuration) of the Aurora
+scheduler is correct:
+
+* The `LIBPROCESS_IP:LIBPROCESS_PORT` endpoints must be reachable from all coordinator nodes running
+  a scheduler or a Mesos master.
+* Hostname lookups have to resolve to public IPs rather than local ones that cannot be reached
+  from another node.
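+
+For the first point, the scheduler's libprocess endpoint can be pinned to a publicly reachable
+address via environment variables before starting the scheduler; the values below are purely
+illustrative:
+
+    export LIBPROCESS_IP=192.168.33.7
+    export LIBPROCESS_PORT=8083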
+
+In addition, double-check the [quorum settings](../configuration/#replicated-log-configuration) of the
+replicated log.
+
+
+## Scheduler not registered
+
+### Symptoms
+Scheduler log contains
+
+    Framework has not been registered within the tolerated delay.
+
+### Solution
+Double-check that the scheduler is configured correctly to reach the Mesos master. If you are registering
+the master in ZooKeeper, make sure the command line argument to the master:
+
+    --zk=zk://$ZK_HOST:2181/mesos/master
+
+is the same as the one on the scheduler:
+
+    -mesos_master_address=zk://$ZK_HOST:2181/mesos/master
+
+
+## Scheduler not running
+
+### Symptoms
+The scheduler process commits suicide regularly. This happens under error conditions, but
+also on purpose in regular intervals.
+
+### Solution
+Aurora is meant to be run under supervision. You have to configure a supervisor like
+[Monit](http://mmonit.com/monit/), [supervisord](http://supervisord.org/), or systemd to run the
+scheduler and restart it whenever it fails or exits on purpose.
+
+Aurora supports an active health checking protocol on its admin HTTP interface - if a `GET /health`
+times out or returns anything other than `200 OK` the scheduler process is unhealthy and should be
+restarted.
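+
+A manual check from the scheduler host might look like the following (assuming `-http_port=8081`
+as in the monit example below):
+
+    $ curl -s http://localhost:8081/health
+    OK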
+
+For example, monit can be configured with
+
+    if failed port 8081 send "GET /health HTTP/1.0\r\n" expect "OK\n" with timeout 2 seconds for 10 cycles then restart
+
+assuming you set `-http_port=8081`.
+
+
+## Executor crashing or hanging
+
+### Symptoms
+Launched task instances never transition to `STARTING` or `RUNNING` but immediately transition
+to `FAILED` or `LOST`.
+
+### Solution
+The executor might be failing due to unknown internal errors such as a missing native dependency
+of the Mesos executor library. Open the Mesos UI and navigate to the failing
+task in question. Inspect the various log files in order to learn about what is going on.
+
+
+## Observer does not discover tasks
+
+### Symptoms
+The observer UI does not list any tasks. When navigating from the scheduler UI to the state of
+a particular task instance the observer returns `Error: 404 Not Found`.
+
+### Solution
+The observer refreshes its internal state every couple of seconds. If waiting a few seconds
+does not resolve the issue, check that the `--mesos-root` setting of the observer and the
+`--work_dir` option of the Mesos agent are in sync. For details, see our
+[Install instructions](../installation/#worker-configuration).
diff --git a/source/documentation/0.20.0/operations/upgrades.md b/source/documentation/0.20.0/operations/upgrades.md
new file mode 100644
index 0000000..635faf5
--- /dev/null
+++ b/source/documentation/0.20.0/operations/upgrades.md
@@ -0,0 +1,41 @@
+# Upgrading Aurora
+
+Aurora can be updated from one version to the next without any downtime or restarts of running
+jobs. The same holds true for Mesos.
+
+Generally speaking, Mesos and Aurora strive for a +1/-1 version compatibility, i.e. all components
+are meant to be forward and backwards compatible for at least one version. This implies it
+does not really matter in which order updates are carried out.
+
+Exceptions to this rule are documented in the [Aurora release-notes](../../../RELEASE-NOTES/)
+and the [Mesos upgrade instructions](https://mesos.apache.org/documentation/latest/upgrades/).
+
+
+## Instructions
+
+To upgrade Aurora, follow these steps:
+
+1. Update the first scheduler instance by updating its software and restarting its process.
+2. Wait until the scheduler is up and its [Replicated Log](../configuration/#replicated-log-configuration)
+   has caught up with the other schedulers in the cluster. The log has caught up if `log/recovered` has
+   the value `1`. You can check the metric via `curl LIBPROCESS_IP:LIBPROCESS_PORT/metrics/snapshot`
+   (see the example after this list), where IP and port refer to the [libmesos configuration](../configuration/#network-configuration)
+   settings of the scheduler instance.
+3. Proceed with the next scheduler until all instances are updated.
+4. Update the Aurora executor deployed to the compute nodes of your cluster. Jobs will continue
+   running with the old version of the executor, and will only be launched by the new one once
+   they are restarted eventually due to natural cluster churn.
+5. Distribute the new Aurora client to your users.
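+
+For step 2, checking the replicated log state might look like the following sketch; the address,
+port, and exact output format are illustrative:
+
+    $ curl -s http://192.168.33.7:8083/metrics/snapshot | python -mjson.tool | grep log/recovered
+        "log/recovered": 1,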
+
+
+## Best Practices
+
+Even though not absolutely mandatory, we advise adhering to the following rules:
+
+* Never skip any major or minor releases when updating. If you have to catch up several releases you
+  have to deploy all intermediary versions. Skipping bugfix releases is acceptable though.
+* Verify all updates on a test cluster before touching your production deployments.
+* To minimize the number of failovers during updates, update the currently leading scheduler
+  instance last.
+* Update the Aurora executor on a subset of compute nodes as a canary before deploying the change to
+  the whole fleet.
diff --git a/source/documentation/0.20.0/reference/client-cluster-configuration.md b/source/documentation/0.20.0/reference/client-cluster-configuration.md
new file mode 100644
index 0000000..ff7a3ac
--- /dev/null
+++ b/source/documentation/0.20.0/reference/client-cluster-configuration.md
@@ -0,0 +1,99 @@
+# Client Cluster Configuration
+
+A cluster configuration file is used by the Aurora client to describe the Aurora clusters with
+which it can communicate. Ultimately this allows client users to reference clusters with short names
+like us-east and eu.
+
+A cluster configuration is formatted as JSON.  The simplest cluster configuration is one that
+communicates with a single (non-leader-elected) scheduler.  For example:
+
+    [{
+      "name": "example",
+      "scheduler_uri": "http://localhost:55555",
+    }]
+
+
+A configuration for a leader-elected scheduler would contain something like:
+
+    [{
+      "name": "example",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler"
+    }]
+
+
+The following properties may be set:
+
+  **Property**             | **Type** | **Description**
+  :------------------------| :------- | :--------------
+   **name**                | String   | Cluster name (Required)
+   **slave_root**          | String   | Path to Mesos agent work dir (Required)
+   **slave_run_directory** | String   | Name of Mesos agent run dir (Required)
+   **zk**                  | String   | Hostname of ZooKeeper instance used to resolve Aurora schedulers.
+   **zk_port**             | Integer  | Port of ZooKeeper instance used to locate Aurora schedulers (Default: 2181)
+   **scheduler_zk_path**   | String   | ZooKeeper path under which scheduler instances are registered.
+   **scheduler_uri**       | String   | URI of Aurora scheduler instance.
+   **proxy_url**           | String   | Used by the client to format URLs for display.
+   **auth_mechanism**      | String   | The authentication mechanism to use when communicating with the scheduler. (Default: UNAUTHENTICATED)
+   **docker_registry**     | String   | Used by the client to resolve docker tags.
+
+
+## Details
+
+### `name`
+
+The name of the Aurora cluster represented by this entry. This name will be the `cluster` portion of
+any job keys identifying jobs running within the cluster.
+
+### `slave_root`
+
+The path on the Mesos agents where executing tasks can be found. It is used in combination with the
+`slave_run_directory` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This value should match the value passed to `mesos-slave`
+as `--work_dir`.
+
+### `slave_run_directory`
+
+The name of the directory where the task run can be found. This is used in combination with the
+`slave_root` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This should almost always be set to `latest`.
+
+### `zk`
+
+The hostname of the ZooKeeper instance used to resolve the Aurora scheduler. Aurora uses ZooKeeper
+to elect a leader. The client will connect to this ZooKeeper instance to determine the current
+leader. This host should match the host passed to the scheduler as `-zk_endpoints`.
+
+### `zk_port`
+
+The port on which the ZooKeeper instance is running. If not set this will default to the standard
+ZooKeeper port of 2181. This port should match the port in the host passed to the scheduler as
+`-zk_endpoints`.
+
+### `scheduler_zk_path`
+
+The path on the ZooKeeper instance under which the Aurora serverset is registered. This value should
+match the value passed to the scheduler as `-serverset_path`.
+
+### `scheduler_uri`
+
+The URI of the scheduler. This would be used in place of the ZooKeeper related configuration above
+in circumstances where direct communication with a single scheduler is needed (e.g. testing
+environments). It is strongly advised to **never** use this property for production deploys.
+
+### `proxy_url`
+
+Instead of using the hostname of the leading scheduler as the base url, if `proxy_url` is set, its
+value will be used instead. In that scenario the value for `proxy_url` would be, for example, the
+URL of your VIP in a loadbalancer or a roundrobin DNS name.
+
+### `auth_mechanism`
+
+The identifier of an authentication mechanism that the client should use when communicating with the
+scheduler. Support for values other than `UNAUTHENTICATED` requires a matching scheduler-side
+[security configuration](../../operations/security/).
+
+### `docker_registry`
+
+The URI of the Docker Registry that will be used by the Aurora client to resolve docker tags to concrete
+image ids, when using the docker binding helper, like `{{docker.image[name][tag]}}`.
diff --git a/source/documentation/0.20.0/reference/client-commands.md b/source/documentation/0.20.0/reference/client-commands.md
new file mode 100644
index 0000000..49b45ca
--- /dev/null
+++ b/source/documentation/0.20.0/reference/client-commands.md
@@ -0,0 +1,339 @@
+Aurora Client Commands
+======================
+
+- [Introduction](#introduction)
+- [Cluster Configuration](#cluster-configuration)
+- [Job Keys](#job-keys)
+- [Modifying Aurora Client Commands](#modifying-aurora-client-commands)
+- [Regular Jobs](#regular-jobs)
+    - [Creating and Running a Job](#creating-and-running-a-job)
+    - [Running a Command On a Running Job](#running-a-command-on-a-running-job)
+    - [Killing a Job](#killing-a-job)
+    - [Adding Instances](#adding-instances)
+    - [Updating a Job](#updating-a-job)
+        - [Coordinated job updates](#coordinated-job-updates)
+    - [Renaming a Job](#renaming-a-job)
+    - [Restarting Jobs](#restarting-jobs)
+- [Cron Jobs](#cron-jobs)
+- [Comparing Jobs](#comparing-jobs)
+- [Viewing/Examining Jobs](#viewingexamining-jobs)
+    - [Listing Jobs](#listing-jobs)
+    - [Inspecting a Job](#inspecting-a-job)
+    - [Versions](#versions)
+    - [Checking Your Quota](#checking-your-quota)
+    - [Finding a Job on Web UI](#finding-a-job-on-web-ui)
+    - [Getting Job Status](#getting-job-status)
+    - [Opening the Web UI](#opening-the-web-ui)
+    - [SSHing to a Specific Task Machine](#sshing-to-a-specific-task-machine)
+    - [SCPing with Specific Task Machines](#scping-with-specific-task-machines)
+    - [Templating Command Arguments](#templating-command-arguments)
+
+Introduction
+------------
+
+Once you have written an `.aurora` configuration file that describes
+your Job and its parameters and functionality, you interact with Aurora
+using Aurora Client commands. This document describes all of these commands
+and how and when to use them. All Aurora Client commands start with
+`aurora`, followed by the name of the specific command and its
+arguments.
+
+*Job keys* are a very common argument to Aurora commands, as well as the
+gateway to useful information about a Job. Before using Aurora, you
+should read the next section which describes them in detail. The section
+after that briefly describes how you can modify the behavior of certain
+Aurora Client commands, linking to a detailed document about how to do
+that.
+
+This is followed by the Regular Jobs section, which describes the basic
+Client commands for creating, running, and manipulating Aurora Jobs.
+After that are sections on Comparing Jobs and Viewing/Examining Jobs. In
+other words, various commands for getting information and metadata about
+Aurora Jobs.
+
+Cluster Configuration
+---------------------
+
+The client must be able to find a configuration file that specifies available clusters. This file
+declares shorthand names for clusters, which are in turn referenced by job configuration files
+and client commands.
+
+The client will load at most two configuration files, making both of their defined clusters
+available. The first is intended to be a system-installed cluster, using the path specified in
+the environment variable `AURORA_CONFIG_ROOT`, defaulting to `/etc/aurora/clusters.json` if the
+environment variable is not set. The second is a user-installed file, located at
+`~/.aurora/clusters.json`.
+
+For more details on cluster configuration see the
+[Client Cluster Configuration](../client-cluster-configuration/) documentation.
+
+Job Keys
+--------
+
+A job key is a unique system-wide identifier for an Aurora-managed
+Job, for example `cluster1/web-team/test/experiment204`. It is a 4-tuple
+consisting of, in order, *cluster*, *role*, *environment*, and
+*jobname*, separated by slashes. Cluster is the name of an Aurora
+cluster. Role is the Unix service account under which the Job
+runs. Environment is a namespace component like `devel`, `test`,
+`prod`, or `stagingN`. Jobname is the Job's name.
+
+The combination of all four values uniquely specifies the Job. If any
+one value differs from that of another job key, the two job keys
+refer to different Jobs. For example, the job keys
+`cluster1/tyg/prod/workhorse`,
+`cluster1/tyg/prod/workcamel`,
+`cluster2/tyg/prod/workhorse`,
+`cluster2/foo/prod/workhorse`, and
+`cluster1/tyg/test/workhorse` all refer to different Jobs.
+
+Role names are user accounts existing on the agent machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
+Modifying Aurora Client Commands
+--------------------------------
+
+For certain Aurora Client commands, you can define hook methods that run
+either before or after an action that takes place during the command's
+execution, as well as based on whether the action finished successfully or failed
+during execution. Basically, a hook is code that lets you extend the
+command's actions. The hook executes on the client side, specifically on
+the machine executing Aurora commands.
+
+Hooks can be associated with these Aurora Client commands:
+
+  - `job create`
+  - `job kill`
+  - `job restart`
+
+The process for writing and activating them is complex enough
+that we explain it in a devoted document, [Hooks for Aurora Client API](../client-hooks/).
+
+Regular Jobs
+------------
+
+This section covers Aurora commands related to running, killing,
+renaming, updating, and restarting a basic Aurora Job.
+
+### Creating and Running a Job
+
+    aurora job create <job key> <configuration file>
+
+Creates and then runs a Job with the specified job key based on a `.aurora` configuration file.
+The configuration file may also contain and activate hook definitions.
+
+### Running a Command On a Running Job
+
+    aurora task run CLUSTER/ROLE/ENV/NAME[/INSTANCES] <cmd>
+
+Runs a shell command on all machines currently hosting shards of a
+single Job.
+
+`run` supports the same command line wildcards used to populate a Job's
+commands; i.e. anything in the `{{mesos.*}}` and `{{thermos.*}}`
+namespaces.
+
+### Killing a Job
+
+    aurora job killall CLUSTER/ROLE/ENV/NAME
+
+Kills all Tasks associated with the specified Job, blocking until all
+are terminated. Defaults to killing all instances in the Job.
+
+The `<configuration file>` argument for `kill` is optional. Use it only
+if it contains hook definitions and activations that affect the
+kill command.
+
+### Adding Instances
+
+    aurora job add CLUSTER/ROLE/ENV/NAME/INSTANCE <count>
+
+Adds `<count>` instances to the existing job. The configuration of the new instances is derived from
+an active job instance pointed to by the `/INSTANCE` part of the job specification. This command is
+a simpler way to scale out an existing job when an instance with desired task configuration
+already exists. Use `aurora update start` to add instances with a new (updated) configuration.
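+
+For example, assuming a hypothetical job `devcluster/www-data/prod/hello` with a running
+instance `0`, the following would add two more instances cloned from that instance's
+configuration:
+
+    aurora job add devcluster/www-data/prod/hello/0 2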
+
+### Updating a Job
+
+You can manage job updates using the `aurora update` command.  Please see
+[the Job Update documentation](../../features/job-updates/) for more details.
+
+
+### Renaming a Job
+
+Renaming is a tricky operation as downstream clients must be informed of
+the new name. A conservative approach
+to renaming suitable for production services is:
+
+1.  Modify the Aurora configuration file to change the role,
+    environment, and/or name as appropriate to the standardized naming
+    scheme.
+2.  Check that only these naming components have changed
+    with `aurora diff`.
+
+        aurora job diff CLUSTER/ROLE/ENV/NAME <job_configuration>
+
+3.  Create the (identical) job at the new key. You may need to request a
+    temporary quota increase.
+
+        aurora job create CLUSTER/ROLE/ENV/NEW_NAME <job_configuration>
+
+4.  Migrate all clients over to the new job key. Update all links and
+    dashboards. Ensure that both job keys run identical versions of the
+    code while in this state.
+5.  After verifying that all clients have successfully moved over, kill
+    the old job.
+
+        aurora job killall CLUSTER/ROLE/ENV/NAME
+
+6.  If you received a temporary quota increase, be sure to let the
+    powers that be know you no longer need the additional capacity.
+
+### Restarting Jobs
+
+`restart` restarts all shards of the Job identified by a job key:
+
+    aurora job restart CLUSTER/ROLE/ENV/NAME[/INSTANCES]
+
+Restarts are controlled on the client side, so aborting
+the `job restart` command halts the restart operation.
+
+**Note**: `job restart` only applies its command line arguments and neither
+uses nor is affected by `update.config`. Restarting
+does ***not*** involve a configuration change. To update the
+configuration, use `update.config`.
+
+The `--config` argument for restart is optional. Use it only
+if it contains hook definitions and activations that affect the
+`job restart` command.
+
+Cron Jobs
+---------
+
+You can manage cron jobs using the `aurora cron` command.  Please see
+[the Cron Jobs Feature](../../features/cron-jobs/) for more details.
+
+Comparing Jobs
+--------------
+
+    aurora job diff CLUSTER/ROLE/ENV/NAME <job configuration>
+
+Compares a job configuration against a running job. By default the diff
+is determined using `diff`, though you may choose an alternate diff
+program by specifying the `DIFF_VIEWER` environment variable.
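+
+For example, to view the diff with an alternate program for a single invocation
+(the job key and file name here are illustrative):
+
+    DIFF_VIEWER=vimdiff aurora job diff devcluster/www-data/prod/hello hello_world.aurora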
+
+Viewing/Examining Jobs
+----------------------
+
+Above we discussed creating, killing, and updating Jobs. Here we discuss
+how to view and examine Jobs.
+
+### Listing Jobs
+
+    aurora config list <job configuration>
+
+Lists all Jobs registered with the Aurora scheduler in the named cluster for the named role.
+
+### Inspecting a Job
+
+    aurora job inspect CLUSTER/ROLE/ENV/NAME <job configuration>
+
+`inspect` verifies that its specified job can be parsed from a
+configuration file, and displays the parsed configuration.
+
+### Checking Your Quota
+
+    aurora quota get CLUSTER/ROLE
+
+Prints the production quota allocated to the role at the given
+cluster. Only non-[dedicated](../../features/constraints/#dedicated-attribute)
+[production](../configuration/#job-objects) jobs consume quota.
+
+### Finding a Job on Web UI
+
+When you create a job, part of the output response contains a URL that goes
+to the job's scheduler UI page. For example:
+
+    vagrant@precise64:~$ aurora job create devcluster/www-data/prod/hello /vagrant/examples/jobs/hello_world.aurora
+    INFO] Creating job hello
+    INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+    INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+You can go to the scheduler UI page for this job via `http://precise64:8081/scheduler/www-data/prod/hello`.
+You can go to the overall scheduler UI page by truncating that URL after `scheduler`: `http://precise64:8081/scheduler`.
+
+Once you click through to a role page, you see Jobs grouped
+into pending, active, and finished jobs.
+Jobs are arranged by role, typically a service account for
+production jobs and user accounts for test or development jobs.
+
+### Getting Job Status
+
+    aurora job status <job_key>
+
+Returns the status of recent tasks associated with the Job specified by
+`job_key` in its supplied cluster. Typically this includes
+a mix of active tasks (running or assigned) and inactive tasks
+(successful, failed, and lost).
+
+### Opening the Web UI
+
+Use the Job's web UI scheduler URL or the `aurora job status` command to find out on which
+machines individual tasks are scheduled. You can open the web UI via the
+`aurora job open` command when invoked from your machine:
+
+    aurora job open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+If only the cluster is specified, it goes directly to that cluster's
+scheduler main page. If the role is specified, it goes to the top-level
+role page. If the full job key is specified, it goes directly to the job
+page where you can inspect individual tasks.
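+
+For example, each of the following opens a progressively more specific page
+(cluster, role, and job names are illustrative):
+
+    aurora job open devcluster
+    aurora job open devcluster/www-data
+    aurora job open devcluster/www-data/prod/hello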
+
+### SSHing to a Specific Task Machine
+
+    aurora task ssh <job_key> <shard number>
+
+You can have the Aurora client ssh directly to the machine that has been
+assigned a particular Job/shard number. This may be useful for quickly
+diagnosing issues such as performance issues or abnormal behavior on a
+particular machine.
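+
+For example, to ssh to the machine running instance `0` of a hypothetical job:
+
+    aurora task ssh devcluster/www-data/prod/hello 0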
+
+### SCPing with Specific Task Machines
+
+    aurora task scp [<cluster>/<role>/<env>/<job_name>/<instance_id>]:source [<cluster>/<role>/<env>/<job_name>/<instance_id>]:dest
+
+You can have the Aurora client copy file(s)/folder(s) to, from, and between
+individual tasks. The sandbox folder serves as the relative root and is the
+same folder you see when you browse `chroot` from the Scheduler task UI. You
+can also use absolute paths (like for `/tmp`), but tilde expansion is not
+supported. Currently, this command is only fully supported for Mesos
+containers. Users may use this to copy files from Docker containers but they
+cannot copy files to them.
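+
+As a sketch of what an invocation might look like, copying a file out of instance `0`'s
+sandbox to the local working directory (job key and file name are hypothetical):
+
+    aurora task scp devcluster/www-data/prod/hello/0:output.txt output.txt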
+
+### Templating Command Arguments
+
+    aurora task run [-e] [-t THREADS] <job_key> -- <<command-line>>
+
+Given a job specification, run the supplied command on all hosts and
+return the output. You may use the standard Mustache templating rules:
+
+- `{{thermos.ports[name]}}` substitutes the specific named port of the
+  task assigned to this machine
+- `{{mesos.instance}}` substitutes the shard id of the job's task
+  assigned to this machine
+- `{{thermos.task_id}}` substitutes the task id of the job's task
+  assigned to this machine
+
+For example, the following type of pattern can be a powerful diagnostic
+tool:
+
+    aurora task run -t5 cluster1/tyg/devel/seizure -- \
+      'curl -s -m1 localhost:{{thermos.ports[http]}}/vars | grep uptime'
+
+By default, the command runs in the Task's sandbox. The `-e` option can
+run the command in the executor's sandbox. This is mostly useful for
+Aurora administrators.
+
+You can parallelize the runs by using the `-t` option.
diff --git a/source/documentation/0.20.0/reference/client-hooks.md b/source/documentation/0.20.0/reference/client-hooks.md
new file mode 100644
index 0000000..ee64193
--- /dev/null
+++ b/source/documentation/0.20.0/reference/client-hooks.md
@@ -0,0 +1,228 @@
+# Hooks for Aurora Client API
+
+You can execute hook methods around Aurora API Client methods when they are called by the Aurora Command Line commands.
+
+Explaining how hooks work is a bit tricky because of some indirection about what they apply to. Basically, a hook is code that executes when a particular Aurora Client API method runs, letting you extend the method's actions. The hook executes on the client side, specifically on the machine executing Aurora commands.
+
+The catch is that hooks are associated with Aurora Client API methods, which users don't directly call. Instead, users call Aurora Command Line commands, which call Client API methods during their execution. Since which hooks run depends on which Client API methods get called, you will need to know which Command Line commands call which API methods. Later on, there is a table showing the various associations.
+
+**Terminology Note**: From now on, "method(s)" refer to Client API methods, and "command(s)" refer to Command Line commands.
+
+- [Hook Types](#hook-types)
+- [Execution Order](#execution-order)
+- [Hookable Methods](#hookable-methods)
+- [Activating and Using Hooks](#activating-and-using-hooks)
+- [.aurora Config File Settings](#aurora-config-file-settings)
+- [Command Line](#command-line)
+- [Hooks Protocol](#hooks-protocol)
+  - [pre_ Methods](#pre_-methods)
+  - [err_ Methods](#err_-methods)
+  - [post_ Methods](#post_-methods)
+- [Generic Hooks](#generic-hooks)
+- [Hooks Process Checklist](#hooks-process-checklist)
+
+
+## Hook Types
+
+Hooks have three basic types, differing by when they run with respect to their associated method.
+
+`pre_<method_name>`: When its associated method is called, the `pre_` hook executes first, then the called method. If the `pre_` hook fails, the method never runs. Later code that expected the method to succeed may be affected by this, which can result in the Aurora client terminating.
+
+Note that a `pre_` hook can error-trap internally so it does not
+return `False`. Designers/contributors of new `pre_` hooks should
+consider whether or not to error-trap them. You can error trap at the
+highest level very generally and always pass the `pre_` hook by
+returning `True`. For example:
+
+    def pre_create(...):
+      do_something()  # if do_something fails with an exception, the create_job is not attempted!
+      return True
+
+    # However...
+    def pre_create(...):
+      try:
+        do_something()  # may cause exception
+      except Exception:  # generic error trap will catch it
+        pass  # and ignore the exception
+      return True  # create_job will run in any case!
+
+`post_<method_name>`: A `post_` hook executes after its associated method successfully finishes running. If it fails, the already executed method is unaffected. A `post_` hook's error is trapped, and any later operations are unaffected.
+
+`err_<method_name>`: Executes only when its associated method returns a status other than OK or throws an exception. If an `err_` hook fails, the already executed method is unaffected. An `err_` hook's error is trapped, and any later operations are unaffected.
+
+## Execution Order
+
+A command with `pre_`, `post_`, and `err_` hooks defined and activated for its called method executes in the following order when the method successfully executes:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and successfully finishes
+6. `post_` method hook runs
+7. Command code executes
+8. Command execution ends
+
+The following is what happens when, for the same command and hooks, the method associated with the command suffers an error and does not successfully finish executing:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and fails
+6. `err_` method hook runs
+7. Command Code executes (if `err_` method does not end the command execution)
+8. Command execution ends
+
+Note that the `post_` and `err_` hooks for the same method can never both run for a single execution of that method.
+
+## Hookable Methods
+
+You can associate `pre_`, `post_`, and `err_` hooks with the following methods. Since you do not directly interact with the methods, but rather the Aurora Command Line commands that call them, for each method we also list the command(s) that can call the method. Note that a different method or methods may be called by a command depending on how the command's other code executes. Similarly, multiple commands can call the same method. We also list the methods' argument signatures, which are used by their associated hooks. <a name="Chart"></a>
+
+  Aurora Client API Method | Client API Method Argument Signature | Aurora Command Line Command
+  -------------------------| ------------------------------------- | ---------------------------
+  ```create_job``` | ```self```, ```config``` | ```job create```, ```runtask```
+  ```restart``` | ```self```, ```job_key```, ```shards```, ```update_config```, ```health_check_interval_seconds``` | ```job restart```
+  ```kill_job``` | ```self```, ```job_key```, ```shards=None``` |  ```job kill```
+  ```start_cronjob``` | ```self```, ```job_key``` | ```cron start```
+  ```start_job_update``` | ```self```, ```config```, ```instances=None``` | ```update start```
+
+Some specific examples:
+
+* `pre_create_job` executes when a `create_job` method is called, and before the `create_job` method itself executes.
+
+* `post_cancel_update` executes after a `cancel_update` method has successfully finished running.
+
+* `err_kill_job` executes when the `kill_job` method is called, but doesn't successfully finish running.
+
+## Activating and Using Hooks
+
+By default, hooks are inactive. If you do not want to use hooks, you do not need to make any changes to your code. If you do want to use hooks, you will need to alter your `.aurora` config file to activate them, both for the configuration as a whole and for individual `Job`s. And, of course, you will need to define in your config file what happens when a particular hook executes.
+
+## .aurora Config File Settings
+
+You can define a top-level `hooks` variable in any `.aurora` config file. `hooks` is a list of all objects that define hooks used by `Job`s defined in that config file. If you do not want to define any hooks for a configuration, `hooks` is optional.
+
+    hooks = [Object_with_defined_hooks1, Object_with_defined_hooks2]
+
+Be careful when assembling a config file using `include` on multiple smaller config files. If there are multiple files that assign a value to `hooks`, only the last assignment made will stick. For example, if `x.aurora` has `hooks = [a, b, c]` and `y.aurora` has `hooks = [d, e, f]` and `z.aurora` has, in this order, `include x.aurora` and `include y.aurora`, the `hooks` value will be `[d, e, f]`.
+
+Also, for any `Job` that you want to use hooks with, its `Job` definition in the `.aurora` config file must set an `enable_hooks` flag to `True` (it defaults to `False`). By default, hooks are disabled and you must enable them for `Job`s of your choice.
+
+To summarize, to use hooks for a particular job, you must both activate hooks for your config file as a whole, and for that job. Activating hooks only for individual jobs won't work, nor will only activating hooks for your config file as a whole. You must also specify the hooks' defining object in the `hooks` variable.
+
+Recall that `.aurora` config files are written in Pystachio. So the following turns on hooks for production jobs at cluster1 and cluster2, but leaves them off for similar jobs with a defined user role. Of course, you also need to list the objects that define the hooks in your config file's `hooks` variable.
+
+    jobs = [
+            Job(enable_hooks = True, cluster = c, env = 'prod') for c in ('cluster1', 'cluster2')
+           ]
+    jobs.extend(
+       Job(cluster = c, env = 'prod', role = getpass.getuser()) for c in ('cluster1', 'cluster2'))
+       # Hooks disabled for these jobs
+
+## Command Line
+
+All Aurora Command Line commands now accept an `.aurora` config file as an optional parameter (some, of course, accept it as a required parameter). Whenever a command has a `.aurora` file parameter, any hooks specified and activated in the `.aurora` file can be used. For example:
+
+    aurora job restart cluster1/role/env/app myapp.aurora
+
+The command activates any hooks specified and activated in `myapp.aurora`. For the `restart` command, that is the only thing the `myapp.aurora` parameter does. So, if the command was the following, since there is no `.aurora` config file to specify any hooks, no hooks on the `restart` command can run.
+
+    aurora job restart cluster1/role/env/app
+
+## Hooks Protocol
+
+Any object defined in the `.aurora` config file can define hook methods. You should define your hook methods within a class, and then use the class name as a value in the `hooks` list in your config file.
+
+Note that you can define other methods in the class that its hook methods can call; all the logic of a hook does not have to be in its definition.
+
+The following example defines a class containing a `pre_kill_job` hook definition that calls another method defined in the class.
+
+    # Defines a method pre_kill_job
+    class KillConfirmer(object):
+      def confirm(self, msg):
+        return raw_input(msg).lower() == 'yes'
+
+      def pre_kill_job(self, job_key, shards=None):
+        shards = ('shards %s' % shards) if shards is not None else 'all shards'
+        return self.confirm('Are you sure you want to kill %s (%s)? (yes/no): '
+                            % (job_key, shards))
+
+### pre_ Methods
+
+`pre_` methods have the signature:
+
+    pre_<API method name>(self, <associated method's signature>)
+
+`pre_` methods have the same signature as their associated method, with the addition of `self` as the first parameter. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `pre_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+If this method returns False, the API command call aborts.
+
+### err_ Methods
+
+`err_` methods have the signature:
+
+    err_<API method name>(self, exc, <associated method's signature>)
+
+`err_` methods have the same signature as their associated method, with the addition of a first parameter `self` and a second parameter `exc`. `exc` is either a result with responseCode other than `ResponseCode.OK` or an `Exception`. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `err_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+`err_` method return codes are ignored.
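+
+As an illustrative sketch (not a built-in hook), an `err_` hook for `kill_job`
+could log the failure before the client moves on:
+
+    class KillLogger(object):
+      def err_kill_job(self, exc, job_key, shards=None):
+        # exc is either a non-OK response or an Exception; the return value is ignored.
+        print('kill_job failed for %s (shards=%s): %s' % (job_key, shards, exc))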
+
+### post_ Methods
+
+`post_` methods have the signature:
+
+    post_<API method name>(self, result, <associated method signature>)
+
+`post_` method parameters are `self`, then `result`, followed by the same parameter signature as their associated method. `result` is the result of the associated method call. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `post_` methods, you can use the `*` and `**` syntax to designate that all unspecified arguments are passed in a list to the `*`ed parameter and all unspecified named arguments with values are passed as name/value pairs to the `**`ed parameter.
+
+`post_` method return codes are ignored.
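+
+For example (an illustrative sketch, not a built-in hook), a `post_` hook for
+`create_job` might simply report that the call completed:
+
+    class CreateNotifier(object):
+      def post_create_job(self, result, config):
+        # result is the value returned by create_job; this hook's return value is ignored.
+        print('create_job completed with result: %s' % (result,))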
+
+## Generic Hooks
+
+There are seven Aurora API Methods which any of the three hook types can attach to. Thus, there are 21 possible hook/method combinations for a single `.aurora` config file. Say that you define `pre_` and `post_` hooks for the `restart` method. That leaves 19 undefined hook/method combinations; `err_restart` and the 3 `pre_`, `post_`, and `err_` hooks for each of the other 6 hookable methods. You can define what happens when any of these otherwise undefined 19 hooks execute via a generic hook, whose signature is:
+
+    generic_hook(self, hook_config, event, method_name, result_or_err, args*, kw**)
+
+where:
+
+* `hook_config` is a named tuple of `config` (the Pystachio `config` object) and `job_key`.
+
+* `event` is one of `pre`, `err`, or `post`, indicating which type of hook the generic hook is standing in for. For example, assume no specific hooks were defined for the `restart` API command. If `generic_hook` is defined and activated, and `restart` is called, `generic_hook` will effectively run as `pre_restart`, `post_restart`, and `err_restart`. You can use a selection statement on this value so that `generic_hook` will act differently based on whether it is standing in for a `pre_`, `post_`, or `err_` hook.
+
+* `method_name` is the Client API method name whose execution is causing this execution of the `generic_hook`.
+
+* `args*`, `kw**` are the API method arguments and keyword arguments respectively.
+* `result_or_err` is a tri-state parameter taking one of these three values:
+  1. `None` for `pre_` hooks
+  2. `result` for `post_` hooks
+  3. `exc` for `err_` hooks
+
+Example:
+
+    # Overrides the standard do-nothing generic_hook by adding a log writing operation.
+    from twitter.common import log
+
+    class Logger(object):
+      '''Adds to the log every time a hookable API method is called'''
+      def generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw):
+        log.info('%s: %s_%s of %s'
+                 % (self.__class__.__name__, event, method_name, hook_config.job_key))
+
+## Hooks Process Checklist
+
+1. In your `.aurora` config file, add a `hooks` variable. Note that you may want to define a `.aurora` file only for hook definitions and then include this file in multiple other config files in which you want to use the same hooks.
+
+        hooks = []
+
+2. In the `hooks` variable, list all objects that define hooks used by `Job`s defined in this config:
+
+        hooks = [Object_hook_definer1, Object_hook_definer2]
+
+3. For each job that uses hooks in this config file, add `enable_hooks = True` to the `Job` definition. Note that this is necessary even if you only want to use the generic hook.
+
+4. Write your `pre_`, `post_`, and `err_` hook definitions as part of an object definition in your `.aurora` config file.
+
+5. If desired, write your `generic_hook` definition as part of an object definition in your `.aurora` config file. Remember, the object must be listed as a member of `hooks`.
+
+6. If your Aurora command line command does not otherwise take an `.aurora` config file argument, add the appropriate `.aurora` file as an argument in order to define and activate the configuration's hooks.
diff --git a/source/documentation/0.20.0/reference/configuration-best-practices.md b/source/documentation/0.20.0/reference/configuration-best-practices.md
new file mode 100644
index 0000000..9ce7fcb
--- /dev/null
+++ b/source/documentation/0.20.0/reference/configuration-best-practices.md
@@ -0,0 +1,187 @@
+Aurora Configuration Best Practices
+===================================
+
+Use As Few .aurora Files As Possible
+------------------------------------
+
+When creating your `.aurora` configuration, try to keep all versions of
+a particular job within the same `.aurora` file. For example, if you
+have separate jobs for `cluster1`, `cluster1` staging, `cluster1`
+testing, and `cluster2`, keep them as close together as possible.
+
+Constructs shared across multiple jobs owned by your team (e.g.
+team-level defaults or structural templates) can be split into separate
+`.aurora` files and included via the `include` directive.
+
+
+Avoid Boilerplate
+------------------
+
+If you see repetition or find yourself copying and pasting any parts of
+your configuration, it's likely an opportunity for templating. Take the
+example below:
+
+`redundant.aurora` contains:
+
+    download = Process(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'rm -rf Python-2.7.3 && tar xjf Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    build = Process(
+      name = 'build',
+      cmdline = 'pushd Python-2.7.3 && ./configure && make && popd',
+      max_failures = 1)
+
+    email = Process(
+      name = 'email',
+      cmdline = 'echo Success | mail feynman@tmc.com',
+      max_failures = 5,
+      min_duration = 1)
+
+    build_python = Task(
+      name = 'build_python',
+      processes = [download, unpack, build, email],
+      constraints = [Constraint(order = ['download', 'unpack', 'build', 'email'])])
+
+As you'll notice, there's a lot of repetition in the `Process`
+definitions. For example, almost every process sets a `max_failures`
+limit to 5 and a `min_duration` to 1. This is an opportunity for factoring
+into a common process template.
+
+Furthermore, the Python version is repeated everywhere. This can be
+bound via structural templating as described in the [Advanced Binding](../configuration-templating/#AdvancedBinding)
+section.
+
+`less_redundant.aurora` contains:
+
+    class Python(Struct):
+      version = Required(String)
+      base = Default(String, 'Python-{{version}}')
+      package = Default(String, '{{base}}.tar.bz2')
+
+    ReliableProcess = Process(
+      max_failures = 5,
+      min_duration = 1)
+
+    download = ReliableProcess(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/{{python.version}}/{{python.package}}')
+
+    unpack = ReliableProcess(
+      name = 'unpack',
+      cmdline = 'rm -rf {{python.base}} && tar xjf {{python.package}}')
+
+    build = ReliableProcess(
+      name = 'build',
+      cmdline = 'pushd {{python.base}} && ./configure && make && popd',
+      max_failures = 1)
+
+    email = ReliableProcess(
+      name = 'email',
+      cmdline = 'echo Success | mail {{role}}@foocorp.com')
+
+    build_python = SequentialTask(
+      name = 'build_python',
+      processes = [download, unpack, build, email]).bind(python = Python(version = "2.7.3"))
+
+
+Thermos Uses bash, But Thermos Is Not bash
+-------------------------------------------
+
+#### Bad
+
+Many tiny Processes make for harder-to-manage configurations.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'rcp user@my_machine:my_application .'
+    )
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+#### Good
+
+Each `cmdline` runs in a bash subshell, so you have the full power of
+bash. Chaining commands with `&&` or `||` is almost always the right
+thing to do.
+
+Also, for Tasks that are simply a list of processes that run one after
+another, consider using the `SequentialTask` helper which applies a
+linear ordering constraint for you.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'rcp user@my_machine:my_application . && unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+
+Rarely Use Functions In Your Configurations
+-------------------------------------------
+
+90% of the time you define a function in a `.aurora` file, you're
+probably Doing It Wrong(TM).
+
+#### Bad
+
+    def get_my_task(name, user, cpu, ram, disk):
+      return Task(
+        name = name,
+        user = user,
+        processes = [STAGE_PROCESS, RUN_PROCESS],
+        constraints = order(STAGE_PROCESS, RUN_PROCESS),
+        resources = Resources(cpu = cpu, ram = ram, disk = disk)
+      )
+
+    task_one = get_my_task('task_one', 'feynman', 1.0, 32*MB, 1*GB)
+    task_two = get_my_task('task_two', 'feynman', 2.0, 64*MB, 1*GB)
+
+#### Good
+
+This one is more idiomatic. Forced keyword arguments prevent accidents,
+e.g. constructing a task with `32*MB` of disk when you meant 32 MB of RAM.
+Less proliferation of task-construction techniques makes for
+easier-to-read, quicker-to-understand, and more composable
+configuration.
+
+    TASK_TEMPLATE = SequentialTask(
+      user = 'wickman',
+      processes = [STAGE_PROCESS, RUN_PROCESS],
+    )
+
+    task_one = TASK_TEMPLATE(
+      name = 'task_one',
+      resources = Resources(cpu = 1.0, ram = 32*MB, disk = 1*GB)
+    )
+
+    task_two = TASK_TEMPLATE(
+      name = 'task_two',
+      resources = Resources(cpu = 2.0, ram = 64*MB, disk = 1*GB)
+    )
diff --git a/source/documentation/0.20.0/reference/configuration-templating.md b/source/documentation/0.20.0/reference/configuration-templating.md
new file mode 100644
index 0000000..16d5c9d
--- /dev/null
+++ b/source/documentation/0.20.0/reference/configuration-templating.md
@@ -0,0 +1,306 @@
+Aurora Configuration Templating
+===============================
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a templating library called
+*Pystachio*, a powerful tool for configuration specification and reuse.
+
+[Aurora Configuration Reference](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the *Aurora+Thermos Configuration
+Reference* without `import` statements - the Aurora config loader
+injects them automatically. Other than that, the `.aurora` format
+works like any other Python script.
+
+
+Templating 1: Binding in Pystachio
+----------------------------------
+
+Pystachio uses the visually distinctive {{}} to indicate template
+variables. These are often called "mustache variables" after the
+similarly appearing variables in the Mustache templating system and
+because the curly braces resemble mustaches.
+
+If you are familiar with the Mustache system, templates in Pystachio
+have significant differences. They have no nesting, joining, or
+inheritance semantics. On the other hand, templates
+are evaluated iteratively, so this affords some level of indirection.
+
+Let's start with the simplest template: text with one
+variable, in this case `name`:
+
+    Hello {{name}}
+
+If we evaluate this as is, we'd get back:
+
+    Hello
+
+If a template variable doesn't have a value, when evaluated it's
+replaced with nothing. If we add a binding to give it a value:
+
+    { "name" : "Tom" }
+
+We'd get back:
+
+    Hello Tom
+
+Every Pystachio object has an associated `.bind` method that can bind
+values to {{}} variables. Bindings are not immediately evaluated.
+Instead, they are evaluated only when the interpolated value of the
+object is necessary, e.g. for performing equality or serializing a
+message over the wire.
+
+Objects with and without mustache templated variables behave
+differently:
+
+    >>> Float(1.5)
+    Float(1.5)
+
+    >>> Float('{{x}}.5')
+    Float({{x}}.5)
+
+    >>> Float('{{x}}.5').bind(x = 1)
+    Float(1.5)
+
+    >>> Float('{{x}}.5').bind(x = 1) == Float(1.5)
+    True
+
+    >>> contextual_object = String('{{metavar{{number}}}}').bind(
+    ... metavar1 = "first", metavar2 = "second")
+
+    >>> contextual_object
+    String({{metavar{{number}}}})
+
+    >>> contextual_object.bind(number = 1)
+    String(first)
+
+    >>> contextual_object.bind(number = 2)
+    String(second)
+
+You usually bind simple key to value pairs, but you can also bind three
+other objects: lists, dictionaries, and structurals. These will be
+described in detail later.
+
+
+### Structurals in Pystachio / Aurora
+
+Most Aurora/Thermos users don't ever (knowingly) interact with `String`,
+`Float`, or `Integer` Pystachio objects directly. Instead they interact
+with derived structural (`Struct`) objects that are collections of
+fundamental and structural objects. The structural object components are
+called *attributes*. Aurora's most used structural objects are `Job`,
+`Task`, and `Process`:
+
+    class Process(Struct):
+      cmdline = Required(String)
+      name = Required(String)
+      max_failures = Default(Integer, 1)
+      daemon = Default(Boolean, False)
+      ephemeral = Default(Boolean, False)
+      min_duration = Default(Integer, 5)
+      final = Default(Boolean, False)
+
+Construct default objects by following the object's type with (). If you
+want an attribute to have a value different from its default, include
+the attribute name and value inside the parentheses.
+
+    >>> Process()
+    Process(daemon=False, max_failures=1, ephemeral=False,
+      min_duration=5, final=False)
+
+Attribute values can be template variables, which then receive specific
+values when creating the object.
+
+    >>> Process(cmdline = 'echo {{message}}')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo {{message}}, final=False)
+
+    >>> Process(cmdline = 'echo {{message}}').bind(message = 'hello world')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+A powerful binding property is that all of an object's children inherit its
+bindings:
+
+    >>> List(Process)([
+    ... Process(name = '{{prefix}}_one'),
+    ... Process(name = '{{prefix}}_two')
+    ... ]).bind(prefix = 'hello')
+    ProcessList(
+      Process(daemon=False, name=hello_one, max_failures=1, ephemeral=False, min_duration=5, final=False),
+      Process(daemon=False, name=hello_two, max_failures=1, ephemeral=False, min_duration=5, final=False)
+      )
+
+Remember that an Aurora Job contains Tasks which contain Processes. A
+Job level binding is inherited by its Tasks and all their Processes.
+Similarly a Task level binding is available to that Task and its
+Processes but is *not* visible at the Job level (inheritance is a
+one-way street.)
+
+#### Mustaches Within Structurals
+
+When you define a `Struct` schema, one powerful, but confusing, feature
+is that all of that structure's attributes are Mustache variables within
+the enclosing scope *once they have been populated*.
+
+For example, when `Process` is defined above, all its attributes, such as
+`{{name}}`, `{{cmdline}}`, `{{max_failures}}`, etc., are immediately
+defined as Mustache variables, implicitly bound into the `Process`, and
+are inherited by all child objects once they are defined.
+
+Thus, you can do the following:
+
+    >>> Process(name = "installer", cmdline = "echo {{name}} is running")
+    Process(daemon=False, name=installer, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo installer is running, final=False)
+
+WARNING: This binding only takes place in one direction. For example,
+the following does NOT work and does not set the `Process` `name`
+attribute's value.
+
+    >>> Process().bind(name = "installer")
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5, final=False)
+
+The following is also not possible and results in an infinite loop that
+attempts to resolve `Process.name`.
+
+    >>> Process(name = '{{name}}').bind(name = 'installer')
+
+Do not confuse Structural attributes with bound Mustache variables.
+Attributes are implicitly converted to Mustache variables but not vice
+versa.
+
+### Templating 2: Structurals Are Factories
+
+#### A Second Way of Templating
+
+A second templating method is as powerful as the aforementioned one and
+is often confused with it. This method relies on the automatic conversion of
+Struct attributes to Mustache variables as described above.
+
+Suppose you create a Process object:
+
+    >>> p = Process(name = "process_one", cmdline = "echo hello world")
+
+    >>> p
+    Process(daemon=False, name=process_one, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+This `Process` object, "`p`", can be used wherever a `Process` object is
+needed. It can also be reused by changing the value(s) of its
+attribute(s). Here we change its `name` attribute from `process_one` to
+`process_two`.
+
+    >>> p(name = "process_two")
+    Process(daemon=False, name=process_two, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+Template creation is a common use for this technique:
+
+    >>> Daemon = Process(daemon = True)
+    >>> logrotate = Daemon(name = 'logrotate', cmdline = './logrotate conf/logrotate.conf')
+    >>> mysql = Daemon(name = 'mysql', cmdline = 'bin/mysqld --safe-mode')
+
+### Advanced Binding
+
+As described above, `.bind()` binds simple strings or numbers to
+Mustache variables. In addition to Structural types formed by combining
+atomic types, Pystachio has two container types, `List` and `Map`, which
+can also be bound via `.bind()`.
+
+#### Bind Syntax
+
+The `bind()` function can take Python dictionaries or `kwargs`
+interchangeably (when "`kwargs`" is in a function definition, `kwargs`
+receives a Python dictionary containing all keyword arguments after the
+formal parameter list).
+
+    >>> String('{{foo}}').bind(foo = 'bar') == String('{{foo}}').bind({'foo': 'bar'})
+    True
+
+Bindings done "closer" to the object in question take precedence:
+
+    >>> p = Process(name = '{{context}}_process')
+    >>> t = Task().bind(context = 'global')
+    >>> t(processes = [p, p.bind(context = 'local')])
+    Task(processes=ProcessList(
+      Process(daemon=False, name=global_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5),
+      Process(daemon=False, name=local_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5)
+    ))
+
+#### Binding Complex Objects
+
+##### Lists
+
+    >>> fibonacci = List(Integer)([1, 1, 2, 3, 5, 8, 13])
+    >>> String('{{fib[4]}}').bind(fib = fibonacci)
+    String(5)
+
+##### Maps
+
+    >>> first_names = Map(String, String)({'Kent': 'Clark', 'Wayne': 'Bruce', 'Prince': 'Diana'})
+    >>> String('{{first[Kent]}}').bind(first = first_names)
+    String(Clark)
+
+##### Structurals
+
+    >>> String('{{p.cmdline}}').bind(p = Process(cmdline = "echo hello world"))
+    String(echo hello world)
+
+### Structural Binding
+
+Use structural templates when binding more than two or three individual
+values at the Job or Task level. For fewer values, standard
+key-to-string binding is sufficient.
+
+Structural binding is a very powerful pattern and is most useful in
+Aurora/Thermos for doing Structural configuration. For example, you can
+define a job profile. The following profile uses `HDFS`, the Hadoop
+Distributed File System, to designate a file's location. `HDFS` does
+not come with Aurora, so you'll need to either install it separately
+or change the way the dataset is designated.
+
+    class Profile(Struct):
+      version = Required(String)
+      environment = Required(String)
+      dataset = Default(String, 'hdfs://home/aurora/data/{{environment}}')
+
+    PRODUCTION = Profile(version = 'live', environment = 'prod')
+    DEVEL = Profile(version = 'latest',
+                    environment = 'devel',
+                    dataset = 'hdfs://home/aurora/data/test')
+    TEST = Profile(version = 'latest', environment = 'test')
+
+    JOB_TEMPLATE = Job(
+      name = 'application',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = '{{profile.environment}}',
+      task = SequentialTask(
+        name = 'task',
+        resources = Resources(cpu = 2, ram = 4*GB, disk = 8*GB),
+        processes = [
+          Process(name = 'main',
+                  cmdline = 'java -jar application.jar -hdfsPath {{profile.dataset}}')
+        ]
+       )
+     )
+
+    jobs = [
+      JOB_TEMPLATE(instances = 100).bind(profile = PRODUCTION),
+      JOB_TEMPLATE.bind(profile = DEVEL),
+      JOB_TEMPLATE.bind(profile = TEST),
+     ]
+
+In this case, a custom structural "Profile" is created to self-document
+the configuration to some degree. This also allows some schema
+"type-checking" and default self-substitution, e.g. in
+`Profile.dataset` above.
+
+So rather than a `.bind()` with a half-dozen substituted variables, you
+can bind a single object that has sensible defaults stored in a single
+place.
diff --git a/source/documentation/0.20.0/reference/configuration-tutorial.md b/source/documentation/0.20.0/reference/configuration-tutorial.md
new file mode 100644
index 0000000..c170d47
--- /dev/null
+++ b/source/documentation/0.20.0/reference/configuration-tutorial.md
@@ -0,0 +1,531 @@
+Aurora Configuration Tutorial
+=============================
+
+How to write Aurora configuration files, including feature descriptions
+and best practices. When writing a configuration file, make use of
+`aurora job inspect`. It takes the same job key and configuration file
+arguments as `aurora job create` or `aurora update start`. It first ensures the
+configuration parses, then outputs it in human-readable form.
+
+You should read this after going through the general [Aurora Tutorial](../../getting-started/tutorial/).
+
+- [The Basics](#the-basics)
+	- [Use Bottom-To-Top Object Ordering](#use-bottom-to-top-object-ordering)
+- [An Example Configuration File](#an-example-configuration-file)
+- [Defining Process Objects](#defining-process-objects)
+- [Getting Your Code Into The Sandbox](#getting-your-code-into-the-sandbox)
+- [Defining Task Objects](#defining-task-objects)
+	- [SequentialTask: Running Processes in Parallel or Sequentially](#sequentialtask-running-processes-in-parallel-or-sequentially)
+	- [SimpleTask](#simpletask)
+	- [Combining tasks](#combining-tasks)
+- [Defining Job Objects](#defining-job-objects)
+- [The jobs List](#the-jobs-list)
+- [Basic Examples](#basic-examples)
+
+
+The Basics
+----------
+
+To run a job on Aurora, you must specify a configuration file that tells
+Aurora what it needs to know to schedule the job, what Mesos needs to
+run the tasks the job is made up of, and what Thermos needs to run the
+processes that make up the tasks. This file must have
+a `.aurora` suffix.
+
+A configuration file defines a collection of objects, along with parameter
+values for their attributes. An Aurora configuration file contains the
+following three types of objects:
+
+- Job
+- Task
+- Process
+
+A configuration also specifies a list of `Job` objects assigned
+to the variable `jobs`.
+
+- jobs (list of defined Jobs to run)
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a type-checked dictionary
+templating library called *Pystachio*, a powerful tool for
+configuration specification and reuse. Pystachio objects are tailored
+via {{}} surrounded templates.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the [*Aurora Configuration
+Reference*](../configuration/), without `import` statements - the
+Aurora config loader injects them automatically. Other than that, an `.aurora`
+file works like any other Python script.
+
+[*Aurora Configuration Reference*](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+### Use Bottom-To-Top Object Ordering
+
+A well-structured configuration starts with structural templates (if
+any). Structural templates encapsulate in their attributes all the
+differences between Jobs in the configuration that are not directly
+manipulated at the `Job` level, but typically at the `Process` or `Task`
+level. For example, certain processes may be invoked with slightly
+different settings or input.
+
+After structural templates, define, in order, `Process`es, `Task`s, and
+`Job`s.
+
+Structural template names should be *UpperCamelCased* and their
+instantiations are typically *UPPER\_SNAKE\_CASED*. `Process`, `Task`,
+and `Job` names are typically *lower\_snake\_cased*. Indentation is typically 2
+spaces.
+
+An Example Configuration File
+-----------------------------
+
+The following is a typical configuration file. Don't worry if there are
+parts you don't understand yet, but you may want to refer back to this
+as you read about its individual parts. Note that names surrounded by
+curly braces {{}} are template variables, which the system replaces with
+bound values for the variables.
+
+    # --- templates here ---
+	class Profile(Struct):
+	  package_version = Default(String, 'live')
+	  java_binary = Default(String, '/usr/lib/jvm/java-1.7.0-openjdk/bin/java')
+	  extra_jvm_options = Default(String, '')
+	  parent_environment = Default(String, 'prod')
+	  parent_serverset = Default(String,
+                                 '/foocorp/service/bird/{{parent_environment}}/bird')
+
+	# --- processes here ---
+	main = Process(
+	  name = 'application',
+	  cmdline = '{{profile.java_binary}} -server -Xmx1792m '
+	            '{{profile.extra_jvm_options}} '
+	            '-jar application.jar '
+	            '-upstreamService {{profile.parent_serverset}}'
+	)
+
+	# --- tasks ---
+	base_task = SequentialTask(
+	  name = 'application',
+	  processes = [
+	    Process(
+	      name = 'fetch',
+	      cmdline = 'curl -O '
+	                'https://packages.foocorp.com/{{profile.package_version}}/application.jar'),
+	  ]
+	)
+
+        # not always necessary but often useful to have separate task
+        # resource classes
+        staging_task = base_task(resources =
+                         Resources(cpu = 1.0,
+                                   ram = 2048*MB,
+                                   disk = 1*GB))
+	production_task = base_task(resources =
+                            Resources(cpu = 4.0,
+                                      ram = 2560*MB,
+                                      disk = 10*GB))
+
+	# --- job template ---
+	job_template = Job(
+	  name = 'application',
+	  role = 'myteam',
+	  contact = 'myteam-team@foocorp.com',
+	  instances = 20,
+	  service = True,
+	  task = production_task
+	)
+
+	# -- profile instantiations (if any) ---
+	PRODUCTION = Profile()
+	STAGING = Profile(
+	  extra_jvm_options = '-Xloggc:gc.log',
+	  parent_environment = 'staging'
+	)
+
+	# -- job instantiations --
+	jobs = [
+          job_template(cluster = 'cluster1', environment = 'prod')
+	               .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster2', environment = 'prod')
+	                .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster1',
+                        environment = 'staging',
+			service = False,
+			task = staging_task,
+			instances = 2)
+			.bind(profile = STAGING),
+	]
+
+## Defining Process Objects
+
+Processes are handled by the Thermos system. A process is a single
+executable step run as a part of an Aurora task, which consists of a
+bash-executable statement.
+
+The key (and required) `Process` attributes are:
+
+-   `name`: Any string which is a valid Unix filename (no slashes,
+    NULLs, or leading periods). The `name` value must be unique relative
+    to other Processes in a `Task`.
+-   `cmdline`: A command line run in a bash subshell, so you can use
+    bash scripts. Nothing is supplied for command-line arguments,
+    so `$*` is unspecified.
+
+Many tiny processes make managing configurations more difficult. For
+example, the following is a bad way to define processes.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip'
+    )
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+Since `cmdline` runs in a bash subshell, you can chain commands
+with `&&` or `||`.
+
+When defining a `Task` that is just a list of Processes run in a
+particular order, use `SequentialTask`, as described in the [*Defining*
+`Task` *Objects*](#defining-task-objects) section. The following simplifies and combines the
+above multiple `Process` definitions into just two.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip && '
+                'unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+`Process` also has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#process-objects).
+
+
+## Getting Your Code Into The Sandbox
+
+When using Aurora, you need to get your executable code into its "sandbox", specifically
+the Task sandbox where the code executes for the Processes that make up that Task.
+
+Each Task has a sandbox created when the Task starts and garbage
+collected when it finishes. All of a Task's processes run in its
+sandbox, so processes can share state by using a shared current
+working directory.
+
+Typically, you save this code somewhere. You then need to define a Process
+in your `.aurora` configuration file that fetches the code from that somewhere
+to where the agent can see it. For a public cloud, that can be anywhere public on
+the Internet, such as S3. For a private cloud, you need to put it on accessible
+internal storage, such as an HDFS cluster.
+
+The template for this Process is:
+
+    <name> = Process(
+      name = '<name>',
+      cmdline = '<command to copy and extract code archive into current working directory>'
+    )
+
+Note: Be sure the extracted code archive has an executable.
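+
+For example, a fetch Process pulling an archive from a hypothetical package server
+might look like:
+
+    fetch_app = Process(
+      name = 'fetch_app',
+      cmdline = 'curl -O https://packages.example.com/myapp/app.tar.gz && tar xzf app.tar.gz')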
+
+## Getting Environment Variables Into The Sandbox
+
+Every time a process is forked, the Thermos executor checks for the existence of a
+`.thermos_profile` file; if the file exists, it is sourced.
+You can use this mechanism to pass environment variables to the sandbox.
+
+An example for this Process is:
+
+    setup_env = Process(
+      name = 'setup',
+      cmdline = '''cat <<EOF > .thermos_profile
+                   export RESULT=hello
+                   EOF'''
+    )
+
+    read_env = Process(
+      name = 'read',
+      cmdline = 'echo $RESULT'
+    )
+
+## Defining Task Objects
+
+Tasks are handled by Mesos. A task is a collection of processes that
+runs in a shared sandbox. It's the fundamental unit Aurora uses to
+schedule the datacenter; essentially what Aurora does is find places
+in the cluster to run tasks.
+
+The key (and required) parts of a Task are:
+
+-   `name`: A string giving the Task's name. If a Task is
+    not given a name, it defaults to the name of its first Process.
+
+-   `processes`: An unordered list of Process objects bound to the Task.
+    The value of the optional `constraints` attribute affects the
+    contents as a whole. Currently, the only constraint, `order`, determines if
+    the processes run in parallel or sequentially.
+
+-   `resources`: A `Resource` object defining the Task's resource
+    footprint. A `Resource` object has three attributes:
+    -   `cpu`: A Float, the fractional number of cores the Task requires.
+    -   `ram`: An Integer, RAM bytes the Task requires.
+    -   `disk`: An Integer, disk bytes the Task requires.
+
+A basic Task definition looks like:
+
+    Task(
+        name="hello_world",
+        processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+        resources=Resources(cpu = 1.0,
+                            ram = 1*GB,
+                            disk = 1*GB))
+
+A Task has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#task-object)
+
+
+### SequentialTask: Running Processes in Parallel or Sequentially
+
+By default, a Task with several Processes runs them in parallel. There
+are two ways to run Processes sequentially:
+
+-   Include an `order` constraint in the Task definition's `constraints`
+    attribute whose arguments specify the processes' run order:
+
+        Task( ... processes=[process1, process2, process3],
+              constraints = order(process1, process2, process3), ...)
+
+-   Use `SequentialTask` instead of `Task`; it automatically runs
+    processes in the order specified in the `processes` attribute. No
+    `constraint` parameter is needed:
+
+        SequentialTask( ... processes=[process1, process2, process3] ...)
+
+### SimpleTask
+
+For quickly creating simple tasks, use the `SimpleTask` helper. It
+creates a basic task from a provided name and command line using a
+default set of resources. For example, in a `.aurora` configuration
+file:
+
+    SimpleTask(name="hello_world", command="echo hello world")
+
+is equivalent to
+
+    Task(name="hello_world",
+         processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+         resources=Resources(cpu = 1.0,
+                             ram = 1*GB,
+                             disk = 1*GB))
+
+The simplest idiomatic Job configuration thus becomes:
+
+    import os
+    hello_world_job = Job(
+      task=SimpleTask(name="hello_world", command="echo hello world"),
+      role=os.getenv('USER'),
+      cluster="cluster1")
+
+When written to `hello_world.aurora`, you invoke it with a simple
+`aurora job create cluster1/$USER/test/hello_world hello_world.aurora`.
+
+### Combining tasks
+
+`Tasks.concat` (synonym: `concat_tasks`) and
+`Tasks.combine` (synonym: `combine_tasks`) merge multiple Task definitions
+into a single Task. It may be easier to define complex Jobs
+as smaller constituent Tasks. But since a Job only includes a single
+Task, the subtasks must be combined before using them in a Job.
+Smaller Tasks can also be reused between Jobs, instead of having to
+repeat their definition for multiple Jobs.
+
+With both methods, the merged Task takes the first Task's name. The
+difference between the two is the resulting Task's process ordering.
+
+-   `Tasks.combine` runs its subtasks' processes in no particular order.
+    The new Task's resource consumption is the sum of all its subtasks'
+    consumption.
+
+-   `Tasks.concat` runs its subtasks in the order supplied, with each
+    subtask's processes run serially between tasks. It is analogous to
+    the `order` constraint helper, except at the Task level instead of
+    the Process level. The new Task's resource consumption is the
+    maximum value specified by any subtask for each Resource attribute
+    (cpu, ram and disk).
+
+For example, given the following:
+
+    setup_task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper],
+      # It is important to note that {{Tasks.concat}} has
+      # no effect on the ordering of the processes within a task;
+      # hence the necessity of the {{order}} statement below
+      # (otherwise, the order in which {{download_interpreter}}
+      # and {{update_zookeeper}} run will be non-deterministic)
+      constraints=order(download_interpreter, update_zookeeper),
+      ...
+    )
+
+    run_task = SequentialTask(
+      ...
+      processes=[download_application, start_application],
+      ...
+    )
+
+    combined_task = Tasks.concat(setup_task, run_task)
+
+The `Tasks.concat` command merges the two Tasks into a single Task and
+ensures all processes in `setup_task` run before the processes
+in `run_task`. Conceptually, the task is reduced to:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper,
+                        download_application, start_application),
+      ...
+    )
+
+In the case of `Tasks.combine`, the two schedules run in parallel:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper) +
+                        order(download_application, start_application),
+      ...
+    )
+
+In the latter case, each of the two sequences may operate in parallel.
+Of course, this may not be the intended behavior (for example, if
+the `start_application` Process implicitly relies
+upon `download_interpreter`). Make sure you understand the difference
+between using one or the other.
+
+## Defining Job Objects
+
+A job is a group of identical tasks that Aurora can run in a Mesos cluster.
+
+A `Job` object is defined by the values of several attributes, some
+required and some optional. The required attributes are:
+
+-   `task`: Task object to bind to this job. Note that a Job can
+    only take a single Task.
+
+-   `role`: Job's role account; in other words, the user account to run
+    the job as on a Mesos cluster machine. A common value is
+    `os.getenv('USER')`, which uses a Python call to get the user who
+    submits the job request. The other common value is the service
+    account that runs the job, e.g. `www-data`.
+
+-   `environment`: Job's environment, typical values
+    are `devel`, `test`, or `prod`.
+
+-   `cluster`: Aurora cluster to schedule the job in, defined in
+    `/etc/aurora/clusters.json` or `~/.clusters.json`. You can specify
+    jobs where the only difference is the `cluster`, then at run time
+    only run the Job whose job key includes your desired cluster's name.
+
+You usually see a `name` parameter. By default, `name` inherits its
+value from the Job's associated Task object, but you can override this
+default. For these four parameters, a Job definition might look like:
+
+    foo_job = Job( name = 'foo', cluster = 'cluster1',
+              role = os.getenv('USER'), environment = 'prod',
+              task = foo_task)
+
+In addition to the required attributes, there are several optional
+attributes. Details can be found in the [Aurora Configuration Reference](../configuration/#job-objects).
+
+
+## The jobs List
+
+At the end of your `.aurora` file, you need to specify a list of the
+file's defined Jobs. For example, the following exports the jobs `job1`,
+`job2`, and `job3`.
+
+    jobs = [job1, job2, job3]
+
+This allows the aurora client to invoke commands on those jobs, such as
+starting, updating, or killing them.
+
+
+
+Basic Examples
+==============
+
+These are provided to give a basic understanding of simple Aurora jobs.
+
+### hello_world.aurora
+
+Put the following in a file named `hello_world.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    import os
+    hello_world_process = Process(name = 'hello_world', cmdline = 'echo hello world')
+
+    hello_world_task = Task(
+      resources = Resources(cpu = 0.1, ram = 16 * MB, disk = 16 * MB),
+      processes = [hello_world_process])
+
+    hello_world_job = Job(
+      cluster = 'cluster1',
+      role = os.getenv('USER'),
+      task = hello_world_task)
+
+    jobs = [hello_world_job]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world hello_world.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world
+
+### Environment Tailoring
+
+Put the following in a file named `hello_world_productionized.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    include('hello_world.aurora')
+
+    production_resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 2 * GB)
+    staging_resources = Resources(cpu = 0.1, ram = 32 * MB, disk = 512 * MB)
+    hello_world_template = hello_world(
+        name = "hello_world-{{cluster}}"
+        task = hello_world(resources=production_resources))
+
+    jobs = [
+      # production jobs
+      hello_world_template(cluster = 'cluster1', instances = 25),
+      hello_world_template(cluster = 'cluster2', instances = 15),
+
+      # staging jobs
+      hello_world_template(
+        cluster = 'local',
+        instances = 1,
+        task = hello_world(resources=staging_resources)),
+    ]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world-cluster1 hello_world_productionized.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world-cluster1
\ No newline at end of file
diff --git a/source/documentation/0.20.0/reference/configuration.md b/source/documentation/0.20.0/reference/configuration.md
new file mode 100644
index 0000000..9ef290b
--- /dev/null
+++ b/source/documentation/0.20.0/reference/configuration.md
@@ -0,0 +1,645 @@
+Aurora Configuration Reference
+==============================
+
+Don't know where to start? The Aurora configuration schema is very
+powerful, and configurations can become quite complex for advanced use
+cases.
+
+For examples of simple configurations to get something up and running
+quickly, check out the [Tutorial](../../getting-started/tutorial/). When you feel comfortable with the basics, move
+on to the [Configuration Tutorial](../configuration-tutorial/) for more in-depth coverage of
+configuration design.
+
+- [Process Schema](#process-schema)
+    - [Process Objects](#process-objects)
+- [Task Schema](#task-schema)
+    - [Task Object](#task-object)
+    - [Constraint Object](#constraint-object)
+    - [Resource Object](#resource-object)
+- [Job Schema](#job-schema)
+    - [Job Objects](#job-objects)
+    - [UpdateConfig Objects](#updateconfig-objects)
+    - [HealthCheckConfig Objects](#healthcheckconfig-objects)
+    - [Announcer Objects](#announcer-objects)
+    - [Container Objects](#container)
+    - [LifecycleConfig Objects](#lifecycleconfig-objects)
+- [Specifying Scheduling Constraints](#specifying-scheduling-constraints)
+- [Template Namespaces](#template-namespaces)
+    - [mesos Namespace](#mesos-namespace)
+    - [thermos Namespace](#thermos-namespace)
+
+
+Process Schema
+==============
+
+Process objects consist of required `name` and `cmdline` attributes. You can customize Process
+behavior with its optional attributes. Remember, Processes are handled by Thermos.
+
+### Process Objects
+
+  **Attribute Name**  | **Type**    | **Description**
+  ------------------- | :---------: | ---------------------------------
+   **name**           | String      | Process name (Required)
+   **cmdline**        | String      | Command line (Required)
+   **max_failures**   | Integer     | Maximum process failures (Default: 1)
+   **daemon**         | Boolean     | When True, this is a daemon process. (Default: False)
+   **ephemeral**      | Boolean     | When True, this is an ephemeral process. (Default: False)
+   **min_duration**   | Integer     | Minimum duration between process restarts in seconds. (Default: 5)
+   **final**          | Boolean     | When True, this process is a finalizing one that should run last. (Default: False)
+   **logger**         | Logger      | Struct defining the log behavior for the process. (Default: Empty)
+
+#### name
+
+The name is any valid UNIX filename string (specifically no
+slashes, NULLs or leading periods). Within a Task object, each Process name
+must be unique.
+
+#### cmdline
+
+The command line run by the process. The command line is invoked in a bash
+subshell, so it can involve full-blown bash scripts. However, nothing is
+supplied for command-line arguments so `$*` is unspecified.
+
+#### max_failures
+
+The maximum number of failures (non-zero exit statuses) this process can
+have before being marked permanently failed and not retried. If a
+process permanently fails, Thermos looks at the failure limit of the task
+containing the process (usually 1) to determine if the task has
+failed as well.
+
+Setting `max_failures` to 0 makes the process retry
+indefinitely until it achieves a successful (zero) exit status.
+It retries at most once every `min_duration` seconds to prevent
+an effective denial of service attack on the coordinating Thermos scheduler.
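+
+For example, a sketch of a Process that retries forever, at most once every 15
+seconds (the command is illustrative only):
+
+    poller = Process(
+      name = 'poller',
+      cmdline = './wait_for_backend.sh',
+      max_failures = 0,
+      min_duration = 15)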
+
+#### daemon
+
+By default, Thermos processes are non-daemon. If `daemon` is set to True, a
+successful (zero) exit status does not prevent future process runs.
+Instead, the process reinvokes after `min_duration` seconds.
+However, the maximum failure limit still applies. A combination of
+`daemon=True` and `max_failures=0` causes a process to retry
+indefinitely regardless of exit status. This should be avoided
+for very short-lived processes because of the accumulation of
+checkpointed state for each process run. When running in Mesos
+specifically, `max_failures` is capped at 100.
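+
+A sketch of a daemon Process that is re-run after every exit, successful or not
+(the command is illustrative only):
+
+    heartbeat = Process(
+      name = 'heartbeat',
+      cmdline = './emit_heartbeat.sh',
+      daemon = True,
+      max_failures = 0)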
+
+#### ephemeral
+
+By default, Thermos processes are non-ephemeral. If `ephemeral` is set to
+True, the process' status is not used to determine if its containing task
+has completed. For example, consider a task with a non-ephemeral
+webserver process and an ephemeral logsaver process
+that periodically checkpoints its log files to a centralized data store.
+The task is considered finished once the webserver process has
+completed, regardless of the logsaver's current status.
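+
+A sketch of that example (the script names are illustrative, and `daemon = True`
+is assumed here so the logsaver keeps re-running):
+
+    webserver = Process(name = 'webserver', cmdline = './run_server.sh')
+
+    logsaver = Process(
+      name = 'logsaver',
+      cmdline = './checkpoint_logs.sh',
+      daemon = True,
+      ephemeral = True)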
+
+#### min_duration
+
+Processes may succeed or fail multiple times during a single task's
+duration. Each of these is called a *process run*. `min_duration` is
+the minimum number of seconds the scheduler waits before running the
+same process.
+
+#### final
+
+Processes can be grouped into two classes: ordinary processes and
+finalizing processes. By default, Thermos processes are ordinary. They
+run as long as the task is considered healthy (i.e., no failure
+limits have been reached.) But once all regular Thermos processes
+finish or the task reaches a certain failure threshold, it
+moves into a "finalization" stage and runs all finalizing
+processes. These are typically processes necessary for cleaning up the
+task, such as log checkpointers, or perhaps e-mail notifications that
+the task completed.
+
+Finalizing processes may not depend upon ordinary processes or
+vice-versa; however, finalizing processes may depend upon other
+finalizing processes and otherwise run as a typical process
+schedule.
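+
+A sketch of a finalizing Process (the command is illustrative only):
+
+    archive_logs = Process(
+      name = 'archive_logs',
+      cmdline = './upload_logs_to_archive.sh',
+      final = True)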
+
+#### logger
+
+The default behavior of Thermos is to store stderr/stdout logs in files which grow unbounded.
+In the event that you have large log volume, you may want to configure Thermos to automatically
+rotate logs after they grow to a certain size, which can prevent your job from using more than its
+allocated disk space.
+
+Logger objects specify a `destination` for Process logs which is, by default, `file` - a pair of
+`stdout` and `stderr` files. It's also possible to specify `console` to send logs to
+the Process's stdout and stderr streams, `none` to suppress log output entirely, or `both` to send logs to
+both files and console streams.
+
+The default Logger `mode` is `standard` which lets the stdout and stderr streams grow without bound.
+
+  **Attribute Name**  | **Type**          | **Description**
+  ------------------- | :---------------: | ---------------------------------
+   **destination**    | LoggerDestination | Destination of logs. (Default: `file`)
+   **mode**           | LoggerMode        | Mode of the logger. (Default: `standard`)
+   **rotate**         | RotatePolicy      | An optional rotation policy. (Default: `Empty`)
+
+A RotatePolicy describes log rotation behavior when `mode` is set to `rotate`; it is ignored
+otherwise. If `rotate` is `Empty` or `RotatePolicy()` while `mode` is set to `rotate`, the
+defaults below are used.
+
+  **Attribute Name**  | **Type**     | **Description**
+  ------------------- | :----------: | ---------------------------------
+   **log_size**       | Integer      | Maximum size (in bytes) of an individual log file. (Default: 100 MiB)
+   **backups**        | Integer      | The maximum number of backups to retain. (Default: 5)
+
+An example process configuration is as follows:
+
+        process = Process(
+          name='process',
+          logger=Logger(
+            destination=LoggerDestination('both'),
+            mode=LoggerMode('rotate'),
+            rotate=RotatePolicy(log_size=5*MB, backups=5)
+          )
+        )
+
+Task Schema
+===========
+
+Tasks fundamentally consist of a `name` and a list of Process objects stored as the
+value of the `processes` attribute. Processes can be further constrained with
+`constraints`. By default, `name`'s value inherits from the first Process in the
+`processes` list, so for simple `Task` objects with one Process, `name`
+can be omitted. In Mesos, `resources` is also required.
+
+### Task Object
+
+   **param**               | **type**                         | **description**
+   ---------               | :---------:                      | ---------------
+   ```name```              | String                           | Task name (Required) (Default: ```processes[0].name```)
+   ```processes```         | List of ```Process``` objects    | List of ```Process``` objects bound to this task. (Required)
+   ```constraints```       | List of ```Constraint``` objects | List of ```Constraint``` objects constraining processes.
+   ```resources```         | ```Resource``` object            | Resource footprint. (Required)
+   ```max_failures```      | Integer                          | Maximum process failures before being considered failed (Default: 1)
+   ```max_concurrency```   | Integer                          | Maximum number of concurrent processes (Default: 0, unlimited concurrency.)
+   ```finalization_wait``` | Integer                          | Amount of time allocated for finalizing processes, in seconds. (Default: 30)
+
+#### name
+`name` is a string denoting the name of this task. It defaults to the name of the first Process in
+the list of Processes associated with the `processes` attribute.
+
+#### processes
+
+`processes` is an unordered list of `Process` objects. To constrain the order
+in which they run, use `constraints`.
+
+#### constraints
+
+A list of `Constraint` objects. Currently it supports only one type,
+the `order` constraint. `order` is a list of process names
+that should run in the order given. For example,
+
+        process = Process(cmdline = "echo hello {{name}}")
+        task = Task(name = "echoes",
+                    processes = [process(name = "jim"), process(name = "bob")],
+                    constraints = [Constraint(order = ["jim", "bob"])])
+
+Constraints can be supplied ad-hoc and in duplicate. Not all
+Processes need be constrained; however, Tasks with cycles are
+rejected by the Thermos scheduler.
+
+Use the `order` function as shorthand to generate `Constraint` lists.
+The following:
+
+        order(process1, process2)
+
+is shorthand for
+
+        [Constraint(order = [process1.name(), process2.name()])]
+
+The `order` function accepts Process name strings `('foo', 'bar')` or the processes
+themselves, e.g. `foo=Process(name='foo', ...)`, `bar=Process(name='bar', ...)`,
+`constraints=order(foo, bar)`.
+
+#### resources
+
+Takes a `Resource` object, which specifies the amounts of CPU, memory, and disk space resources
+to allocate to the Task.
+
+#### max_failures
+
+`max_failures` is the number of failed processes needed for the `Task` to be
+marked as failed.
+
+For example, assume a Task has two Processes and a `max_failures` value of `2`:
+
+        template = Process(max_failures=10)
+        task = Task(
+          name = "fail",
+          processes = [
+             template(name = "failing", cmdline = "exit 1"),
+             template(name = "succeeding", cmdline = "exit 0")
+          ],
+          max_failures=2)
+
+The `failing` Process could fail 10 times before being marked as permanently
+failed, and the `succeeding` Process could succeed on the first run. The Task
+would still succeed: although there would be 10 failed process *runs*, only one
+process would be marked as permanently failed, and both processes would have to
+fail for the Task to fail.
+
+#### max_concurrency
+
+For Tasks with a number of expensive but otherwise independent
+processes, you may want to limit the amount of concurrency
+the Thermos scheduler provides rather than artificially constraining
+it via `order` constraints. For example, a test framework may
+generate a task with 100 test run processes, but wants to run it on
+a machine with only 4 cores. You can limit the amount of parallelism to
+4 by setting `max_concurrency=4` in your task configuration.
+
+For example, the following task spawns 180 Processes ("mappers")
+to compute individual elements of a 180 degree sine table, all dependent
+upon one final Process ("reducer") to tabulate the results:
+
+    def make_mapper(id):
+      return Process(
+        name = "mapper%03d" % id,
+        cmdline = "echo 'scale=50;s(%d*4*a(1)/180)' | bc -l > temp.sine_table.%03d" % (id, id))
+
+    def make_reducer():
+      return Process(
+        name = "reducer",
+        cmdline = "cat temp.* | nl > sine_table.txt && rm -f temp.*")
+
+    processes = map(make_mapper, range(180))
+
+    task = Task(
+      name = "mapreduce",
+      processes = processes + [make_reducer()],
+      constraints = [Constraint(order = [mapper.name(), 'reducer']) for mapper
+                     in processes],
+      max_concurrency = 8)
+
+#### finalization_wait
+
+Process execution is organized into three active stages: `ACTIVE`,
+`CLEANING`, and `FINALIZING`. The `ACTIVE` stage is when ordinary processes run.
+This stage lasts as long as Processes are running and the Task is healthy.
+The moment either all Processes have finished successfully or the Task has reached a
+maximum Process failure limit, it goes into the `CLEANING` stage and sends
+SIGTERMs to all currently running Processes and their process trees.
+Once all Processes have terminated, the Task goes into the `FINALIZING` stage
+and invokes the schedule of all Processes with the `final` attribute set to True.
+
+This whole process from the end of `ACTIVE` stage to the end of `FINALIZING`
+must happen within `finalization_wait` seconds. If it does not
+finish during that time, all remaining Processes are sent SIGKILLs
+(or if they depend upon uncompleted Processes, are
+never invoked.)
+
+When running on Aurora, the `finalization_wait` is capped at 60 seconds.
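+
+For example, a sketch of a Task that allows extra time for its finalizing
+Processes (the Process definitions are assumed to exist elsewhere, and the value
+must stay under the 60 second cap):
+
+    task = Task(
+      name = 'service_with_cleanup',
+      processes = [main_process, archive_logs],
+      finalization_wait = 45)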
+
+### Constraint Object
+
+Current constraint objects only support a single ordering constraint, `order`,
+which specifies its processes run sequentially in the order given. By
+default, all processes run in parallel when bound to a `Task` without
+ordering constraints.
+
+   param | type           | description
+   ----- | :----:         | -----------
+   order | List of String | List of processes by name (String) that should be run serially.
+
+### Resource Object
+
+Specifies the amount of CPU, RAM, and disk resources the task needs. See the
+[Resource Isolation document](../../features/resource-isolation/) for suggested values and to understand how
+resources are allocated.
+
+  param      | type    | description
+  -----      | :----:  | -----------
+  ```cpu```  | Float   | Fractional number of cores required by the task.
+  ```ram```  | Integer | Bytes of RAM required by the task.
+  ```disk``` | Integer | Bytes of disk required by the task.
+  ```gpu```  | Integer | Number of GPU cores required by the task.
+
+
+Job Schema
+==========
+
+### Job Objects
+
+*Note: Specifying a ```Container``` object as the value of the ```container``` property is
+  deprecated in favor of setting its value directly to the appropriate ```Docker``` or ```Mesos```
+  container type*
+
+*Note: Specifying preemption behavior of tasks through `production` flag is deprecated in favor of
+  electing appropriate task tier via `tier` attribute.*
+
+   name | type | description
+   ------ | :-------: | -------
+  ```task``` | Task | The Task object to bind to this job. Required.
+  ```name``` | String | Job name. (Default: inherited from the task attribute's name)
+  ```role``` | String | Job role account. Required.
+  ```cluster``` | String | Cluster in which this job is scheduled. Required.
+  ```environment``` | String | Job environment, default ```devel```. By default must be one of ```prod```, ```devel```, ```test``` or ```staging<number>``` but it can be changed by the Cluster operator using the scheduler option `allowed_job_environments`.
+  ```contact``` | String | Best email address to reach the owner of the job. For production jobs, this is usually a team mailing list.
+  ```instances```| Integer | Number of instances (sometimes referred to as replicas or shards) of the task to create. (Default: 1)
+  ```cron_schedule``` | String | Cron schedule in cron format. May only be used with non-service jobs. See [Cron Jobs](../../features/cron-jobs/) for more information. Default: None (not a cron job.)
+  ```cron_collision_policy``` | String | Policy to use when a cron job is triggered while a previous run is still active. KILL_EXISTING: kill the previous run and schedule the new run. CANCEL_NEW: let the previous run continue and cancel the new run. (Default: KILL_EXISTING)
+  ```update_config``` | ```UpdateConfig``` object | Parameters for controlling the rate and policy of rolling updates.
+  ```constraints``` | dict | Scheduling constraints for the tasks. See the section on the [constraint specification language](#specifying-scheduling-constraints)
+  ```service``` | Boolean | If True, restart tasks regardless of success or failure. (Default: False)
+  ```max_task_failures``` | Integer | Maximum number of failures after which the task is considered to have failed (Default: 1) Set to -1 to allow for infinite failures
+  ```priority``` | Integer | Preemption priority to give the task (Default 0). Tasks with higher priorities may preempt tasks at lower priorities.
+  ```production``` | Boolean |  (Deprecated) Whether or not this is a production task that may [preempt](../../features/multitenancy/#preemption) other tasks (Default: False). Production job role must have the appropriate [quota](../../features/multitenancy/#preemption).
+  ```health_check_config``` | ```HealthCheckConfig``` object | Parameters for controlling a task's health checks. HTTP health check is only used if a health port was assigned with a command line wildcard.
+  ```container``` | Choice of ```Container```, ```Docker``` or ```Mesos``` object | An optional container to run all processes inside of.
+  ```lifecycle``` | ```LifecycleConfig``` object | An optional task lifecycle configuration that dictates commands to be executed on startup/teardown.  HTTP lifecycle is enabled by default if the "health" port is requested.  See [LifecycleConfig Objects](#lifecycleconfig-objects) for more information.
+  ```tier``` | String | Task tier type. The default scheduler tier configuration allows for 3 tiers: `revocable`, `preemptible`, and `preferred`. If a tier is not elected, Aurora assigns the task to a tier based on its choice of `production` (that is `preferred` for production and `preemptible` for non-production jobs). See the section on [Configuration Tiers](../../features/multitenancy/#configuration-tiers) for more information.
+  ```announce``` | ```Announcer``` object | Optionally enable Zookeeper ServerSet announcements. See [Announcer Objects](#announcer-objects) for more information.
+  ```enable_hooks``` | Boolean | Whether to enable [Client Hooks](../client-hooks/) for this job. (Default: False)
+  ```partition_policy``` | ```PartitionPolicy``` object | An optional partition policy that allows job owners to define how to handle partitions for running tasks (in partition-aware Aurora clusters)
+  ```metadata``` | list of ```Metadata``` objects | list of ```Metadata``` objects for user's customized metadata information.
+  ```executor_config``` | ```ExecutorConfig``` object | Allows choosing an alternative executor defined in `custom_executor_config` to be used instead of Thermos. Tasks will be launched with Thermos as the executor by default. See [Custom Executors](../../features/custom-executors/) for more info.
+
+
+### UpdateConfig Objects
+
+Parameters for controlling the rate and policy of rolling updates.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+| ```watch_secs```             | Integer  | Minimum number of seconds a shard must remain in ```RUNNING``` state before considered a success (Default: 45)
+| ```max_per_shard_failures``` | Integer  | Maximum number of restarts per shard during update. Increments total failure count when this limit is exceeded. (Default: 0)
+| ```max_total_failures```     | Integer  | Maximum number of shard failures to be tolerated in total during an update. Cannot be greater than or equal to the total number of tasks in a job. (Default: 0)
+| ```rollback_on_failure```    | boolean  | When False, prevents auto rollback of a failed update (Default: True)
+| ```wait_for_batch_completion```| boolean | When True, all threads from a given batch will be blocked from picking up new instances until the entire batch is updated. This essentially simulates the legacy sequential updater algorithm. (Default: False)
+| ```pulse_interval_secs```    | Integer  |  Indicates a [coordinated update](../../features/job-updates/#coordinated-job-updates). If no pulses are received within the provided interval the update will be blocked. Beta-updater only. Will fail on submission when used with client updater. (Default: None)
+
+### HealthCheckConfig Objects
+
+Parameters for controlling a task's health checks via HTTP or a shell command.
+
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```health_checker```           | HealthCheckerConfig | Configure what kind of health check to use.
+| ```initial_interval_secs```    | Integer   | Initial grace period (during which health-check failures are ignored) while performing health checks. (Default: 15)
+| ```interval_secs```            | Integer   | Interval on which to check the task's health. (Default: 10)
+| ```max_consecutive_failures``` | Integer   | Maximum number of consecutive failures that will be tolerated before considering a task unhealthy (Default: 0)
+| ```min_consecutive_successes``` | Integer   | Minimum number of consecutive successful health checks required before considering a task healthy (Default: 1)
+| ```timeout_secs```             | Integer   | Health check timeout. (Default: 1)
+
+### HealthCheckerConfig Objects
+| param                          | type                | description
+| -------                        | :-------:           | --------
+| ```http```                     | HttpHealthChecker  | Configure health check to use HTTP. (Default)
+| ```shell```                    | ShellHealthChecker | Configure health check via a shell command.
+
+### HttpHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```endpoint```                 | String    | HTTP endpoint to check (Default: /health)
+| ```expected_response```        | String    | If not empty, fail the HTTP health check if the response differs. Case insensitive. (Default: ok)
+| ```expected_response_code```   | Integer   | If not zero, fail the HTTP health check if the response code differs. (Default: 0)
+
+### ShellHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```shell_command```            | String    | An alternative to HTTP health checking. Specifies a shell command that will be executed. Any non-zero exit status will be interpreted as a health check failure.
+
+### PartitionPolicy Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```reschedule```               | Boolean   | Whether or not to reschedule when running tasks become partitioned (Default: True)
+| ```delay_secs```               | Integer   | How long to delay transitioning to LOST when running tasks are partitioned. (Default: 0)
+
+### Metadata Objects
+
+Describes a piece of user metadata in a key value pair
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```key```        | String          | Indicate which metadata the user provides
+  ```value```      | String          | Provide the metadata content for corresponding key
+
+### ExecutorConfig Objects
+
+Describes an Executor name and data to pass to the Mesos Task
+
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```name```               | String   | Name of the executor to use for this task. Must match the name of an executor in `custom_executor_config` or Thermos (`AuroraExecutor`). (Default: AuroraExecutor)
+| ```data```               | String   | Data blob to pass on to the executor. (Default: "")
+
+### Announcer Objects
+
+If the `announce` field in the Job configuration is set, each task will be
+registered in the ServerSet `/aurora/role/environment/jobname` in the
+zookeeper ensemble configured by the executor (which can optionally be overridden by specifying the
+`zk_path` parameter).  If no Announcer object is specified,
+no announcement will take place.  For more information about ServerSets, see the [Service Discovery](../../features/service-discovery/)
+documentation.
+
+By default, the hostname in the registered endpoints will be the `--hostname` parameter
+that is passed to the mesos agent. To override the hostname value, the executor can be started
+with `--announcer-hostname=<overridden_value>`. If you decide to use `--announcer-hostname` and if
+the overridden value needs to change for every executor, then the executor has to be started inside a wrapper, see [Executor Wrapper](../../operations/configuration/#thermos-executor-wrapper).
+
+For example, if you want the hostname in the endpoint to be an IP address instead of the hostname,
+the `--hostname` parameter to the mesos agent can be set to the machine IP or the executor can
+be started with `--announcer-hostname=<host_ip>` while wrapping the executor inside a script.
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```primary_port```             | String    | Which named port to register as the primary endpoint in the ServerSet (Default: `http`)
+| ```portmap```                  | dict      | A mapping of additional endpoints to be announced in the ServerSet (Default: `{ 'aurora': '{{primary_port}}' }`)
+| ```zk_path```                  | String    | Zookeeper serverset path override (executor must be started with the `--announcer-allow-custom-serverset-path` parameter)
+
+#### Port aliasing with the Announcer `portmap`
+
+The primary endpoint registered in the ServerSet is the one allocated to the port
+specified by the `primary_port` in the `Announcer` object, by default
+the `http` port.  This port can be referenced from anywhere within a configuration
+as `{{thermos.ports[http]}}`.
+
+Without the port map, each named port would be allocated a unique port number.
+The `portmap` allows two different named ports to be aliased together.  The default
+`portmap` aliases the `aurora` port (i.e. `{{thermos.ports[aurora]}}`) to
+the `http` port.  Even though the two ports can be referenced independently,
+only one port is allocated by Mesos.  Any port referenced in a `Process` object
+but which is not in the portmap will be allocated dynamically by Mesos and announced as well.
+
+It is possible to use the portmap to alias names to static port numbers, e.g.
+`{'http': 80, 'https': 443, 'aurora': 'http'}`.  In this case, referencing
+`{{thermos.ports[aurora]}}` would look up `{{thermos.ports[http]}}` then
+find a static port 80.  No port would be requested of or allocated by Mesos.
+
+Static ports should be used cautiously as Aurora does nothing to prevent two
+tasks with the same static port allocations from being co-scheduled.
+External constraints such as agent attributes should be used to enforce such
+guarantees should they be needed.
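+
+For example, a sketch of a Job announcing its `http` port and aliasing an
+additional static `https` port (the port numbers are illustrative only):
+
+    job = Job(
+      ...
+      announce = Announcer(
+        primary_port = 'http',
+        portmap = {'aurora': 'http', 'https': 443}))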
+
+
+### Container Objects
+
+Describes the container the job's processes will run inside. If not using Docker or the Mesos
+unified-container, the container can be omitted from your job config.
+
+  param          | type           | description
+  -----          | :----:         | -----------
+  ```mesos```    | Mesos          | A native Mesos container to use.
+  ```docker```   | Docker         | A Docker container to use (via Docker engine)
+
+### Mesos Object
+
+  param            | type                           | description
+  -----            | :----:                         | -----------
+  ```image```      | Choice(AppcImage, DockerImage) | An optional filesystem image to use within this container.
+  ```volumes```    | List(Volume)                   | An optional list of volume mounts for this container.
+
+### Volume Object
+
+  param                  | type     | description
+  -----                  | :----:   | -----------
+  ```container_path```   | String   | Mount point in the container.
+  ```host_path```        | String   | Path on the host to mount.
+  ```mode```             | Enum     | Mode of the mount, can be 'RW' or 'RO'.
+
+### AppcImage
+
+Describes an AppC filesystem image.
+
+  param          | type   | description
+  -----          | :----: | -----------
+  ```name```     | String | The name of the appc image.
+  ```image_id``` | String | The [image id](https://github.com/appc/spec/blob/master/spec/aci.md#image-id) of the appc image.
+
+### DockerImage
+
+Describes a Docker filesystem image.
+
+  param      | type   | description
+  -----      | :----: | -----------
+  ```name``` | String | The name of the docker image.
+  ```tag```  | String | The tag that identifies the docker image.
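+
+For example, a sketch of a Job running inside the Mesos unified containerizer with
+a Docker filesystem image and a read-only volume (the image name, tag, and paths
+are illustrative only):
+
+    job = Job(
+      ...
+      container = Mesos(
+        image = DockerImage(name = 'ubuntu', tag = '18.04'),
+        volumes = [Volume(container_path = '/data', host_path = '/mnt/data', mode = 'RO')]))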
+
+
+### Docker Object
+
+*Note: In order to correctly execute processes inside a job, the Docker container must have Python 2.7 installed.*
+
+*Note: For a private docker registry, mesos mandates the docker credential file to be named `.dockercfg`, even though docker may create a credential file with a different name on various platforms. Also, the `.dockercfg` file needs to be copied into the sandbox using the `-thermos_executor_resources` flag, specified while starting Aurora.*
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```image```      | String          | The name of the docker image to execute.  If the image does not exist locally it will be pulled with ```docker pull```.
+  ```parameters``` | List(Parameter) | Additional parameters to pass to the Docker engine.
+
+### Docker Parameter Object
+
+Docker CLI parameters. This needs to be enabled by the scheduler `-allow_docker_parameters` option.
+See [Docker Command Line Reference](https://docs.docker.com/reference/commandline/run/) for valid parameters.
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```name```       | String          | The name of the docker parameter. E.g. volume
+  ```value```      | String          | The value of the parameter. E.g. /usr/local/bin:/usr/bin:rw
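+
+For example, a sketch of a Docker container passing the `volume` parameter shown
+above (the image and paths are illustrative; `Parameter` refers to the Docker
+parameter object described in this section, and the scheduler must be started
+with `-allow_docker_parameters`):
+
+    job = Job(
+      ...
+      container = Docker(
+        image = 'python:2.7',
+        parameters = [Parameter(name = 'volume', value = '/usr/local/bin:/usr/bin:rw')]))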
+
+
+### LifecycleConfig Objects
+
+*Note: The only lifecycle configuration supported is the HTTP lifecycle via the HttpLifecycleConfig.*
+
+  param          | type                | description
+  -----          | :----:              | -----------
+  ```http```     | HttpLifecycleConfig | Configure the lifecycle manager to send lifecycle commands to the task via HTTP.
+
+### HttpLifecycleConfig Objects
+
+*Note: The combined `graceful_shutdown_wait_secs` and `shutdown_wait_secs` is implicitly upper bounded by the `--stop_timeout_in_secs` flag exposed by the executor (see options [here](https://github.com/apache/aurora/blob/master/src/main/python/apache/aurora/executor/bin/thermos_executor_main.py), default is 2 minutes). Therefore, if the user specifies values that add up to more than `--stop_timeout_in_secs`, the task will be killed earlier than the user anticipates (see the termination lifecycle [here](https://aurora.apache.org/documentation/latest/reference/task-lifecycle/#forceful-termination-killing-restarting)). Furthermore, `stop_timeout_in_secs` itself is implicitly upper bounded by two scheduler options: `transient_task_state_timeout` and `preemption_slot_hold_time` (see reference [here](http://aurora.apache.org/documentation/latest/reference/scheduler-configuration/)). If the `stop_timeout_in_secs` exceeds either of these scheduler options, tasks could be designated as LOST or tasks utilizing preemption could lose their desired slot respectively. Cluster operators should be aware of these timings should they change the defaults.*
+
+  param                             | type    | description
+  -----                             | :----:  | -----------
+  ```port```                        | String  | The named port to send POST commands. (Default: health)
+  ```graceful_shutdown_endpoint```  | String  | Endpoint to hit to indicate that a task should gracefully shutdown. (Default: /quitquitquit)
+  ```shutdown_endpoint```           | String  | Endpoint to hit to give a task its final warning before being killed. (Default: /abortabortabort)
+  ```graceful_shutdown_wait_secs``` | Integer | The amount of time (in seconds) to wait after hitting the ```graceful_shutdown_endpoint``` before proceeding with the [task termination lifecycle](https://aurora.apache.org/documentation/latest/reference/task-lifecycle/#forceful-termination-killing-restarting). (Default: 5)
+  ```shutdown_wait_secs```          | Integer | The amount of time (in seconds) to wait after hitting the ```shutdown_endpoint``` before proceeding with the [task termination lifecycle](https://aurora.apache.org/documentation/latest/reference/task-lifecycle/#forceful-termination-killing-restarting). (Default: 5)
+
+#### graceful_shutdown_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), a HTTP POST request will be sent over localhost to this
+endpoint to request that the task gracefully shut itself down.  This is a
+courtesy call before the `shutdown_endpoint` is invoked
+`graceful_shutdown_wait_secs` seconds later.
+
+#### shutdown_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), a HTTP POST request will be sent over localhost to this
+endpoint to request as a final warning before being shut down.  If the task
+does not shut down on its own after `shutdown_wait_secs` seconds, it will be
+forcefully killed.
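+
+For example, a sketch of a Job that gives its task 30 seconds after each shutdown
+endpoint is hit (the values are illustrative, and their sum must stay under the
+executor's `--stop_timeout_in_secs`):
+
+    job = Job(
+      ...
+      lifecycle = LifecycleConfig(
+        http = HttpLifecycleConfig(
+          graceful_shutdown_wait_secs = 30,
+          shutdown_wait_secs = 30)))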
+
+
+Specifying Scheduling Constraints
+=================================
+
+In the `Job` object there is a map `constraints` from String to String
+allowing the user to tailor the schedulability of tasks within the job.
+
+The constraint map's key value is the attribute name in which we
+constrain Tasks within our Job. The value is how we constrain them.
+There are two types of constraints: *limit constraints* and *value
+constraints*.
+
+| constraint    | description
+| ------------- | --------------
+| Limit         | A string that specifies a limit for a constraint. Starts with <code>'limit:</code> followed by an Integer and closing single quote, such as ```'limit:1'```.
+| Value         | A string that specifies a value for a constraint. To include a list of values, separate the values using commas. To negate the values of a constraint, start with a ```!```.
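+
+For example, a sketch of a Job combining a limit constraint and a negated value
+constraint (the `host` and `zone` attribute names are cluster-specific and purely
+illustrative):
+
+    job = Job(
+      ...
+      constraints = {
+        'host': 'limit:1',
+        'zone': '!zone-a',
+      })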
+
+Further details can be found in the [Scheduling Constraints](../../features/constraints/) feature
+description.
+
+
+Template Namespaces
+===================
+
+Currently, a few Pystachio namespaces have special semantics. Using them
+in your configuration allows you to tailor application behavior
+through environment introspection or interact in special ways with the
+Aurora client or Aurora-provided services.
+
+### mesos Namespace
+
+The `mesos` namespace contains variables which relate to the `mesos` agent
+which launched the task. The `instance` variable can be used
+to distinguish between Task replicas.
+
+| variable name     | type       | description
+| --------------- | :--------: | -------------
+| ```instance```    | Integer    | The instance number of the created task. A job with 5 replicas has instance numbers 0, 1, 2, 3, and 4.
+| ```hostname``` | String | The instance hostname that the task was launched on.
+
+Please note, there is no uniqueness guarantee for `instance` in the presence of
+network partitions. If that is required, it should be baked in at the application
+level using a distributed coordination service such as Zookeeper.
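+
+For example, a sketch of a Process that passes its replica number and host to an
+application (the script and flags are illustrative only):
+
+    server = Process(
+      name = 'server',
+      cmdline = './run_server.sh --shard={{mesos.instance}} --host={{mesos.hostname}}')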
+
+### thermos Namespace
+
+The `thermos` namespace contains variables that work directly on the
+Thermos platform in addition to Aurora. This namespace is fully
+compatible with Tasks invoked via the `thermos` CLI.
+
+| variable      | type                     | description                        |
+| :----------:  | ---------                | ------------                       |
+| ```ports```   | map of string to Integer | A map of names to port numbers     |
+| ```task_id``` | string                   | The task ID assigned to this task. |
+
+The `thermos.ports` namespace is automatically populated by Aurora when
+invoking tasks on Mesos. When running the `thermos` command directly,
+these ports must be explicitly mapped with the `-P` option.
+
+For example, if `{{thermos.ports[http]}}` is specified in a `Process`
+configuration, it is automatically extracted and auto-populated by
+Aurora, but must be specified with, for example, `thermos -P http:12345`
+to map `http` to port 12345 when running via the CLI.
diff --git a/source/documentation/0.20.0/reference/observer-configuration.md b/source/documentation/0.20.0/reference/observer-configuration.md
new file mode 100644
index 0000000..dcaca23
--- /dev/null
+++ b/source/documentation/0.20.0/reference/observer-configuration.md
@@ -0,0 +1,103 @@
+# Observer Configuration Reference
+
+The Aurora/Thermos observer can take a variety of configuration options through command-line arguments.
+A list of the available options can be seen by running `thermos_observer --long-help`.
+
+Please refer to the [Operator Configuration Guide](../../operations/configuration/) for details on how
+to properly set the most important options.
+
+```
+$ thermos_observer.pex --long-help
+Options:
+  -h, --help, --short-help
+                        show this help message and exit.
+  --long-help           show options from all registered modules, not just the
+                        __main__ module.
+  --mesos-root=MESOS_ROOT
+                        The mesos root directory to search for Thermos
+                        executor sandboxes [default: /var/lib/mesos]
+  --ip=IP               The IP address the observer will bind to. [default:
+                        0.0.0.0]
+  --port=PORT           The port on which the observer should listen.
+                        [default: 1338]
+  --polling_interval_secs=POLLING_INTERVAL_SECS
+                        The number of seconds between observer refresh
+                        attempts. [default: 5]
+  --task_process_collection_interval_secs=TASK_PROCESS_COLLECTION_INTERVAL_SECS
+                        The number of seconds between per task process
+                        resource collections. [default: 20]
+  --task_disk_collection_interval_secs=TASK_DISK_COLLECTION_INTERVAL_SECS
+                        The number of seconds between per task disk resource
+                        collections. [default: 60]
+  --enable_mesos_disk_collector
+                        Delegate per task disk usage collection to agent.
+                        Should be enabled in conjunction to disk isolation in
+                        Mesos-agent. This is not compatible with an
+                        authenticated agent API. [default: False]
+  --agent_api_url=AGENT_API_URL
+                        Mesos Agent API url. [default:
+                        http://localhost:5051/containers]
+  --executor_id_json_path=EXECUTOR_ID_JSON_PATH
+                        `jmespath` to executor_id key in agent response json
+                        object. [default: executor_id]
+  --disk_usage_json_path=DISK_USAGE_JSON_PATH
+                        `jmespath` to disk usage bytes value in agent response
+                        json object. [default: statistics.disk_used_bytes]
+
+  From module twitter.common.app:
+    --app_daemonize     Daemonize this application. [default: False]
+    --app_profile_output=FILENAME
+                        Dump the profiling output to a binary profiling
+                        format. [default: None]
+    --app_daemon_stderr=TWITTER_COMMON_APP_DAEMON_STDERR
+                        Direct this app's stderr to this file if daemonized.
+                        [default: /dev/null]
+    --app_debug         Print extra debugging information during application
+                        initialization. [default: False]
+    --app_rc_filename   Print the filename for the rc file and quit. [default:
+                        False]
+    --app_daemon_stdout=TWITTER_COMMON_APP_DAEMON_STDOUT
+                        Direct this app's stdout to this file if daemonized.
+                        [default: /dev/null]
+    --app_profiling     Run profiler on the code while it runs.  Note this can
+                        cause slowdowns. [default: False]
+    --app_ignore_rc_file
+                        Ignore default arguments from the rc file. [default:
+                        False]
+    --app_pidfile=TWITTER_COMMON_APP_PIDFILE
+                        The pidfile to use if --app_daemonize is specified.
+                        [default: None]
+
+  From module twitter.common.log.options:
+    --log_to_stdout=[scheme:]LEVEL
+                        OBSOLETE - legacy flag, use --log_to_stderr instead.
+                        [default: ERROR]
+    --log_to_stderr=[scheme:]LEVEL
+                        The level at which logging to stderr [default: ERROR].
+                        Takes either LEVEL or scheme:LEVEL, where LEVEL is one
+                        of ['INFO', 'NONE', 'WARN', 'ERROR', 'DEBUG', 'FATAL']
+                        and scheme is one of ['google', 'plain'].
+    --log_to_disk=[scheme:]LEVEL
+                        The level at which logging to disk [default: INFO].
+                        Takes either LEVEL or scheme:LEVEL, where LEVEL is one
+                        of ['INFO', 'NONE', 'WARN', 'ERROR', 'DEBUG', 'FATAL']
+                        and scheme is one of ['google', 'plain'].
+    --log_dir=DIR       The directory into which log files will be generated
+                        [default: /var/tmp].
+    --log_simple        Write a single log file rather than one log file per
+                        log level [default: False].
+    --log_to_scribe=[scheme:]LEVEL
+                        The level at which logging to scribe [default: NONE].
+                        Takes either LEVEL or scheme:LEVEL, where LEVEL is one
+                        of ['INFO', 'NONE', 'WARN', 'ERROR', 'DEBUG', 'FATAL']
+                        and scheme is one of ['google', 'plain'].
+    --scribe_category=CATEGORY
+                        The category used when logging to the scribe daemon.
+                        [default: python_default].
+    --scribe_buffer     Buffer messages when scribe is unavailable rather than
+                        dropping them. [default: False].
+    --scribe_host=HOST  The host running the scribe daemon. [default:
+                        localhost].
+    --scribe_port=PORT  The port used to connect to the scribe daemon.
+                        [default: 1463].
+```
diff --git a/source/documentation/0.20.0/reference/scheduler-configuration.md b/source/documentation/0.20.0/reference/scheduler-configuration.md
new file mode 100644
index 0000000..ca43ce6
--- /dev/null
+++ b/source/documentation/0.20.0/reference/scheduler-configuration.md
@@ -0,0 +1,256 @@
+# Scheduler Configuration Reference
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+A list of the available options can be seen by running `aurora-scheduler -help`.
+
+Please refer to the [Operator Configuration Guide](../../operations/configuration/) for details on how
+to properly set the most important options.
+
+```
+$ aurora-scheduler -help
+-------------------------------------------------------------------------
+-h or -help to print this help message
+
+Required flags:
+-backup_dir [not null]
+	Directory to store backups under. Will be created if it does not exist.
+-cluster_name [not null]
+	Name to identify the cluster being served.
+-framework_authentication_file
+	Properties file which contains framework credentials to authenticate with Mesosmaster. Must contain the properties 'aurora_authentication_principal' and 'aurora_authentication_secret'.
+-ip
+	The ip address to listen. If not set, the scheduler will listen on all interfaces.
+-mesos_master_address [not null]
+	Address for the mesos master, can be a socket address or zookeeper path.
+-mesos_role
+	The Mesos role this framework will register as. The default is to left this empty, and the framework will register without any role and only receive unreserved resources in offer.
+-serverset_path [not null, must be non-empty]
+	ZooKeeper ServerSet path to register at.
+-shiro_after_auth_filter
+	Fully qualified class name of the servlet filter to be applied after the shiro auth filters are applied.
+-thermos_executor_path
+	Path to the thermos executor entry point.
+-tier_config [file must be readable]
+	Configuration file defining supported task tiers, task traits and behaviors.
+-webhook_config [file must exist, file must be readable]
+	Path to webhook configuration file.
+-zk_endpoints [must have at least 1 item]
+	Endpoint specification for the ZooKeeper servers.
+
+Optional flags:
+-allow_container_volumes (default false)
+	Allow passing in volumes in the job. Enabling this could pose a privilege escalation threat.
+-allow_docker_parameters (default false)
+	Allow to pass docker container parameters in the job.
+-allow_gpu_resource (default false)
+	Allow jobs to request Mesos GPU resource.
+-allowed_container_types (default [MESOS])
+	Container types that are allowed to be used by jobs.
+-allowed_job_environments (default ^(prod|devel|test|staging\d*)$)
+	Regular expression describing the environments that are allowed to be used by jobs.
+-async_slot_stat_update_interval (default (1, mins))
+	Interval on which to try to update open slot stats.
+-async_task_stat_update_interval (default (1, hrs))
+	Interval on which to try to update resource consumption stats.
+-async_worker_threads (default 8)
+	The number of worker threads to process async task operations with.
+-backup_interval (default (1, hrs))
+	Minimum interval on which to write a storage backup.
+-cron_scheduler_num_threads (default 10)
+	Number of threads to use for the cron scheduler thread pool.
+-cron_scheduling_max_batch_size (default 10) [must be > 0]
+	The maximum number of triggered cron jobs that can be processed in a batch.
+-cron_start_initial_backoff (default (5, secs))
+	Initial backoff delay while waiting for a previous cron run to be killed.
+-cron_start_max_backoff (default (1, mins))
+	Max backoff delay while waiting for a previous cron run to be killed.
+-cron_timezone (default GMT)
+	TimeZone to use for cron predictions.
+-custom_executor_config [file must exist, file must be readable]
+	Path to custom executor settings configuration file.
+-default_docker_parameters (default {})
+	Default docker parameters for any job that does not explicitly declare parameters.
+-dlog_max_entry_size (default (512, KB))
+	Specifies the maximum entry size to append to the log. Larger entries will be split across entry Frames.
+-dlog_shutdown_grace_period (default (2, secs))
+	Specifies the maximum time to wait for scheduled checkpoint and snapshot actions to complete before forcibly shutting down.
+-dlog_snapshot_interval (default (1, hrs))
+	Specifies the frequency at which snapshots of local storage are taken and written to the log.
+-enable_cors_for
+	List of domains for which CORS support should be enabled.
+-enable_mesos_fetcher (default false)
+	Allow jobs to pass URIs to the Mesos Fetcher. Note that enabling this feature could pose a privilege escalation threat.
+-enable_preemptor (default true)
+	Enable the preemptor and preemption
+-enable_revocable_cpus (default true)
+	Treat CPUs as a revocable resource.
+-enable_revocable_ram (default false)
+	Treat RAM as a revocable resource.
+-executor_user (default root)
+	User to start the executor. Defaults to "root". Set this to an unprivileged user if the mesos master was started with "--no-root_submissions". If set to anything other than "root", the executor will ignore the "role" setting for jobs since it can't use setuid() anymore. This means that all your jobs will run under the specified user and the user has to exist on the Mesos agents.
+-first_schedule_delay (default (1, ms))
+	Initial amount of time to wait before first attempting to schedule a PENDING task.
+-flapping_task_threshold (default (5, mins))
+	A task that repeatedly runs for less than this time is considered to be flapping.
+-framework_announce_principal (default false)
+	When 'framework_authentication_file' flag is set, the FrameworkInfo registered with the mesos master will also contain the principal. This is necessary if you intend to use mesos authorization via mesos ACLs. The default will change in a future release. Changing this value is backwards incompatible. For details, see MESOS-703.
+-framework_failover_timeout (default (21, days))
+	Time after which a framework is considered deleted.  SHOULD BE VERY HIGH.
+-framework_name (default Aurora)
+	Name used to register the Aurora framework with Mesos.
+-global_container_mounts (default [])
+	A comma separated list of mount points (in host:container form) to mount into all (non-mesos) containers.
+-history_max_per_job_threshold (default 100)
+	Maximum number of terminated tasks to retain in a job history.
+-history_min_retention_threshold (default (1, hrs))
+	Minimum guaranteed time for task history retention before any pruning is attempted.
+-history_prune_threshold (default (2, days))
+	Time after which the scheduler will prune terminated task history.
+-hostname
+	The hostname to advertise in ZooKeeper instead of the locally-resolved hostname.
+-http_authentication_mechanism (default NONE)
+	HTTP Authentication mechanism to use.
+-http_port (default 0)
+	The port to start an HTTP server on.  Default value will choose a random port.
+-initial_flapping_task_delay (default (30, secs))
+	Initial amount of time to wait before attempting to schedule a flapping task.
+-initial_schedule_penalty (default (1, secs))
+	Initial amount of time to wait before attempting to schedule a task that has failed to schedule.
+-initial_task_kill_retry_interval (default (5, secs))
+	When killing a task, retry after this delay if mesos has not responded, backing off up to transient_task_state_timeout
+-job_update_history_per_job_threshold (default 10)
+	Maximum number of completed job updates to retain in a job update history.
+-job_update_history_pruning_interval (default (15, mins))
+	Job update history pruning interval.
+-job_update_history_pruning_threshold (default (30, days))
+	Time after which the scheduler will prune completed job update history.
+-kerberos_debug (default false)
+	Produce additional Kerberos debugging output.
+-kerberos_server_keytab
+	Path to the server keytab.
+-kerberos_server_principal
+	Kerberos server principal to use, usually of the form HTTP/aurora.example.com@EXAMPLE.COM
+-max_flapping_task_delay (default (5, mins))
+	Maximum delay between attempts to schedule a flapping task.
+-max_leading_duration (default (1, days))
+	After leading for this duration, the scheduler should commit suicide.
+-max_registration_delay (default (1, mins))
+	Max allowable delay to allow the driver to register before aborting
+-max_reschedule_task_delay_on_startup (default (30, secs))
+	Upper bound of random delay for pending task rescheduling on scheduler startup.
+-max_saved_backups (default 48)
+	Maximum number of backups to retain before deleting the oldest backups.
+-max_schedule_attempts_per_sec (default 40.0)
+	Maximum number of scheduling attempts to make per second.
+-max_schedule_penalty (default (1, mins))
+	Maximum delay between attempts to schedule a PENDING task.
+-max_status_update_batch_size (default 1000) [must be > 0]
+	The maximum number of status updates that can be processed in a batch.
+-max_task_event_batch_size (default 300) [must be > 0]
+	The maximum number of task state change events that can be processed in a batch.
+-max_tasks_per_job (default 4000) [must be > 0]
+	Maximum number of allowed tasks in a single job.
+-max_tasks_per_schedule_attempt (default 5) [must be > 0]
+	The maximum number of tasks to pick in a single scheduling attempt.
+-max_update_instance_failures (default 20000) [must be > 0]
+	Upper limit on the number of failures allowed during a job update. This helps cap potentially unbounded entries into storage.
+-min_offer_hold_time (default (5, mins))
+	Minimum amount of time to hold a resource offer before declining.
+-native_log_election_retries (default 20)
+	The maximum number of attempts to obtain a new log writer.
+-native_log_election_timeout (default (15, secs))
+	The timeout for a single attempt to obtain a new log writer.
+-native_log_file_path
+	Path to a file to store the native log data in.  If the parent directory does not exist it will be created.
+-native_log_quorum_size (default 1)
+	The size of the quorum required for all log mutations.
+-native_log_read_timeout (default (5, secs))
+	The timeout for doing log reads.
+-native_log_write_timeout (default (3, secs))
+	The timeout for doing log appends and truncations.
+-native_log_zk_group_path
+	A zookeeper node for use by the native log to track the master coordinator.
+-offer_filter_duration (default (5, secs))
+	Duration after which we expect Mesos to re-offer unused resources. A short duration improves scheduling performance in smaller clusters, but might lead to resource starvation for other frameworks if you run many frameworks in your cluster.
+-offer_hold_jitter_window (default (1, mins))
+	Maximum amount of random jitter to add to the offer hold time window.
+-offer_reservation_duration (default (3, mins))
+	Time to reserve an agent's offers while trying to satisfy a task preempting another.
+-offer_set_module (default [class org.apache.aurora.scheduler.offers.OfferSetModule])
+  Guice module for replacing offer holding and scheduling logic.
+-partition_aware (default false)
+  Whether or not to integrate with the partition-aware Mesos capabilities.
+-populate_discovery_info (default false)
+	If true, Aurora populates DiscoveryInfo field of Mesos TaskInfo.
+-preemption_delay (default (3, mins))
+	Time interval after which a pending task becomes eligible to preempt other tasks
+-preemption_slot_finder_modules (default [class org.apache.aurora.scheduler.preemptor.PendingTaskProcessorModule, class org.apache.aurora.scheduler.preemptor.PreemptionVictimFilterModule])
+  Guice modules for replacing preemption logic.
+-preemption_slot_hold_time (default (5, mins))
+	Time to hold a preemption slot found before it is discarded.
+-preemption_slot_search_interval (default (1, mins))
+	Time interval between pending task preemption slot searches.
+-receive_revocable_resources (default false)
+	Allows receiving revocable resource offers from Mesos.
+-reconciliation_explicit_batch_interval (default (5, secs))
+	Interval between explicit batch reconciliation requests.
+-reconciliation_explicit_batch_size (default 1000) [must be > 0]
+	Number of tasks in a single batch request sent to Mesos for explicit reconciliation.
+-reconciliation_explicit_interval (default (60, mins))
+	Interval on which scheduler will ask Mesos for status updates of all non-terminal tasks known to scheduler.
+-reconciliation_implicit_interval (default (60, mins))
+	Interval on which scheduler will ask Mesos for status updates of all non-terminal tasks known to Mesos.
+-reconciliation_initial_delay (default (1, mins))
+	Initial amount of time to delay task reconciliation after scheduler start up.
+-reconciliation_schedule_spread (default (30, mins))
+	Difference between explicit and implicit reconciliation intervals intended to create a non-overlapping task reconciliation schedule.
+-require_docker_use_executor (default true)
+	If false, Docker tasks may run without an executor (EXPERIMENTAL)
+-scheduling_max_batch_size (default 3) [must be > 0]
+	The maximum number of scheduling attempts that can be processed in a batch.
+-serverset_endpoint_name (default http)
+	Name of the scheduler endpoint published in ZooKeeper.
+-shiro_ini_path
+	Path to shiro.ini for authentication and authorization configuration.
+-shiro_realm_modules (default [class org.apache.aurora.scheduler.http.api.security.IniShiroRealmModule])
+	Guice modules for configuring Shiro Realms.
+-sla_non_prod_metrics (default [])
+	Metric categories collected for non production tasks.
+-sla_prod_metrics (default [JOB_UPTIMES, PLATFORM_UPTIME, MEDIANS])
+	Metric categories collected for production tasks.
+-sla_stat_refresh_interval (default (1, mins))
+	The SLA stat refresh interval.
+-stat_retention_period (default (1, hrs))
+	Time for a stat to be retained in memory before expiring.
+-stat_sampling_interval (default (1, secs))
+	Statistic value sampling interval.
+-task_assigner_modules (default [class org.apache.aurora.scheduler.scheduling.TaskAssignerImplModule])
+  Guice modules for replacing task assignment logic.
+-thermos_executor_cpu (default 0.25)
+	The number of CPU cores to allocate for each instance of the executor.
+-thermos_executor_flags
+	Extra arguments to be passed to the thermos executor
+-thermos_executor_ram (default (128, MB))
+	The amount of RAM to allocate for each instance of the executor.
+-thermos_executor_resources (default [])
+	A comma separated list of additional resources to copy into the sandbox. Note: if thermos_executor_path is not the thermos_executor.pex file itself, this must include it.
+-thermos_home_in_sandbox (default false)
+	If true, changes HOME to the sandbox before running the executor. This primarily has the effect of causing the executor and runner to extract themselves into the sandbox.
+-thrift_method_interceptor_modules (default [])
+	Additional Guice modules for intercepting Thrift method calls.
+-transient_task_state_timeout (default (5, mins))
+	The amount of time after which to treat a task stuck in a transient state as LOST.
+-viz_job_url_prefix (default )
+	URL prefix for job container stats.
+-zk_chroot_path
+	chroot path to use for the ZooKeeper connections
+-zk_digest_credentials
+	user:password to use when authenticating with ZooKeeper.
+-zk_in_proc (default false)
+	Launches an embedded zookeeper server for local testing causing -zk_endpoints to be ignored if specified.
+-zk_session_timeout (default (4, secs))
+	The ZooKeeper session timeout.
+-zk_use_curator (default true)
+	DEPRECATED: Uses Apache Curator as the zookeeper client; otherwise a copy of Twitter commons/zookeeper (the legacy library) is used.
+-------------------------------------------------------------------------
+```
diff --git a/source/documentation/0.20.0/reference/scheduler-endpoints.md b/source/documentation/0.20.0/reference/scheduler-endpoints.md
new file mode 100644
index 0000000..ddae76b
--- /dev/null
+++ b/source/documentation/0.20.0/reference/scheduler-endpoints.md
@@ -0,0 +1,19 @@
+# HTTP endpoints
+
+There are a number of HTTP endpoints that the Aurora scheduler exposes. These allow various
+operational tasks to be performed on the scheduler. Below is an (incomplete) list of such endpoints
+and a brief explanation of what they do.
+
+## Leader health
+The `/leaderhealth` endpoint enables performing health checks on the scheduler instances in order
+to forward requests to the leading scheduler. This is typically used by a load balancer such as
+HAProxy or AWS ELB.
+
+When an HTTP GET request is issued on this endpoint, it responds as follows:
+
+- If the instance that received the GET request is the leading scheduler, an HTTP status code of
+  `200 OK` is returned.
+- If the instance that received the GET request is not the leading scheduler but a leader does
+  exist, an HTTP status code of `503 SERVICE_UNAVAILABLE` is returned.
+- If no leader currently exists or the leader is unknown, an HTTP status code of `502 BAD_GATEWAY`
+  is returned.
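+
+For illustration, a minimal health probe against this endpoint might look like the following
+sketch (Python; the scheduler hostnames and port are placeholders and depend on your deployment):
+
+    import urllib.error
+    import urllib.request
+
+    SCHEDULERS = ['http://scheduler1:8081', 'http://scheduler2:8081', 'http://scheduler3:8081']
+
+    def find_leader(schedulers=SCHEDULERS):
+      """Return the base URL of the leading scheduler, or None if no leader is known."""
+      for base in schedulers:
+        try:
+          # Only the leading scheduler answers /leaderhealth with 200 OK.
+          with urllib.request.urlopen(base + '/leaderhealth', timeout=2) as resp:
+            if resp.status == 200:
+              return base
+        except urllib.error.HTTPError:
+          # 503: healthy follower; 502: no leader currently known.
+          continue
+        except OSError:
+          # Host unreachable or request timed out.
+          continue
+      return None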
diff --git a/source/documentation/0.20.0/reference/task-lifecycle.md b/source/documentation/0.20.0/reference/task-lifecycle.md
new file mode 100644
index 0000000..b4642b3
--- /dev/null
+++ b/source/documentation/0.20.0/reference/task-lifecycle.md
@@ -0,0 +1,175 @@
+# Task Lifecycle
+
+When Aurora reads a configuration file and finds a `Job` definition, it:
+
+1.  Evaluates the `Job` definition.
+2.  Splits the `Job` into its constituent `Task`s.
+3.  Sends those `Task`s to the scheduler.
+4.  The scheduler puts the `Task`s into `PENDING` state, starting each
+    `Task`'s life cycle.
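+
+For reference, a minimal configuration file containing such a `Job` definition might look like
+the following sketch (the cluster, role, and resource values are placeholders):
+
+    hello = Process(
+      name = 'hello',
+      cmdline = 'echo hello world && sleep 60')
+
+    hello_task = Task(
+      name = 'hello_task',
+      processes = [hello],
+      resources = Resources(cpu = 0.1, ram = 32 * MB, disk = 64 * MB))
+
+    # Two instances means this Job is split into two Tasks, each starting in PENDING.
+    jobs = [Job(
+      cluster = 'devcluster',
+      role = 'www-data',
+      environment = 'devel',
+      name = 'hello_world',
+      instances = 2,
+      task = hello_task)]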
+
+
+![Life of a task](../images/lifeofatask.png)
+
+Please note, a couple of task states described below are missing from
+this state diagram.
+
+
+## PENDING to RUNNING states
+
+When a `Task` is in the `PENDING` state, the scheduler constantly
+searches for machines satisfying that `Task`'s resource request
+requirements (RAM, disk space, CPU time) while maintaining configuration
+constraints such as "a `Task` must run on machines  dedicated  to a
+particular role" or attribute limit constraints such as "at most 2
+`Task`s from the same `Job` may run on each rack". When the scheduler
+finds a suitable match, it assigns the `Task` to a machine and puts the
+`Task` into the `ASSIGNED` state.
+
+From the `ASSIGNED` state, the scheduler sends an RPC to the agent
+machine containing `Task` configuration, which the agent uses to spawn
+an executor responsible for the `Task`'s lifecycle. When the scheduler
+receives an acknowledgment that the machine has accepted the `Task`,
+the `Task` goes into `STARTING` state.
+
+`STARTING` state initializes a `Task` sandbox. When the sandbox is fully
+initialized, Thermos begins to invoke `Process`es. The agent machine sends
+an update to the scheduler that the `Task` is in `RUNNING` state only once
+the task satisfies its liveness requirements.
+See [Health Checking](../features/services#health-checking) for more details
+on how to configure health checks.
+
+
+
+## RUNNING to terminal states
+
+There are various ways that an active `Task` can transition into a terminal
+state. By definition, it can never leave this state. However, depending on
+the nature of the termination and the originating `Job` definition
+(e.g. `service`, `max_task_failures`), a replacement `Task` might be
+scheduled.
+
+### Natural Termination: FINISHED, FAILED
+
+A `RUNNING` `Task` can terminate without direct user interaction. For
+example, it may be a finite computation that finishes, even something as
+simple as `echo hello world.`, or it could be an exceptional condition in
+a long-lived service. If the `Task` is successful (its underlying
+processes have succeeded with exit status `0` or finished without
+reaching failure limits) it moves into `FINISHED` state. If it finished
+after reaching a set of failure limits, it goes into `FAILED` state.
+
+A terminated `Task` which is subject to rescheduling will be temporarily
+`THROTTLED` if it is considered to be flapping. A task is flapping if its
+previous invocation was terminated after less than 5 minutes (scheduler
+default). The time penalty a task has to remain in the `THROTTLED` state
+before it is eligible for rescheduling increases with each consecutive
+failure.
+
+### Forceful Termination: KILLING, RESTARTING
+
+You can terminate a `Task` by issuing an `aurora job kill` command, which
+moves it into `KILLING` state. The scheduler then sends the agent a
+request to terminate the `Task`. If the scheduler receives a successful
+response, it moves the Task into `KILLED` state and never restarts it.
+
+If a `Task` is forced into the `RESTARTING` state via the `aurora job restart`
+command, the scheduler kills the underlying task but in parallel schedules
+an identical replacement for it.
+
+In any case, the responsible executor on the agent follows an escalation
+sequence when killing a running task:
+
+  1. If a `HttpLifecycleConfig` is not present, skip to (4).
+  2. Send a POST to the `graceful_shutdown_endpoint` and wait
+  `graceful_shutdown_wait_secs` seconds.
+  3. Send a POST to the `shutdown_endpoint` and wait
+  `shutdown_wait_secs` seconds.
+  4. Send SIGTERM (`kill`) and wait at most `finalization_wait` seconds.
+  5. Send SIGKILL (`kill -9`).
+
+If the executor notices that all `Process`es in a `Task` have aborted
+during this sequence, it will not proceed with subsequent steps.
+Note that graceful shutdown is best-effort, and due to the many
+inevitable realities of distributed systems, it may not be performed.
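+
+As an illustration, the HTTP portion of this escalation is driven by a lifecycle configuration
+along the lines of the following sketch (the endpoints shown are the usual defaults, the wait
+times are arbitrary, and the exact placement of the `lifecycle` field is described in the
+configuration reference):
+
+    lifecycle = LifecycleConfig(
+      http = HttpLifecycleConfig(
+        # POSTed to first; the executor then waits graceful_shutdown_wait_secs.
+        graceful_shutdown_endpoint = '/quitquitquit',
+        graceful_shutdown_wait_secs = 5,
+        # POSTed to next; the executor then waits shutdown_wait_secs before SIGTERM.
+        shutdown_endpoint = '/abortabortabort',
+        shutdown_wait_secs = 5))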
+
+### Unexpected Termination: LOST
+
+If a `Task` stays in a transient task state for too long (such as `ASSIGNED`
+or `STARTING`), the scheduler forces it into `LOST` state, creating a new
+`Task` in its place that's sent into `PENDING` state.
+
+In addition, if the Mesos core tells the scheduler that an agent has
+become unhealthy (or outright disappeared), the `Task`s assigned to that
+agent go into `LOST` state and new `Task`s are created in their place.
+From `PENDING` state, there is no guarantee a `Task` will be reassigned
+to the same machine unless job constraints explicitly force it there.
+
+
+## RUNNING to PARTITIONED states
+If Aurora is configured to enable partition awareness, a task which is in a
+running state can transition to `PARTITIONED`. This happens when the state
+of the task in Mesos becomes unknown. By default Aurora errs on the side of
+availability, so all tasks that transition to `PARTITIONED` are immediately
+transitioned to `LOST`.
+
+This policy is not ideal for all types of workloads you may wish to run in
+your Aurora cluster, e.g. for jobs where task failures are very expensive.
+So job owners may set their own `PartitionPolicy` where they can control
+how long to remain in `PARTITIONED` before transitioning to `LOST`. Or they
+can disable any automatic attempts to `reschedule` when in `PARTITIONED`,
+effectively waiting out the partition for as long as possible.
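+
+A sketch of what this might look like in a job configuration (assuming the `PartitionPolicy`
+fields described in the configuration reference; the delay value is illustrative):
+
+    jobs = [Job(
+      # ... other job fields ...
+      partition_policy = PartitionPolicy(
+        # Keep the task in PARTITIONED for up to 10 minutes before rescheduling it.
+        reschedule = True,
+        delay_secs = 600))]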
+
+
+## PARTITIONED and transient states
+The `PartitionPolicy` provided by users only applies to tasks which are
+currently running. When tasks are moving in and out of transient states,
+e.g. tasks being updated, restarted, preempted, etc., `PARTITIONED` tasks
+are moved immediately to `LOST`. This prevents situations where system
+or user-initiated actions are blocked indefinitely waiting for partitions
+to resolve (that may never be resolved).
+
+
+### Giving Priority to Production Tasks: PREEMPTING
+
+Sometimes a Task needs to be interrupted, such as when a non-production
+Task's resources are needed by a higher priority production Task. This
+type of interruption is called a *pre-emption*. When this happens in
+Aurora, the non-production Task is killed and moved into
+the `PREEMPTING` state when both of the following are true:
+
+- The task being killed is a non-production task.
+- The other task is a `PENDING` production task that hasn't been
+  scheduled due to a lack of resources.
+
+The scheduler UI shows the non-production task was preempted in favor of
+the production task. At some point, tasks in `PREEMPTING` move to `KILLED`.
+
+Note that non-production tasks consuming many resources are likely to be
+preempted in favor of production tasks.
+
+### Making Room for Maintenance: DRAINING
+
+Cluster operators can set an agent into maintenance mode. This will transition
+all `Task`s running on this agent into `DRAINING` and eventually to `KILLED`.
+Drained `Task`s will be restarted on other agents for which no maintenance
+has been announced yet.
+
+
+
+## State Reconciliation
+
+Due to the many inevitable realities of distributed systems, there might
+be a mismatch of perceived and actual cluster state (e.g. a machine returns
+from a `netsplit` but the scheduler has already marked all its `Task`s as
+`LOST` and rescheduled them).
+
+Aurora regularly runs a state reconciliation process in order to detect
+and correct such issues (e.g. by killing the errant `RUNNING` tasks).
+By default, the proper detection of all failure scenarios and inconsistencies
+may take up to an hour.
+
+To emphasize this point: there is no uniqueness guarantee for a single
+instance of a job in the presence of network partitions. If the `Task`
+requires that, it should be baked in at the application level using a
+distributed coordination service such as ZooKeeper.
diff --git a/source/documentation/0.21.0/additional-resources/presentations.md b/source/documentation/0.21.0/additional-resources/presentations.md
new file mode 100644
index 0000000..db4253d
--- /dev/null
+++ b/source/documentation/0.21.0/additional-resources/presentations.md
@@ -0,0 +1,80 @@
+# Apache Aurora Presentations
+Video and slides from presentations and panel discussions about Apache Aurora.
+
+_(Listed in date descending order)_
+
+<table>
+
+	<tr>
+		<td><img src="/documentation/0.21.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png" alt="Mesos and Aurora on a Small Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=q5iIqhaCJ_o">Mesos &amp; Aurora on a Small Scale (Video)</a></strong>
+		<p>Presented by Florian Pfeiffer</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.21.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png" alt="SLA Aware Maintenance for Operators Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=tZ0-SISvCis">SLA Aware Maintenance for Operators (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.21.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png" alt="Shipping Code with Aurora Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=y1hi7K1lPkk">Shipping Code with Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.21.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png" alt="Twitter Production Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=nNrh-gdu9m4">Twitter’s Production Scale: Mesos and Aurora Operations (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.21.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png" alt="From Monolith to Microservices with Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=yXkOgnyK4Hw">From Monolith to Microservices w/ Aurora (Video)</a></strong>
+		<p>Presented by Thanos Baskous, Tony Dong, Dobromir Montauk</p>
+		<p>April 30, 2015 at <a href="http://www.meetup.com/Bay-Area-Apache-Aurora-Users-Group/events/221219480/">Bay Area Apache Aurora Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.21.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png" alt="Aurora + Mesos in Practice at Twitter Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=1XYJGX_qZVU">Aurora + Mesos in Practice at Twitter (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 07, 2015 at <a href="http://www.bigeng.io/aurora-mesos-in-practice-at-twitter">Bigcommerce TechTalk</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.21.0/images/presentations/02_28_2015_apache_aurora_thumb.png" alt="Apache Auroraの始めかた Slideshow Thumbnail" /></td>
+		<td><strong><a href="http://www.slideshare.net/zembutsu/apache-aurora-introduction-and-tutorial-osc15tk">Apache Auroraの始めかた (Slides)</a></strong>
+		<p>Presented by Masahito Zembutsu</p>
+		<p>February 28, 2015 at <a href="http://www.ospn.jp/osc2015-spring/">Open Source Conference 2015 Tokyo Spring</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.21.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png" alt="Apache Aurora Adopters Panel Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=2Jsj0zFdRlg">Apache Aurora Adopters Panel (Video)</a></strong>
+		<p>Panelists Ben Staffin, Josh Adams, Bill Farner, Berk Demir</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.21.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png" alt="Operating Apache Aurora and Mesos at Twitter Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=E4lxX6epM_U">Operating Apache Aurora and Mesos at Twitter (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.21.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png" alt="Apache Aurora and Mesos at TellApart" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=ZZXtXLvTXAE">Apache Aurora and Mesos at TellApart (Video)</a></strong>
+		<p>Presented by Steve Niemitz</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.21.0/images/presentations/08_21_2014_past_present_future_thumb.png" alt="Past, Present, and Future of the Aurora Scheduler Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=Dsc5CPhKs4o">Past, Present, and Future of the Aurora Scheduler (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 21, 2014 at <a href="http://events.linuxfoundation.org/events/archive/2014/mesoscon">#MesosCon 2014</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.21.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png" alt="Introduction to Apache Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=asd_h6VzaJc">Introduction to Apache Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 25, 2014 at <a href="https://www.eventbrite.com/e/aurora-and-mesosframeworksmeetup-tickets-10850994617">Aurora and Mesos Frameworks Meetup</a></p></td>
+	</tr>
+</table>
diff --git a/source/documentation/0.21.0/additional-resources/tools.md b/source/documentation/0.21.0/additional-resources/tools.md
new file mode 100644
index 0000000..262d16d
--- /dev/null
+++ b/source/documentation/0.21.0/additional-resources/tools.md
@@ -0,0 +1,24 @@
+# Tools
+
+Various tools integrate with Aurora. Is there a tool missing? Let us know, or submit a patch to add it!
+
+* Load-balancing technology used to direct traffic to services running on Aurora:
+  - [synapse](https://github.com/airbnb/synapse) based on HAProxy
+  - [aurproxy](https://github.com/tellapart/aurproxy) based on nginx
+  - [jobhopper](https://github.com/benley/aurora-jobhopper) performs HTTP redirects for easy developer and administrator access
+
+* RPC libraries that integrate with Aurora's [service discovery mechanism](../../features/service-discovery/):
+  - [linkerd](https://linkerd.io/) RPC proxy
+  - [finagle](https://twitter.github.io/finagle) (Scala)
+  - [scales](https://github.com/steveniemitz/scales) (Python)
+
+* Monitoring:
+  - [collectd-aurora](https://github.com/zircote/collectd-aurora) for cluster monitoring using collectd
+  - [Prometheus Aurora exporter](https://github.com/tommyulfsparre/aurora_exporter) for cluster monitoring using Prometheus
+  - [Prometheus service discovery integration](http://prometheus.io/docs/operating/configuration/#zookeeper-serverset-sd-configurations-serverset_sd_config) for discovering and monitoring services running on Aurora
+
+* Packaging and deployment:
+  - [aurora-packaging](https://github.com/apache/aurora-packaging), the source of the official Aurora packages
+
+* Thrift Clients:
+  - [gorealis](https://github.com/paypal/gorealis) for communicating with the scheduler using Go
diff --git a/source/documentation/0.21.0/contributing.md b/source/documentation/0.21.0/contributing.md
new file mode 100644
index 0000000..76d4cef
--- /dev/null
+++ b/source/documentation/0.21.0/contributing.md
@@ -0,0 +1,93 @@
+## Get the Source Code
+
+First things first, you'll need the source! The Aurora source is available from Apache git:
+
+    git clone https://gitbox.apache.org/repos/asf/aurora
+
+Read the Style Guides
+---------------------
+Aurora's codebase is primarily Java and Python and conforms to the Twitter Commons styleguides for
+both languages.
+
+- [Java Style Guide](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/styleguide.md)
+- [Python Style Guide](https://github.com/twitter/commons/blob/master/src/python/twitter/common/styleguide.md)
+
+## Find Something to Do
+
+There are issues in [Jira](https://issues.apache.org/jira/browse/AURORA) with the
+["newbie" label](https://issues.apache.org/jira/issues/?jql=project%20%3D%20AURORA%20AND%20labels%20%3D%20newbie%20and%20resolution%3Dunresolved)
+that are good starting places for new Aurora contributors; pick one of these and dive in! To assign
+a task to yourself, first ask for your JIRA id to be whitelisted, either by asking in IRC/Slack or by
+emailing dev@aurora.apache.org. Once your JIRA account has been whitelisted you can assign tickets
+to yourself. The next step is to prepare your patch and finally post it for review.
+
+## Getting your ReviewBoard Account
+
+Go to https://reviews.apache.org and create an account.
+
+## Setting up your ReviewBoard Environment
+
+Run `./rbt status`. The first time this runs it will bootstrap and you will be asked to log in.
+Subsequent runs will cache your login credentials.
+
+## Submitting a Patch for Review
+
+Post a review with `rbt`, fill out the fields in your browser and hit Publish.
+
+    ./rbt post -o
+
+If you're unsure about who to add as a reviewer, you can default to adding Stephan Erb (StephanErb) and
+Renan DelValle (rdelvalle). They will take care of finding an appropriate reviewer for the patch.
+
+Once you've done this, you probably want to mark the associated Jira issue as Reviewable.
+
+## Updating an Existing Review
+
+Incorporate review feedback, make some more commits, update your existing review, fill out the
+fields in your browser and hit Publish.
+
+    ./rbt post -o -r <RB_ID>
+
+## Getting Your Review Merged
+
+If you're not an Aurora committer, one of the committers will merge your change in as described
+below. Generally, the last reviewer to give the review a 'Ship It!' will be responsible.
+
+### Merging Your Own Review (Committers)
+
+Once you have shipits from the right committers, merge your changes in a single commit and mark
+the review as submitted. The typical workflow is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>  # Verify the automatically-generated commit message looks sane,
+                            # editing if necessary.
+    git show master         # Verify everything looks sane
+    git push origin master
+    ./rbt close <RB_ID>
+
+Note that even if you're developing using feature branches you will not use `git merge` - each
+commit will be an atomic change accompanied by a ReviewBoard entry.
+
+### Merging Someone Else's Review
+
+Sometimes you'll need to merge someone else's RB. The typical workflow for this is
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>
+    git show master  # Verify everything looks sane, author is correct
+    git push origin master
+
+Note for committers: while we generally use the commit message generated by `./rbt patch`, some
+changes are often required:
+
+1. Ensure the commit message does not exceed 100 characters per line.
+2. Remove the "Testing Done" section. It's generally redundant (can be seen by checking the linked
+  review) or entirely irrelevant to the commit itself.
+
+## Cleaning Up
+
+Your patch has landed, congratulations! The last thing you'll want to do before moving on to your
+next fix is to clean up your Jira and ReviewBoard entries: the former should be marked as
+"Resolved" and the latter as "Submitted".
diff --git a/source/documentation/0.21.0/development/client.md b/source/documentation/0.21.0/development/client.md
new file mode 100644
index 0000000..c6bec5b
--- /dev/null
+++ b/source/documentation/0.21.0/development/client.md
@@ -0,0 +1,148 @@
+Developing the Aurora Client
+============================
+
+The client is written in Python, and uses the
+[Pants](http://pantsbuild.github.io/python-readme.html) build tool.
+
+
+Building and Testing
+--------------------
+
+Building and testing the client code are both done using Pants. The relevant targets to know about
+are:
+
+   * Build a client executable: `./pants binary src/main/python/apache/aurora/client:aurora`
+   * Test client code: `./pants test src/test/python/apache/aurora/client/cli:cli`
+
+If you want to build a source distribution of the client, you need to run `./build-support/release/make-python-sdists`.
+
+
+Creating Custom Builds
+----------------------
+
+There are situations where you may want to plug in custom logic to the Client that may not be
+applicable to the open source codebase. Rather than create a whole CLI from scratch, you can
+easily create your own custom, drop-in replacement aurora.pex using the pants build tool.
+
+First, create an AuroraCommandLine implementation as an entry-point for registering customizations:
+
+    from apache.aurora.client.cli.client import AuroraCommandLine
+
+    class CustomAuroraCommandLine(AuroraCommandLine):
+      """Custom AuroraCommandLine for your needs"""
+
+      @property
+      def name(self):
+        return "your-company-aurora"
+
+      @classmethod
+      def get_description(cls):
+        return 'Your Company internal Aurora client command line'
+
+      def __init__(self):
+        super(CustomAuroraCommandLine, self).__init__()
+        # Add custom plugins.
+        self.register_plugin(YourCustomPlugin())
+
+      def register_nouns(self):
+        super(CustomAuroraCommandLine, self).register_nouns()
+        # You can even add new commands / sub-commands!
+        self.register_noun(YourStartUpdateProxy())
+        self.register_noun(YourDeployWorkflowCommand())
+
+Secondly, create a main entry point:
+
+    import sys
+
+    def proxy_main():
+      client = CustomAuroraCommandLine()
+      if len(sys.argv) == 1:
+        sys.argv.append("-h")
+      sys.exit(client.execute(sys.argv[1:]))
+
+Finally, you can wire everything up with a pants BUILD file in your project directory:
+
+    python_binary(
+      name='aurora',
+      entry_point='your_company.aurora.client:proxy_main',
+      dependencies=[
+        ':client_lib'
+      ]
+    )
+
+    python_library(
+      name='client_lib',
+      sources = [
+        'client.py',
+        'custom_plugin.py',
+        'custom_command.py',
+      ],
+      dependencies = [
+        # The Apache Aurora client
+        # Any other dependencies for your custom code
+      ],
+    )
+
+Using the same commands to build the client as above (but obviously pointing to this BUILD file
+instead), you will have a drop-in replacement aurora.pex file with your customizations.
+
+Running/Debugging
+------------------
+
+For manually testing client changes against a cluster, we use [Vagrant](https://www.vagrantup.com/).
+To start a virtual cluster, you need to install Vagrant, and then run `vagrant up` from the root of
+the aurora workspace. This will create a vagrant host named "devcluster", with a Mesos master, a set
+of Mesos agents, and an Aurora scheduler.
+
+If you have a change you would like to test in your local cluster, you'll rebuild the client:
+
+    vagrant ssh -c 'aurorabuild client'
+
+Once this completes, the `aurora` command will reflect your changes.
+
+
+Running/Debugging in PyCharm
+-----------------------------
+
+It's possible to use PyCharm to run and debug both the client and client tests in an IDE. In order
+to do this, first run:
+
+    build-support/python/make-pycharm-virtualenv
+
+This script will configure a virtualenv with all of our Python requirements. Once the script
+completes it will emit instructions for configuring PyCharm:
+
+    Your PyCharm environment is now set up.  You can open the project root
+    directory with PyCharm.
+
+    Once the project is loaded:
+      - open project settings
+      - click 'Project Interpreter'
+      - click the cog in the upper-right corner
+      - click 'Add Local'
+      - select 'build-support/python/pycharm.venv/bin/python'
+      - click 'OK'
+
+### Running/Debugging Tests
+After following these instructions, you should now be able to run/debug tests directly from the IDE
+by right-clicking on a test (or test class) and choosing to run or debug:
+
+[![Debug Client Test](../images/debug-client-test.png)](../images/debug-client-test.png)
+
+If you've set a breakpoint, you can see the run will now stop and let you debug:
+
+[![Debugging Client Test](../images/debugging-client-test.png)](../images/debugging-client-test.png)
+
+### Running/Debugging the Client
+Actually running and debugging the client is unfortunately a bit more complex. You'll need to create
+a Run configuration:
+
+* Go to Run → Edit Configurations
+* Click the + icon to add a new configuration.
+* Choose python and name the configuration 'client'.
+* Set the script path to `/your/path/to/aurora/src/main/python/apache/aurora/client/cli/client.py`
+* Set the script parameters to the command you want to run (e.g. `job status <job key>`)
+* Expand the Environment section and click the ellipsis to add a new environment variable
+* Click the + at the bottom to add a new variable named AURORA_CONFIG_ROOT whose value is the
+  path where your cluster configuration can be found. For example, to talk to the scheduler
+  running in the vagrant image, it would be set to `/your/path/to/aurora/examples/vagrant` (this
+  is the directory where our example clusters.json is found).
+* You should now be able to run and debug this configuration!
diff --git a/source/documentation/0.21.0/development/committers-guide.md b/source/documentation/0.21.0/development/committers-guide.md
new file mode 100644
index 0000000..aadf254
--- /dev/null
+++ b/source/documentation/0.21.0/development/committers-guide.md
@@ -0,0 +1,105 @@
+Committer's Guide
+=================
+
+Information for official Apache Aurora committers.
+
+Setting up your email account
+-----------------------------
+Once your Apache ID has been set up you can configure your account, add ssh keys, and set up an
+email forwarding address at
+
+    http://id.apache.org
+
+Additional instructions for setting up your new committer email can be found at
+
+    http://www.apache.org/dev/user-email.html
+
+The recommended setup is to configure all services (mailing lists, JIRA, ReviewBoard) to send
+emails to your @apache.org email address.
+
+
+Creating a gpg key for releases
+-------------------------------
+In order to create a release candidate you will need a gpg key published to an external key server
+and that key will need to be added to our KEYS file as well.
+
+1. Create a key:
+
+               gpg --gen-key
+
+2. Add your gpg key to the Apache Aurora KEYS file:
+
+               git clone https://gitbox.apache.org/repos/asf/aurora
+               (gpg --list-sigs <KEY ID> && gpg --armor --export <KEY ID>) >> KEYS
+               git add KEYS && git commit -m "Adding gpg key for <APACHE ID>"
+               ./rbt post -o -g
+
+3. Publish the key to an external key server:
+
+               gpg --keyserver pgp.mit.edu --send-keys <KEY ID>
+
+4. Propagate the changes to the KEYS file to the Apache Aurora svn dist locations listed below:
+
+               https://dist.apache.org/repos/dist/dev/aurora/KEYS
+               https://dist.apache.org/repos/dist/release/aurora/KEYS
+
+5. Add your key to git config for use with the release scripts:
+
+               git config --global user.signingkey <KEY ID>
+
+
+Creating a release
+------------------
+The following will guide you through the steps to create a release candidate, vote, and finally an
+official Apache Aurora release. Before starting, your gpg key should be in the KEYS file and you
+must have access to commit to the dist.a.o repositories.
+
+1. Ensure that all issues resolved for this release candidate are tagged with the correct Fix
+Version in JIRA; the changelog script will use this to generate the CHANGELOG in step #3.
+To assign the fix version:
+
+    * Look up the [previous release date](https://issues.apache.org/jira/browse/aurora/?selectedTab=com.atlassian.jira.jira-projects-plugin:versions-panel).
+    * Query all issues resolved after that release date: `project = AURORA AND status in (resolved, Closed) and fixVersion is empty and resolutiondate >= "YYYY/MM/DD"`
+    * In the upper right corner of the query result, select Tools > Bulk Edit.
+    * Select all issues > edit issue > set 'Change Fix Version/s' to the release version.
+    * Make sure to uncheck 'Send mail for this update' at the bottom.
+
+2. Prepare RELEASE-NOTES.md for the release. This just boils down to removing the "(Not yet
+released)" suffix from the impending release.
+
+3. Create a release candidate. This will automatically update the CHANGELOG and commit it, create a
+branch and update the current version within the trunk. To create a minor version update and publish
+it run
+
+               ./build-support/release/release-candidate -l m -p
+
+4. Update, if necessary, the draft email created from the `release-candidate` script in step #3 and
+send the [VOTE] email to the dev@ mailing list. You can verify the release signature and checksums
+by running
+
+               ./build-support/release/verify-release-candidate
+
+5. Wait for the vote to complete. If the vote fails, close the vote by replying to the initial [VOTE]
+email sent in step #4, editing the subject to [RESULT][VOTE] ... and noting the failure reason
+(example [here](http://markmail.org/message/d4d6xtvj7vgwi76f)). You'll also need to manually revert
+the commits generated by the release candidate script that incremented the snapshot version and
+updated the changelog. Once that is done, address any issues and go back to step #1 and run
+again, this time using the -r flag to increment the release candidate version. This will
+automatically clean up the release candidate rc0 branch and source distribution.
+
+               ./build-support/release/release-candidate -l m -r 1 -p
+
+6. Once the vote has successfully passed, create the release.
+
+**IMPORTANT: make sure to use the correct release at this final step (e.g.: `-r 1` if rc1 candidate
+has been voted for). Once the release tag is pushed it will be very hard to undo due to remote
+git pre-receive hook explicitly forbidding release tag manipulations.**
+
+               ./build-support/release/release
+
+7. Update the draft email created from the `release` script in step #6 to include the Apache IDs for
+all binding votes and send the [RESULT][VOTE] email to the dev@ mailing list.
+
+8. Update the [Aurora Website](http://aurora.apache.org/) by following the
+[instructions](https://svn.apache.org/repos/asf/aurora/site/README.md) on the ASF Aurora SVN repo.
+Remember to add a blog post under source/blog and regenerate the site before committing.
diff --git a/source/documentation/0.21.0/development/db-migration.md b/source/documentation/0.21.0/development/db-migration.md
new file mode 100644
index 0000000..5b46d05
--- /dev/null
+++ b/source/documentation/0.21.0/development/db-migration.md
@@ -0,0 +1,34 @@
+DB Migrations
+=============
+
+Changes to the DB schema should be made in the form of migrations. This ensures that all changes
+are applied correctly after a DB dump from a previous version is restored.
+
+DB migrations are managed through a system built on top of
+[MyBatis Migrations](http://www.mybatis.org/migrations/). The migrations are run automatically when
+a snapshot is restored; no manual interaction is required by cluster operators.
+
+Upgrades
+--------
+When adding or altering tables or changing data, in addition to making the change in
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql), a new
+migration class should be created under the org.apache.aurora.scheduler.storage.db.migration
+package. The class should implement the [MigrationScript](https://github.com/mybatis/migrations/blob/master/src/main/java/org/apache/ibatis/migration/MigrationScript.java)
+interface (see [V001_TestMigration](https://github.com/apache/aurora/blob/rel/0.21.0/src/test/java/org/apache/aurora/scheduler/storage/db/testmigration/V001_TestMigration.java)
+as an example). The upgrade and downgrade scripts are defined in this class. When restoring a
+snapshot the list of migrations on the classpath is compared to the list of applied changes in the
+DB. Any changes that have not yet been applied are executed and their downgrade script is stored
+alongside the changelog entry in the database to facilitate downgrades in the event of a rollback.
+
+Downgrades
+----------
+If, while running migrations, a rollback is detected, i.e. a change exists in the DB changelog that
+does not exist on the classpath, the downgrade script associated with each affected change is
+applied.
+
+Baselines
+---------
+After enough time has passed (at least 1 official release), it should be safe to baseline migrations
+if desired. This can be accomplished by ensuring the changes from migrations have been applied to
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql) and then
+removing the corresponding migration classes and adding a migration to remove the changelog entries.
\ No newline at end of file
diff --git a/source/documentation/0.21.0/development/design-documents.md b/source/documentation/0.21.0/development/design-documents.md
new file mode 100644
index 0000000..ee92a60
--- /dev/null
+++ b/source/documentation/0.21.0/development/design-documents.md
@@ -0,0 +1,23 @@
+Design Documents
+================
+
+Since its inception as an Apache project, larger feature additions to the
+Aurora code base are discussed in form of design documents. Design documents
+are living documents until a consensus has been reached to implement a feature
+in the proposed form.
+
+Current and past documents:
+
+* [Command Hooks for the Aurora Client](../design/command-hooks/)
+* [Dynamic Reservations](https://docs.google.com/document/d/19gV8Po6DIHO14tOC7Qouk8RnboY8UCfRTninwn_5-7c/edit)
+* [GPU Resources in Aurora](https://docs.google.com/document/d/1J9SIswRMpVKQpnlvJAMAJtKfPP7ZARFknuyXl-2aZ-M/edit)
+* [Health Checks for Updates](https://docs.google.com/document/d/1KOO0LC046k75TqQqJ4c0FQcVGbxvrn71E10wAjMorVY/edit)
+* [JobUpdateDiff thrift API](https://docs.google.com/document/d/1Fc_YhhV7fc4D9Xv6gJzpfooxbK4YWZcvzw6Bd3qVTL8/edit)
+* [REST API RFC](https://docs.google.com/document/d/11_lAsYIRlD5ETRzF2eSd3oa8LXAHYFD8rSetspYXaf4/edit)
+* [Revocable Mesos offers in Aurora](https://docs.google.com/document/d/1r1WCHgmPJp5wbrqSZLsgtxPNj3sULfHrSFmxp2GyPTo/edit)
+* [Supporting the Mesos Universal Containerizer](https://docs.google.com/document/d/111T09NBF2zjjl7HE95xglsDpRdKoZqhCRM5hHmOfTLA/edit?usp=sharing)
+* [Tier Management In Apache Aurora](https://docs.google.com/document/d/1erszT-HsWf1zCIfhbqHlsotHxWUvDyI2xUwNQQQxLgs/edit?usp=sharing)
+* [Ubiquitous Jobs](https://docs.google.com/document/d/12hr6GnUZU3mc7xsWRzMi3nQILGB-3vyUxvbG-6YmvdE/edit)
+* [Pluggable Scheduling](https://docs.google.com/document/d/1fVHLt9AF-YbOCVCDMQmi5DATVusn-tqY8DldKbjVEm0/edit)
+
+Design documents can be found in the Aurora issue tracker via the query [`project = AURORA AND text ~ "docs.google.com" ORDER BY created`](https://issues.apache.org/jira/browse/AURORA-1528?jql=project%20%3D%20AURORA%20AND%20text%20~%20%22docs.google.com%22%20ORDER%20BY%20created).
diff --git a/source/documentation/0.21.0/development/design/command-hooks.md b/source/documentation/0.21.0/development/design/command-hooks.md
new file mode 100644
index 0000000..3f3f70f
--- /dev/null
+++ b/source/documentation/0.21.0/development/design/command-hooks.md
@@ -0,0 +1,102 @@
+# Command Hooks for the Aurora Client
+
+## Introduction/Motivation
+
+We've got hooks in the client that surround API calls. These are
+pretty awkward, because they don't correlate with user actions. For
+example, suppose we wanted a policy that said users weren't allowed to
+kill all instances of a production job at once.
+
+Right now, all that we could hook would be the "killJob" api call. But
+kill (at least in newer versions of the client) normally runs in
+batches. If a user called killall, what we would see on the API level
+is a series of "killJob" calls, each of which specified a batch of
+instances. We woudn't be able to distinguish between really killing
+all instances of a job (which is forbidden under this policy), and
+carefully killing in batches (which is permitted.) In each case, the
+hook would just see a series of API calls, and couldn't find out what
+the actual command being executed was!
+
+For most policy enforcement, what we really want to be able to do is
+look at and vet the commands that a user is performing, not the API
+calls that the client uses to implement those commands.
+
+So I propose that we add a new kind of hooks, which surround noun/verb
+commands. A hook will register itself to handle a collection of (noun,
+verb) pairs. Whenever any of those noun/verb commands are invoked, the
+hooks methods will be called around the execution of the verb. A
+pre-hook will have the ability to reject a command, preventing the
+verb from being executed.
+
+## Registering Hooks
+
+These hooks will be registered via configuration plugins. A configuration plugin
+can register hooks using an API. Hooks registered this way are, effectively,
+hardwired into the client executable.
+
+The order of execution of hooks is unspecified: they may be called in
+any order. There is no way to guarantee that one hook will execute
+before some other hook.
+
+
+### Global Hooks
+
+Commands registered by the python call are called _global_ hooks,
+because they will run for all configurations, whether or not they
+specify any hooks in the configuration file.
+
+In the implementation, hooks are registered in the module
+`apache.aurora.client.cli.command_hooks`, using the class
+`GlobalCommandHookRegistry`. A global hook can be registered by calling
+`GlobalCommandHookRegistry.register_command_hook` in a configuration plugin.
+
+### The API
+
+    class CommandHook(object):
+      @property
+      def name(self):
+        """Returns a name for the hook."""
+
+      def get_nouns(self):
+        """Return the nouns that have verbs that should invoke this hook."""
+
+      def get_verbs(self, noun):
+      def get_verbs(self, noun):
+        """Return the verbs for a particular noun that should invoke this hook."""
+
+      @abstractmethod
+      def pre_command(self, noun, verb, context, commandline):
+        """Execute a hook before invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        Returns: True if the command should be allowed to proceed; False if the command
+        should be rejected.
+        """
+
+      def post_command(self, noun, verb, context, commandline, result):
+        """Execute a hook after invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        * result: the result code returned by the verb.
+        Returns: nothing
+        """
+
+    class GlobalCommandHookRegistry(object):
+      @classmethod
+      def register_command_hook(cls, hook):
+        pass
+
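+A hypothetical hook written against this API might look like the following sketch (the policy
+logic and all names are illustrative only):
+
+    class ProdKillallBlocker(CommandHook):
+      """Example hook: reject any `job killall` invocation outright."""
+
+      @property
+      def name(self):
+        return "prod_killall_blocker"
+
+      def get_nouns(self):
+        return ["job"]
+
+      def get_verbs(self, noun):
+        return ["killall"] if noun == "job" else []
+
+      def pre_command(self, noun, verb, context, commandline):
+        # Returning False rejects the command before the verb executes. A real
+        # hook might inspect the job key in `context` and only block production jobs.
+        print("Killing all instances at once is not permitted by policy.")
+        return False
+
+      def post_command(self, noun, verb, context, commandline, result):
+        pass
+
+    # Registered from a configuration plugin, making it a global hook:
+    GlobalCommandHookRegistry.register_command_hook(ProdKillallBlocker())
+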
+### Skipping Hooks
+
+To skip a hook, a user uses a command-line option, `--skip-hooks`. The option can either
+specify specific hooks to skip, or "all":
+
+* `aurora --skip-hooks=all job create east/bozo/devel/myjob` will create a job
+  without running any hooks.
+* `aurora --skip-hooks=test,iq create east/bozo/devel/myjob` will create a job,
+  and will skip only the hooks named "test" and "iq".
diff --git a/source/documentation/0.21.0/development/scheduler.md b/source/documentation/0.21.0/development/scheduler.md
new file mode 100644
index 0000000..fc0836a
--- /dev/null
+++ b/source/documentation/0.21.0/development/scheduler.md
@@ -0,0 +1,118 @@
+Developing the Aurora Scheduler
+===============================
+
+The Aurora scheduler is written in Java code and built with [Gradle](http://gradle.org).
+
+
+Prerequisite
+============
+
+When using Apache Aurora checked out from the source repository or the binary
+distribution, the Gradle wrapper and JavaScript dependencies are provided.
+However, you need to manually install them when using the source release
+downloads:
+
+1. Install Gradle following the instructions on the [Gradle web site](http://gradle.org)
+2. From the root directory of the Apache Aurora project generate the Gradle
+wrapper by running:
+
+    gradle wrapper
+
+
+Getting Started
+===============
+
+You will need Java 8 installed and on your `PATH` or unzipped somewhere with `JAVA_HOME` set. Then
+
+    ./gradlew tasks
+
+will bootstrap the build system and show available tasks. This can take a while the first time you
+run it but subsequent runs will be much faster due to cached artifacts.
+
+Running the Tests
+-----------------
+Aurora has a comprehensive unit test suite. To run the tests use
+
+    ./gradlew build
+
+Gradle will only re-run tests when dependencies of them have changed. To force a re-run of all
+tests use
+
+    ./gradlew clean build
+
+Running the build with code quality checks
+------------------------------------------
+To speed up development iteration, the plain gradle commands will not run static analysis tools.
+However, you should run them before posting a review diff, and **always** run them before pushing a
+commit to origin/master.
+
+    ./gradlew build -Pq
+
+Running integration tests
+-------------------------
+To run the same tests that are run in the Apache Aurora continuous integration
+environment:
+
+    ./build-support/jenkins/build.sh
+
+In addition, there is an end-to-end test that runs a suite of aurora commands
+using a virtual cluster:
+
+    ./src/test/sh/org/apache/aurora/e2e/test_end_to_end.sh
+
+Creating a bundle for deployment
+--------------------------------
+Gradle can create a zip file containing Aurora, all of its dependencies, and a launch script with
+
+    ./gradlew distZip
+
+or a tar file containing the same files with
+
+    ./gradlew distTar
+
+The output file will be written to `dist/distributions/aurora-scheduler.zip` or
+`dist/distributions/aurora-scheduler.tar`.
+
+
+
+Developing Aurora Java code
+===========================
+
+Setting up an IDE
+-----------------
+Gradle can generate project files for your IDE. To generate an IntelliJ IDEA project run
+
+    ./gradlew idea
+
+and import the generated `aurora.ipr` file.
+
+Adding or Upgrading a Dependency
+--------------------------------
+New dependencies can be added from Maven central by adding a `compile` dependency to `build.gradle`.
+For example, to add a dependency on `com.example`'s `example-lib` 1.0 add this block:
+
+    compile 'com.example:example-lib:1.0'
+
+NOTE: Anyone thinking about adding a new dependency should first familiarize themselves with the
+Apache Foundation's third-party licensing
+[policy](http://www.apache.org/legal/resolved.html#category-x).
+
+
+
+Developing the Aurora Build System
+==================================
+
+Bootstrapping Gradle
+--------------------
+The following files were autogenerated by `gradle wrapper` using gradle's
+[Wrapper](http://www.gradle.org/docs/current/dsl/org.gradle.api.tasks.wrapper.Wrapper.html) plugin and
+should not be modified directly:
+
+    ./gradlew
+    ./gradlew.bat
+    ./gradle/wrapper/gradle-wrapper.jar
+    ./gradle/wrapper/gradle-wrapper.properties
+
+To upgrade Gradle unpack the new version somewhere, run `/path/to/new/gradle wrapper` in the
+repository root and commit the changed files.
+
diff --git a/source/documentation/0.21.0/development/thermos.md b/source/documentation/0.21.0/development/thermos.md
new file mode 100644
index 0000000..0efe130
--- /dev/null
+++ b/source/documentation/0.21.0/development/thermos.md
@@ -0,0 +1,126 @@
+The Python components of Aurora are built using [Pants](https://pantsbuild.github.io).
+
+
+Python Build Conventions
+========================
+The Python code is laid out according to the following conventions:
+
+1. 1 `BUILD` per 3rd level directory. For a list of current top-level packages run:
+
+        % find src/main/python -maxdepth 3 -mindepth 3 -type d |\
+        while read dname; do echo $dname |\
+            sed 's@src/main/python/\(.*\)/\(.*\)/\(.*\).*@\1.\2.\3@'; done
+
+2.  Each `BUILD` file exports 1
+    [`python_library`](https://pantsbuild.github.io/build_dictionary.html#bdict_python_library)
+    that provides a
+    [`setup_py`](https://pantsbuild.github.io/build_dictionary.html#setup_py)
+    containing each
+    [`python_binary`](https://pantsbuild.github.io/build_dictionary.html#python_binary)
+    in the `BUILD` file, named the same as the directory it's in so that it can be referenced
+    without a ':' character. The `sources` field in the `python_library` will almost always be
+    `rglobs('*.py')`.
+
+3.  Other BUILD files may only depend on this single public `python_library`
+    target. Any other target is considered a private implementation detail and
+    should be prefixed with an `_`.
+
+4.  `python_binary` targets are always named the same as the exported console script.
+
+5.  `python_binary` targets must have identical `dependencies` to the `python_library` exported
+    by the package and must use `entry_point`.
+
+    This means a PEX file generated by pants will contain exactly the same files that will be
+    available on the `PYTHONPATH` in the case of `pip install` of the corresponding library
+    target. This will help our migration off of Pants in the future.
+
+Annotated example - apache.thermos.runner
+-----------------------------------------
+
+    % find src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner/__init__.py
+    src/main/python/apache/thermos/runner/thermos_runner.py
+    src/main/python/apache/thermos/runner/BUILD
+    % cat src/main/python/apache/thermos/runner/BUILD
+    # License boilerplate omitted
+    import os
+
+
+    # Private target so that a setup_py can exist without a circular dependency. Only targets within
+    # this file should depend on this.
+    python_library(
+      name = '_runner',
+      # The target covers every python file under this directory and subdirectories.
+      sources = rglobs('*.py'),
+      dependencies = [
+        '3rdparty/python:twitter.common.app',
+        '3rdparty/python:twitter.common.log',
+        # Source dependencies are always referenced without a ':'.
+        'src/main/python/apache/thermos/common',
+        'src/main/python/apache/thermos/config',
+        'src/main/python/apache/thermos/core',
+      ],
+    )
+
+    # Binary target for thermos_runner.pex. Nothing should depend on this - it's only used as an
+    # argument to ./pants binary.
+    python_binary(
+      name = 'thermos_runner',
+      # Use entry_point, not source so the files used here are the same ones tests see.
+      entry_point = 'apache.thermos.runner.thermos_runner',
+      dependencies = [
+        # Notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+    )
+
+    # The public library that everyone importing the runner symbols uses.
+    # The test targets and any other dependent source code should depend on this.
+    python_library(
+      name = 'runner',
+      dependencies = [
+        # Again, notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+      # We always provide a setup_py. This will cause any dependee libraries to automatically
+      # reference this library in their requirements.txt rather than copy the source files into their
+      # sdist.
+      provides = setup_py(
+        # Conventionally named and versioned.
+        name = 'apache.thermos.runner',
+        version = open(os.path.join(get_buildroot(), '.auroraversion')).read().strip().upper(),
+      ).with_binaries({
+        # Every binary in this file should also be repeated here.
+        # Always use the dict-form of .with_binaries so that commands with dashes in their names are
+        # supported.
+        # The console script name is always the same as the PEX with .pex stripped.
+        'thermos_runner': ':thermos_runner',
+      }),
+    )
+
+
+
+Thermos Test resources
+======================
+
+The Aurora source repository and distributions contain several
+[binary files](../../src/test/resources/org/apache/thermos/root/checkpoints) to
+qualify the backwards-compatibility of thermos with checkpoint data. Since
+thermos persists state to disk, to be read by the thermos observer, it is important that we have
+tests that prevent regressions affecting the ability to parse previously-written data.
+
+The files included represent persisted checkpoints that exercise different
+features of thermos. The existing files should not be modified unless
+we are accepting backwards incompatibility, such as with a major release.
+
+It is not practical to write source code to generate these files on the fly,
+as source would be vulnerable to drift (e.g. due to refactoring) in ways
+that would undermine the goal of ensuring backwards compatibility.
+
+The most common reason to add a new checkpoint file would be to provide
+coverage for new thermos features that alter the data format. This is
+accomplished by writing and running a
+[job configuration](../../reference/configuration/) that exercises the feature, and
+copying the checkpoint file from the sandbox directory; by default this is
+`/var/run/thermos/checkpoints/<aurora task id>`.
diff --git a/source/documentation/0.21.0/development/thrift.md b/source/documentation/0.21.0/development/thrift.md
new file mode 100644
index 0000000..b1a28fe
--- /dev/null
+++ b/source/documentation/0.21.0/development/thrift.md
@@ -0,0 +1,54 @@
+Thrift
+======
+
+Aurora uses [Apache Thrift](https://thrift.apache.org/) for representing structured data in
+client/server RPC protocol as well as for internal data storage. While Thrift is capable of
+correctly handling additions and renames of the existing members, field removals must be done
+carefully to ensure backwards compatibility and provide a predictable deprecation cycle. This
+document describes general guidelines for making Thrift schema changes to the existing fields in
+[api.thrift](https://github.com/apache/aurora/blob/rel/0.21.0/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+It is highly recommended to go through the
+[Thrift: The Missing Guide](http://diwakergupta.github.io/thrift-missing-guide/) first to refresh on
+basic Thrift schema concepts.
+
+Checklist
+---------
+Every existing Thrift schema modification is unique in its requirements and must be analyzed
+carefully to identify its scope and expected consequences. The following checklist may help in that
+analysis:
+* Is this a new field/struct? If yes, go ahead
+* Is this a pure field/struct rename without any type/structure change? If yes, go ahead and rename
+* Anything else, read further to make sure your change is properly planned
+
+Deprecation cycle
+-----------------
+Any time a breaking change (e.g.: field replacement or removal) is required, the following cycle
+must be followed:
+
+### vCurrent
+The change is applied in a way that does not prevent a scheduler/client at this version from
+communicating with a scheduler/client at vCurrent-1.
+* Do not remove or rename the old field
+* Add a new field as an eventual replacement of the old one and implement a dual read/write
+anywhere the old field is used. If a thrift struct is mapped in the DB store, make sure both columns
+are marked as `NOT NULL`
+* Check [storage.thrift](https://github.com/apache/aurora/blob/rel/0.21.0/api/src/main/thrift/org/apache/aurora/gen/storage.thrift) to see if
+the affected struct is stored in Aurora scheduler storage. If so, it's almost certainly also
+necessary to perform a [DB migration](../db-migration/).
+* Add a deprecation jira ticket into the vCurrent+1 release candidate
+* Add a TODO for the deprecated field mentioning the jira ticket
+
+### vCurrent+1
+Finalize the change by removing the deprecated fields from the Thrift schema.
+* Drop any dual read/write routines added in the previous version
+* Remove thrift backfilling in scheduler
+* Remove the deprecated Thrift field
+
+Testing
+-------
+It's always advisable to test your changes in the local vagrant environment to build more
+confidence that your change is backwards compatible. It's easy to simulate different
+client/scheduler versions by playing with the `aurorabuild` command. See [this document](../../getting-started/vagrant/)
+for more.
+
diff --git a/source/documentation/0.21.0/development/ui.md b/source/documentation/0.21.0/development/ui.md
new file mode 100644
index 0000000..f41614e
--- /dev/null
+++ b/source/documentation/0.21.0/development/ui.md
@@ -0,0 +1,46 @@
+Developing the Aurora Scheduler UI
+==================================
+
+Installing bower (optional)
+----------------------------
+Third party JS libraries used in Aurora (located at 3rdparty/javascript/bower_components) are
+managed by bower, a JS dependency manager. Bower is only required if you plan to add, remove or
+update JS libraries. Bower can be installed using the following command:
+
+    npm install -g bower
+
+Bower depends on node.js and npm. The easiest way to install node on a Mac is via brew:
+
+    brew install node
+
+For more node.js installation options refer to https://github.com/joyent/node/wiki/Installation.
+
+More info on installing and using bower can be found at: http://bower.io/. Once installed, you can
+use the following commands to view and modify the bower repo at
+3rdparty/javascript/bower_components
+
+    bower list
+    bower install <library name>
+    bower remove <library name>
+    bower update <library name>
+    bower help
+
+
+Faster Iteration in Vagrant
+---------------------------
+The scheduler serves UI assets from the classpath. For production deployments this means the assets
+are served from within a jar. However, for faster development iteration, the vagrant image is
+configured to add the `scheduler` subtree of `/vagrant/dist/resources/main` to the head of
+`CLASSPATH`. This path is configured as a shared filesystem to the path on the host system where
+your Aurora repository lives. This means that any updates under `dist/resources/main/scheduler` in
+your checkout will be reflected immediately in the UI served from within the vagrant image.
+
+The one caveat to this is that this path is under `dist` not `src`. This is because the assets must
+be processed by gradle before they can be served. So, unfortunately, you cannot just save your local
+changes and see them reflected in the UI; you must first run `./gradlew processResources`. This is
+less than ideal, but better than having to restart the scheduler after every change. Additionally,
+gradle makes this process somewhat easier with the use of the `--continuous` flag. If you run
+`./gradlew processResources --continuous`, gradle will monitor the filesystem for changes and run the
+task automatically as necessary. This doesn't quite provide hot-reload capabilities, but it does
+allow for <5s from save to changes being visible in the UI with no further action required on the
+part of the developer.
diff --git a/source/documentation/0.21.0/features/constraints.md b/source/documentation/0.21.0/features/constraints.md
new file mode 100644
index 0000000..4ade848
--- /dev/null
+++ b/source/documentation/0.21.0/features/constraints.md
@@ -0,0 +1,126 @@
+Scheduling Constraints
+======================
+
+By default, Aurora will pick any random agent with sufficient resources
+in order to schedule a task. This scheduling choice can be further
+restricted with the help of constraints.
+
+
+Mesos Attributes
+----------------
+
+Data centers are often organized with hierarchical failure domains.  Common failure domains
+include hosts, racks, rows, and PDUs.  If you have this information available, it is wise to tag
+the Mesos agent with them as
+[attributes](https://mesos.apache.org/documentation/attributes-resources/).
+
+The Mesos agent `--attributes` command line argument can be used to mark agents with
+static key/value pairs, so called attributes (not to be confused with `--resources`, which are
+dynamic and accounted).
+
+For example, consider the host `cluster1-aaa-03-sr2` and its following attributes (given in
+key:value format): `host:cluster1-aaa-03-sr2` and `rack:aaa`.
+
+Aurora makes these attributes available for matching with scheduling constraints.
+
+
+Limit Constraints
+-----------------
+
+Limit constraints allow you to control machine diversity using constraints. The below
+constraint ensures that no more than two instances of your job may run on a single host.
+Think of this as a "group by" limit.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'host': 'limit:2',
+      }
+      ...
+    )
+
+
+Likewise, you can use constraints to control rack diversity, e.g. at
+most one task per rack:
+
+    constraints = {
+      'rack': 'limit:1',
+    }
+
+Use these constraints sparingly as they can dramatically reduce Tasks' schedulability.
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+
+Value Constraints
+-----------------
+
+Value constraints can be used to express that a certain attribute with a certain value
+should be present on a Mesos agent. For example, the following job would only be
+scheduled on nodes that claim to have an `SSD` as their disk.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'disk': 'SSD',
+      }
+      ...
+    )
+
+
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+Running stateful services
+-------------------------
+
+Aurora is best suited to run stateless applications, but it also accommodates stateful services
+like databases, or services that otherwise need to always run on the same machines.
+
+### Dedicated attribute
+
+Most Mesos attributes are arbitrary and available for custom use.  There is one exception,
+though: the `dedicated` attribute.  Aurora treats this attribute specially: only jobs with a
+matching constraint may run on these machines, and those jobs will be scheduled only on these machines.
+
+
+#### Syntax
+The dedicated attribute has semantic meaning. The format is `$role(/.*)?`. When a job is created,
+the scheduler requires that the `$role` component matches the `role` field in the job
+configuration, and will reject the job creation otherwise.  The remainder of the attribute is
+free-form. We've developed the idiom of formatting this attribute as `$role/$job`, but do not
+enforce this. For example: a job `devcluster/www-data/prod/hello` with a dedicated constraint set as
+`www-data/web.multi` will have its tasks scheduled only on Mesos agents configured with:
+`--attributes=dedicated:www-data/web.multi`.
+
+A wildcard (`*`) may be used for the role portion of the dedicated attribute, which will allow any
+owner to elect for a job to run on the host(s). For example: tasks from both
+`devcluster/www-data/prod/hello` and `devcluster/vagrant/test/hello` with a dedicated constraint
+formatted as `*/web.multi` will be scheduled only on Mesos agents configured with
+`--attributes=dedicated:*/web.multi`. This may be useful when assembling a virtual cluster of
+machines sharing the same set of traits or requirements.
+
+##### Example
+Consider the following agent command line:
+
+    mesos-slave --attributes="dedicated:db_team/redis" ...
+
+And this job configuration:
+
+    Service(
+      name = 'redis',
+      role = 'db_team',
+      constraints = {
+        'dedicated': 'db_team/redis'
+      }
+      ...
+    )
+
+The job configuration indicates that it should only be scheduled on agents with the attribute
+`dedicated:db_team/redis`.  Additionally, Aurora will prevent any tasks that do _not_ have that
+constraint from running on those agents.
+
diff --git a/source/documentation/0.21.0/features/containers.md b/source/documentation/0.21.0/features/containers.md
new file mode 100644
index 0000000..5bf1601
--- /dev/null
+++ b/source/documentation/0.21.0/features/containers.md
@@ -0,0 +1,130 @@
+Containers
+==========
+
+Aurora supports several containerizers, notably the Mesos containerizer and the Docker
+containerizer. The Mesos containerizer uses native OS features directly to provide isolation between
+containers, while the Docker containerizer delegates container management to the Docker engine.
+
+The support for launching container images via both containerizers has to be
+[enabled by a cluster operator](../../operations/configuration/#containers).
+
+Mesos Containerizer
+-------------------
+
+The Mesos containerizer is the native Mesos containerizer solution. It allows tasks to be
+run with an array of [pluggable isolators](../resource-isolation/) and can launch tasks using
+[Docker](https://github.com/docker/docker/blob/master/image/spec/v1.md) images,
+[AppC](https://github.com/appc/spec/blob/master/SPEC.md) images, or directly on the agent host
+filesystem.
+
+The following example (available in our [Vagrant environment](../../getting-started/vagrant/))
+launches a hello world example within a `debian/jessie` Docker image:
+
+    $ cat /vagrant/examples/jobs/hello_docker_image.aurora
+    hello_loop = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = Task(
+      processes = [hello_loop],
+      resources = Resources(cpu=1, ram=1*MB, disk=8*MB)
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker_image',
+        task = task,
+        container = Mesos(image=DockerImage(name='debian', tag='jessie'))
+      )
+    ]
+
+Docker and Appc images are designated using an appropriate `image` property of the `Mesos`
+configuration object. If either `container` or `image` is left unspecified, the host filesystem
+will be used. Further details of how to specify images can be found in the
+[Reference Documentation](../../reference/configuration/#mesos-object).
+
+By default, Aurora launches processes as the Linux user with the same name as the job's role (e.g.
+`www-data` in the example above). This user has to exist on the host filesystem. If it does not
+exist within the container image, it will be created automatically. Otherwise, this user and its
+primary group have to exist in the image with matching uid/gid.
+
+For more information on the Mesos containerizer filesystem, namespace, and isolator features, visit
+[Mesos Containerizer](http://mesos.apache.org/documentation/latest/mesos-containerizer/) and
+[Mesos Container Images](http://mesos.apache.org/documentation/latest/container-image/).
+
+
+Docker Containerizer
+--------------------
+
+The Docker containerizer launches container images using the Docker engine. It may often provide
+more advanced features than the native Mesos containerizer, but has to be installed separately from
+Mesos on each agent host.
+
+Starting with the 0.17.0 release, `image` can be specified with a `{{docker.image[name][tag]}}` binder so that
+the tag can be resolved to a concrete image digest. This ensures that the job always uses the same image
+across restarts, even if the version identified by the tag has been updated, guaranteeing that only job
+updates can mutate configuration.
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/hello_docker_engine.aurora
+    hello_loop = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = Task(
+      processes = [hello_loop],
+      resources = Resources(cpu=1, ram=1*MB, disk=8*MB)
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker',
+        task = task,
+        container = Docker(image = 'python:2.7')
+      ), Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker_engine_binding',
+        task = task,
+        container = Docker(image = '{{docker.image[library/python][2.7]}}')
+      )
+    ]
+
+Note, this feature requires a v2 Docker registry. If using a private Docker registry, its URL
+must be specified in the `clusters.json` configuration file under the key `docker_registry`.
+If not specified, `docker_registry` defaults to `https://registry-1.docker.io` (Docker Hub).
+
+Example:
+
+    # clusters.json
+    [{
+      "name": "devcluster",
+      ...
+      "docker_registry": "https://registry.example.com"
+    }]
+
+Details of how to use Docker via the Docker engine can be found in the
+[Reference Documentation](../../reference/configuration/#docker-object). Please note that in order to
+correctly execute processes inside a job, the Docker container must have Python 2.7 and potentially
+further Mesos dependencies installed. This limitation does not hold for Docker containers used via
+the Mesos containerizer.
+
+For more information on launching Docker containers through the Docker containerizer, visit
+[Docker Containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/).
diff --git a/source/documentation/0.21.0/features/cron-jobs.md b/source/documentation/0.21.0/features/cron-jobs.md
new file mode 100644
index 0000000..bcc94dc
--- /dev/null
+++ b/source/documentation/0.21.0/features/cron-jobs.md
@@ -0,0 +1,124 @@
+# Cron Jobs
+
+Aurora supports execution of scheduled jobs on a Mesos cluster using cron-style syntax.
+
+- [Overview](#overview)
+- [Collision Policies](#collision-policies)
+- [Failure recovery](#failure-recovery)
+- [Interacting with cron jobs via the Aurora CLI](#interacting-with-cron-jobs-via-the-aurora-cli)
+	- [cron schedule](#cron-schedule)
+	- [cron deschedule](#cron-deschedule)
+	- [cron start](#cron-start)
+	- [job killall, job restart, job kill](#job-killall-job-restart-job-kill)
+- [Technical Note About Syntax](#technical-note-about-syntax)
+- [Caveats](#caveats)
+	- [Failovers](#failovers)
+	- [Collision policy is best-effort](#collision-policy-is-best-effort)
+	- [Timezone Configuration](#timezone-configuration)
+
+## Overview
+
+A job is identified as a cron job by the presence of a
+`cron_schedule` attribute containing a cron-style schedule in the
+[`Job`](../../reference/configuration/#job-objects) object. Examples of cron schedules
+include "every 5 minutes" (`*/5 * * * *`), "Fridays at 17:00" (`* 17 * * FRI`), and
+"the 1st and 15th day of the month at 03:00" (`0 3 1,15 *`).
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/cron_hello_world.aurora
+    # A cron job that runs every 5 minutes.
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]
+
+## Collision Policies
+
+The `cron_collision_policy` field specifies the scheduler's behavior when a new cron job is
+triggered while an older run hasn't finished. The scheduler has two policies available:
+
+* `KILL_EXISTING`: The default policy - on a collision the old instances are killed and instances with the current
+configuration are started.
+* `CANCEL_NEW`: On a collision the new run is cancelled.
+
+Note that the use of `CANCEL_NEW` is likely a code smell - interrupted cron jobs should be able
+to recover their progress on a subsequent invocation, otherwise they risk having their work queue
+grow faster than they can process it.
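+
+Whichever policy fits your job, it is set directly on the [`Job`](../../reference/configuration/#job-objects)
+object. As a sketch, here is the example job from above with `CANCEL_NEW`:
+
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        cron_collision_policy = 'CANCEL_NEW',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]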
+
+## Failure recovery
+
+Unlike with services, which Aurora will always re-execute regardless of exit status, instances of
+cron jobs retry according to the `max_task_failures` attribute of the
+[Task](../../reference/configuration/#task-object) object. To get "run-until-success" semantics,
+set `max_task_failures` to `-1`.
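+
+A minimal sketch of such a task (the resource values below are illustrative only):
+
+    task = Task(
+      name = 'cron_hello_world',
+      processes = [Process(name = 'hello', cmdline = 'echo "Hello world from cron"')],
+      resources = Resources(cpu = 0.1, ram = 16*MB, disk = 16*MB),
+      max_task_failures = -1  # retry failed runs until one succeeds
+    )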
+
+## Interacting with cron jobs via the Aurora CLI
+
+Most interaction with cron jobs takes place using the `cron` subcommand. See `aurora cron -h`
+for up-to-date usage instructions.
+
+### cron schedule
+Schedules a new cron job on the Aurora cluster for later runs or replaces the existing cron template
+with a new one. Only future runs will be affected; any existing active tasks are left intact.
+
+    $ aurora cron schedule devcluster/www-data/test/cron_hello_world /vagrant/examples/jobs/cron_hello_world.aurora
+
+### cron deschedule
+Deschedules a cron job, preventing future runs but allowing current runs to complete.
+
+    $ aurora cron deschedule devcluster/www-data/test/cron_hello_world
+
+### cron start
+Start a cron job immediately, outside of its normal cron schedule.
+
+    $ aurora cron start devcluster/www-data/test/cron_hello_world
+
+### job killall, job restart, job kill
+Cron jobs create instances running on the cluster that you can interact with like normal Aurora
+tasks with `job kill` and `job restart`.
+
+
+## Technical Note About Syntax
+
+`cron_schedule` uses a restricted subset of BSD crontab syntax. While the
+execution engine currently uses Quartz, the schedule parsing is custom and supports a subset of FreeBSD
+[crontab(5)](http://www.freebsd.org/cgi/man.cgi?crontab(5)) syntax. See
+[the source](https://github.com/apache/aurora/blob/master/src/main/java/org/apache/aurora/scheduler/cron/CrontabEntry.java#L106-L124)
+for details.
+
+
+## Caveats
+
+### Failovers
+No failover recovery. Aurora does not record the latest minute it fired
+triggers for across failovers. Therefore it's possible to miss triggers
+on failover. Note that this behavior may change in the future.
+
+It's necessary to sync time between schedulers with something like `ntpd`.
+Clock skew could cause double or missed triggers in the case of a failover.
+
+### Collision policy is best-effort
+Aurora aims to always have *at least one copy* of a given instance running at a time - it's
+an AP system, meaning it chooses Availability and Partition Tolerance at the expense of
+Consistency.
+
+If your collision policy was `CANCEL_NEW` and a task has terminated but
+Aurora has not noticed this, Aurora will go ahead and create your new
+task.
+
+If your collision policy was `KILL_EXISTING` and a task was marked `LOST`
+but not yet GCed, Aurora will go ahead and create your new task without
+attempting to kill the old one (outside the GC interval).
+
+### Timezone Configuration
+Cron timezone is configured independently of JVM timezone with the `-cron_timezone` flag and
+defaults to UTC.
diff --git a/source/documentation/0.21.0/features/custom-executors.md b/source/documentation/0.21.0/features/custom-executors.md
new file mode 100644
index 0000000..da3f2a2
--- /dev/null
+++ b/source/documentation/0.21.0/features/custom-executors.md
@@ -0,0 +1,166 @@
+Custom Executors
+================
+
+If the need arises to use a Mesos executor other than the Thermos executor, the scheduler can be
+configured to utilize a custom executor by specifying the `-custom_executor_config` flag.
+The flag must be set to the path of a valid executor configuration file.
+
+The configuration file must be a valid **JSON array** and contain, at minimum,
+one executor configuration including the name, command, and resources fields. It
+must be pointed to by the `-custom_executor_config` flag when the scheduler is
+started.
+
+### Array Entry
+
+Property                 | Description
+-----------------------  | ---------------------------------
+executor (required)      | Description of executor.
+task_prefix (required)   | Prefix given to tasks launched with this executor's configuration.
+volume_mounts (optional) | Volumes to be mounted in container running executor.
+
+#### executor
+
+Property                 | Description
+-----------------------  | ---------------------------------
+name (required)          | Name of the executor.
+command (required)       | How to run the executor.
+resources (required)     | Overhead to use for each executor instance.
+
+#### command
+
+Property                 | Description
+-----------------------  | ---------------------------------
+value (required)         | The command to execute.
+arguments (optional)     | A list of arguments to pass to the command.
+uris (optional)          | List of resources to download into the task sandbox.
+shell (optional)         | Run executor via shell.
+
+A note on the command property (from [mesos.proto](https://github.com/apache/mesos/blob/master/include/mesos/mesos.proto)):
+
+```
+1) If 'shell == true', the command will be launched via shell
+   (i.e., /bin/sh -c 'value'). The 'value' specified will be
+   treated as the shell command. The 'arguments' will be ignored.
+2) If 'shell == false', the command will be launched by passing
+   arguments to an executable. The 'value' specified will be
+   treated as the filename of the executable. The 'arguments'
+   will be treated as the arguments to the executable. This is
+   similar to how POSIX exec families launch processes (i.e.,
+   execlp(value, arguments(0), arguments(1), ...)).
+```
+
+##### uris (list)
+* Follows the [Mesos Fetcher schema](http://mesos.apache.org/documentation/latest/fetcher/)
+
+Property                 | Description
+-----------------------  | ---------------------------------
+value (required)         | Path to the resource needed in the sandbox.
+executable (optional)    | Change resource to be executable via chmod.
+extract (optional)       | Extract files from packed or compressed archives into the sandbox.
+cache (optional)         | Use caching mechanism provided by Mesos for resources.
+
+#### resources (list)
+
+Property             | Description
+-------------------  | ---------------------------------
+name (required)      | Name of the resource: cpus or mem.
+type (required)      | Type of resource. Should always be SCALAR.
+scalar (required)    | Value in float for cpus or int for mem (in MBs)
+
+### volume_mounts (list)
+
+Property                     | Description
+---------------------------  | ---------------------------------
+host_path (required)         | Host path to mount inside the container.
+container_path (required)    | Path inside the container where `host_path` will be mounted.
+mode (required)              | Mode in which to mount the volume, Read-Write (RW) or Read-Only (RO).
+
+A sample configuration is as follows:
+
+```json
+[
+    {
+      "executor": {
+        "name": "myExecutor",
+        "command": {
+          "value": "myExecutor.a",
+          "shell": "false",
+          "arguments": [
+            "localhost:2181",
+            "-verbose",
+            "-config myConfiguration.config"
+          ],
+          "uris": [
+            {
+              "value": "/dist/myExecutor.a",
+              "executable": true,
+              "extract": false,
+              "cache": true
+            },
+            {
+              "value": "/home/user/myConfiguration.config",
+              "executable": false,
+              "extract": false,
+              "cache": false
+            }
+          ]
+        },
+        "resources": [
+          {
+            "name": "cpus",
+            "type": "SCALAR",
+            "scalar": {
+              "value": 1.00
+            }
+          },
+          {
+            "name": "mem",
+            "type": "SCALAR",
+            "scalar": {
+              "value": 512
+            }
+          }
+        ]
+      },
+      "volume_mounts": [
+        {
+          "mode": "RO",
+          "container_path": "/path/on/container",
+          "host_path": "/path/to/host/directory"
+        },
+        {
+          "mode": "RW",
+          "container_path": "/container",
+          "host_path": "/host"
+        }
+      ],
+      "task_prefix": "my-executor-"
+    }
+]
+```
+
+It should be noted that if you do not use Thermos or a Thermos-based executor, links in the scheduler's
+Web UI for tasks will not work (at least for the time being).
+Some information about launched tasks can still be accessed via the Mesos Web UI or via the Aurora Client.
+
+### Using a custom executor
+
+To launch tasks using a custom executor,
+an [ExecutorConfig](../../reference/configuration/#executorconfig-objects) object must be added to
+the Job or Service object. The `name` parameter of ExecutorConfig must match the name of an executor
+defined in the JSON object provided to the scheduler at startup time.
+
+For example, if we desire to launch tasks using `myExecutor` (defined above), we may do so in
+the following manner:
+
+```
+jobs = [Service(
+  task = task,
+  cluster = 'devcluster',
+  role = 'www-data',
+  environment = 'prod',
+  name = 'hello',
+  executor_config = ExecutorConfig(name='myExecutor'))]
+```
+
+This will create a Service Job which will launch tasks using myExecutor instead of Thermos.
diff --git a/source/documentation/0.21.0/features/job-updates.md b/source/documentation/0.21.0/features/job-updates.md
new file mode 100644
index 0000000..8605f00
--- /dev/null
+++ b/source/documentation/0.21.0/features/job-updates.md
@@ -0,0 +1,123 @@
+Aurora Job Updates
+==================
+
+`Job` configurations can be updated at any point in their lifecycle.
+Usually updates are done incrementally using a process called a *rolling
+upgrade*, in which Tasks are upgraded in small groups, one group at a
+time.  Updates are done using various Aurora Client commands.
+
+
+Rolling Job Updates
+-------------------
+
+There are several sub-commands to manage job updates:
+
+    aurora update start <job key> <configuration file>
+    aurora update info <job key>
+    aurora update pause <job key>
+    aurora update resume <job key>
+    aurora update abort <job key>
+    aurora update list <cluster>
+
+When you `start` a job update, the command will return once it has sent the
+instructions to the scheduler.  At that point, you may view detailed
+progress for the update with the `info` subcommand, in addition to viewing
+graphical progress in the web browser.  You may also get a full listing of
+in-progress updates in a cluster with `list`.
+
+Once an update has been started, you can `pause` to keep the update but halt
+progress.  This can be useful for doing things like debugging a partially-updated
+job to determine whether you would like to proceed.  You can `resume` to
+proceed.
+
+You may `abort` a job update regardless of the state it is in. This will
+instruct the scheduler to completely abandon the job update and leave the job
+in the current (possibly partially-updated) state.
+
+For a configuration update, the Aurora Scheduler calculates required changes
+by examining the current job config state and the new desired job config.
+It then starts a *rolling batched update process* by going through every batch
+and performing these operations, in order:
+
+- If an instance is not present in the scheduler but is present in
+  the new config, then the instance is created.
+- If an instance is present in both the scheduler and the new config, then
+  the scheduler diffs both task configs. If it detects any changes, it
+  performs an instance update by killing the old config instance and adding
+  the new config instance.
+- If an instance is present in the scheduler but isn't in the new config,
+  then that instance is killed.
+
+The Aurora Scheduler continues through the instance list until all tasks are
+updated and in `RUNNING`. If the scheduler determines the update is not going
+well (based on the criteria specified in the UpdateConfig), it cancels the update.
+
+Update cancellation runs a procedure similar to the update sequence described
+above, but in reverse order. New instance configs are swapped
+with old instance configs and batch updates proceed backwards
+from the point where the update failed. For example, the batches (0,1,2) (3,4,5) (6,7,
+8-FAIL) result in a rollback in the order (8,7,6) (5,4,3) (2,1,0).
+
+For details on how to control a job update, please see the
+[UpdateConfig](../../reference/configuration/#updateconfig-objects) configuration object.
+
+
+Coordinated Job Updates
+------------------------
+
+Some Aurora services may benefit from having more control over updates by explicitly
+acknowledging ("heartbeating") job update progress. This may be helpful for mission-critical
+service updates where explicit job health monitoring is vital during the entire job update
+lifecycle. Such job updates would rely on an external service (or a custom client) periodically
+pulsing an active coordinated job update via a
+[pulseJobUpdate RPC](https://github.com/apache/aurora/blob/rel/0.21.0/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+A coordinated update is defined by setting a positive
+[pulse_interval_secs](../../reference/configuration/#updateconfig-objects) value in the job configuration
+file. If no pulses are received within the specified interval, the update will be blocked. A blocked
+update is unable to continue rolling forward (or rolling back) but retains its active status.
+It may only be unblocked by a fresh `pulseJobUpdate` call.
+
+NOTE: A coordinated update starts in `ROLL_FORWARD_AWAITING_PULSE` state and will not make any
+progress until the first pulse arrives. However, a paused update (`ROLL_FORWARD_PAUSED` or
+`ROLL_BACK_PAUSED`) is still considered active and upon resuming will immediately make progress
+provided the pulse interval has not expired.
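+
+As a sketch, a coordinated update is requested through the job's `UpdateConfig`; the interval value
+below is illustrative:
+
+    update_config = UpdateConfig(
+      batch_size = 1,
+      pulse_interval_secs = 120  # the update blocks if no pulseJobUpdate arrives within this window
+    )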
+
+
+SLA-Aware Updates
+-----------------
+
+Updates can take advantage of [Custom SLA Requirements](../../features/sla-requirements/) and
+specify the `sla_aware=True` option within
+[UpdateConfig](../../reference/configuration/#updateconfig-objects) to only update instances if
+the action will maintain the task's SLA requirements. This feature allows updates to avoid killing
+too many instances in the face of unexpected failures outside of the update range.
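+
+A brief sketch of opting in:
+
+    update_config = UpdateConfig(
+      sla_aware = True  # only update an instance if doing so maintains the task's SLA requirements
+    )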
+
+See the [Using the `sla_aware` option](../../reference/configuration/#using-the-sla-aware-option) section
+for more information on how to use this feature.
+
+
+Canary Deployments
+------------------
+
+Canary deployments are a pattern for rolling out updates to a subset of job instances,
+in order to test different code versions alongside the actual production job.
+It is a risk-mitigation strategy for job owners and commonly used in a form where
+job instance 0 runs with a different configuration than the instances 1-N.
+
+For example, consider a job with 4 instances that each
+request 1 core of cpu, 1 GB of RAM, and 1 GB of disk space as specified
+in the configuration file `hello_world.aurora`. If you want to
+update it so it requests 2 GB of RAM instead of 1, you can create a new
+configuration file called `new_hello_world.aurora` and
+issue
+
+    aurora update start <job_key_value>/0-1 new_hello_world.aurora
+
+This results in instances 0 and 1 having 1 cpu, 2 GB of RAM, and 1 GB of disk space,
+while instances 2 and 3 have 1 cpu, 1 GB of RAM, and 1 GB of disk space. If instance 3
+dies and restarts, it restarts with 1 cpu, 1 GB RAM, and 1 GB disk space.
+
+This means there are two simultaneous task configurations for the same job,
+each valid for a different range of instances. While this isn't a recommended
+pattern, it is valid and supported by the Aurora scheduler.
diff --git a/source/documentation/0.21.0/features/mesos-fetcher.md b/source/documentation/0.21.0/features/mesos-fetcher.md
new file mode 100644
index 0000000..967a905
--- /dev/null
+++ b/source/documentation/0.21.0/features/mesos-fetcher.md
@@ -0,0 +1,46 @@
+Mesos Fetcher
+=============
+
+Mesos has support for downloading resources into the sandbox through the
+use of the [Mesos Fetcher](http://mesos.apache.org/documentation/latest/fetcher/)
+
+Aurora supports passing URIs to the Mesos Fetcher dynamically by including
+a list of URIs in job submissions.
+
+How to use
+----------
+The scheduler flag `-enable_mesos_fetcher` must be set to true.
+
+Currently only the scheduler side of this feature has been implemented,
+so a modification to the existing client or a custom Thrift client is required
+to make use of this feature.
+
+If using a custom Thrift client, the list of URIs must be included in TaskConfig
+as the `mesosFetcherUris` field.
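+
+For illustration only, a sketch of how a custom Thrift client might populate the field, assuming
+the Python bindings generated from `api.thrift` are importable as `gen.apache.aurora.api.ttypes`:
+
+    from gen.apache.aurora.api.ttypes import MesosFetcherURI, TaskConfig
+
+    task_config = TaskConfig(
+      # ... all other required TaskConfig fields omitted for brevity ...
+      mesosFetcherUris = {
+        MesosFetcherURI(value = 'https://example.com/data.tar.gz', extract = True, cache = False),
+      }
+    )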
+
+Each Mesos Fetcher URI has the following data members:
+
+|Property | Description|
+|---------|------|
+|value (required)  |Path to the resource needed in the sandbox.|
+|extract (optional)|Extract files from packed or compressed archives into the sandbox.|
+|cache (optional) | Use caching mechanism provided by Mesos for resources.|
+
+Note that this structure is very similar to the one provided for downloading
+resources needed for a [custom executor](../../operations/configuration/).
+
+This is because both features use the Mesos fetcher to retrieve resources into
+the sandbox. However, the custom executor feature uses a static set of URIs
+configured on the server side, while the Mesos Fetcher feature uses a dynamic set
+of URIs provided at the time of job submission.
+
+Security Implications
+---------------------
+There are security implications that must be taken into account when enabling this feature.
+**Enabling this feature may potentially enable any job submitting user to perform a privilege escalation.**
+
+Until a more thorough solution is created, one step that has been taken to mitigate this issue
+is to statically mark every user submitted URI as non-executable. This is in contrast to the set of URIs
+set in the custom executor feature which may mark any URI as executable.
+
+If the need arises to mark a downloaded URI as executable, please consider using the custom executor feature.
\ No newline at end of file
diff --git a/source/documentation/0.21.0/features/multitenancy.md b/source/documentation/0.21.0/features/multitenancy.md
new file mode 100644
index 0000000..08aedd5
--- /dev/null
+++ b/source/documentation/0.21.0/features/multitenancy.md
@@ -0,0 +1,82 @@
+Multitenancy
+============
+
+Aurora is a multi-tenant system that can run jobs of multiple clients/tenants.
+Going beyond the [resource isolation on an individual host](../resource-isolation/), it is
+crucial to prevent those jobs from stepping on each other's toes.
+
+
+Job Namespaces
+--------------
+
+The namespace for jobs in Aurora follows a hierarchical structure. This is meant to make it easier
+to differentiate between different jobs. A job key consists of four parts. The four parts are
+`<cluster>/<role>/<environment>/<jobname>` in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts.
+* Environment names are namespaces.
+* Jobname is the custom name of your job.
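+
+For example, the illustrative job key
+
+    devcluster/www-data/test/hello_world
+
+refers to the job `hello_world` run by the role `www-data` in the `test` environment of the
+`devcluster` cluster.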
+
+Role names correspond to user accounts. They are used for
+[authentication](../../operations/security/), as the linux user used to run jobs, and for the
+assignment of [quota](#preemption). If you don't know what accounts are available, contact your
+sysadmin.
+
+The environment component in the job key serves as a namespace. The values for
+environment are validated in the scheduler. By default, any of `devel`, `test`,
+`production`, or a value matching the regular expression `staging[0-9]*` is allowed. This validation can be
+changed to allow any arbitrary regular expression by setting the scheduler option `allowed_job_environments`.
+
+None of the values imply any difference in the scheduling behavior. Conventionally, the
+"environment" is set so as to indicate a certain level of stability in the behavior of the job
+by ensuring that an appropriate level of testing has been performed on the application code. e.g.
+in the case of a typical Job, releases may progress through the following phases in order of
+increasing level of stability: `devel`, `test`, `staging`, `production`.
+
+
+Configuration Tiers
+-------------------
+
+Tier is a predefined bundle of task configuration options. Aurora schedules tasks and assigns them
+resources based on their tier assignment. The default scheduler tier configuration allows for
+3 tiers:
+
+ - `revocable`: The `revocable` tier requires the task to run with [revocable](../resource-isolation/#oversubscription)
+ resources.
+ - `preemptible`: Setting the task’s tier to `preemptible` allows for the possibility of that task
+ being [preempted](#preemption) by other tasks when the cluster is running low on resources.
+ - `preferred`: The `preferred` tier prevents the task from using [revocable](../resource-isolation/#oversubscription)
+ resources and from being [preempted](#preemption).
+
+Since it is possible that a cluster is configured with a custom tier configuration, users should
+consult their cluster administrator to be informed of the tiers supported by the cluster. Attempts
+to schedule jobs with an unsupported tier will be rejected by the scheduler.
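+
+As a sketch (assuming the default tier configuration described above), a tier is requested directly
+in the job configuration:
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      tier = 'preemptible',
+      ...
+    )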
+
+
+Preemption
+----------
+
+In order to guarantee that important production jobs are always running, Aurora supports
+preemption.
+
+Consider a pending job that is a candidate for scheduling but cannot be scheduled due to a
+resource shortage. Active tasks can become victims of preemption if:
+
+ - both candidate and victim are owned by the same role and the
+   [priority](../../reference/configuration/#job-objects) of a victim is lower than the
+   [priority](../../reference/configuration/#job-objects) of the candidate.
+ - OR a victim is a `preemptible` or `revocable` [tier](#configuration-tiers) task and the candidate
+   is a `preferred` [tier](#configuration-tiers) task.
+
+In other words, tasks from `preferred` [tier](../../reference/configuration/#job-objects) jobs may
+preempt tasks from any `preemptible` or `revocable` job. However, a `preferred` task may only be
+preempted by tasks from `preferred` jobs in the same role with higher [priority](../../reference/configuration/#job-objects).
+
+Aurora requires resource quotas for [production non-dedicated jobs](../../reference/configuration/#job-objects).
+Quota is enforced at the job role level and, when set, defines a non-preemptible pool of compute resources within
+that role. All job types (service, adhoc or cron) require role resource quota unless a job has
+[dedicated constraint set](../constraints/#dedicated-attribute).
+
+To grant quota to a particular role in production, an operator can use the command
+`aurora_admin set_quota`.
diff --git a/source/documentation/0.21.0/features/resource-isolation.md b/source/documentation/0.21.0/features/resource-isolation.md
new file mode 100644
index 0000000..a5294f6
--- /dev/null
+++ b/source/documentation/0.21.0/features/resource-isolation.md
@@ -0,0 +1,181 @@
+Resources Isolation and Sizing
+==============================
+
+This document assumes Aurora and Mesos have been configured
+using our [recommended resource isolation settings](../../operations/configuration/#resource-isolation).
+
+- [Isolation](#isolation)
+- [Sizing](#sizing)
+- [Oversubscription](#oversubscription)
+
+
+Isolation
+---------
+
+Aurora is a multi-tenant system; a single software instance runs on a
+server, serving multiple clients/tenants. To share resources among
+tenants, it leverages Mesos for isolation of:
+
+* CPU
+* GPU
+* memory
+* disk space
+* ports
+
+CPU is a soft limit, and handled differently from memory and disk space.
+Too low a CPU value results in throttling your application and
+slowing it down. Memory and disk space are both hard limits; when your
+application goes over these values, it's killed.
+
+### CPU Isolation
+
+Mesos can be configured to use a quota based CPU scheduler (the *Completely*
+*Fair Scheduler*) to provide consistent and predictable performance.
+This is effectively a guarantee of resources -- you receive at least what
+you requested, but also no more than you've requested.
+
+The scheduler gives applications a CPU quota for every 100 ms interval.
+When an application uses its quota for an interval, it is throttled for
+the rest of the 100 ms. Usage resets for each interval and unused
+quota does not carry over.
+
+For example, an application specifying 4.0 CPU has access to 400 ms of
+CPU time every 100 ms. This CPU quota can be used in different ways,
+depending on the application and available resources. Consider the
+scenarios shown in this diagram.
+
+![CPU Availability](../images/CPUavailability.png)
+
+* *Scenario A*: the application can use up to 4 cores continuously for
+every 100 ms interval. It is never throttled and starts processing
+new requests immediately.
+
+* *Scenario B* : the application uses up to 8 cores (depending on
+availability) but is throttled after 50 ms. The CPU quota resets at the
+start of each new 100 ms interval.
+
+* *Scenario C* : is like Scenario A, but there is a garbage collection
+event in the second interval that consumes all CPU quota. The
+application throttles for the remaining 75 ms of that interval and
+cannot service requests until the next interval. In this example, the
+garbage collection finished in one interval but, depending on how much
+garbage needs collecting, it may take more than one interval and further
+delay service of requests.
+
+*Technical Note*: Mesos considers logical cores, also known as
+hyperthreading or SMT cores, as the unit of CPU.
+
+### Memory Isolation
+
+Mesos uses dedicated memory allocation. Your application always has
+access to the amount of memory specified in your configuration. The
+application's memory use is defined as the sum of the resident set size
+(RSS) of all processes in a shard. Each shard is considered
+independently.
+
+In other words, say you specified a memory size of 10GB. Each shard
+would receive 10GB of memory. If an individual shard's memory demands
+exceed 10GB, that shard is killed, but the other shards continue
+working.
+
+*Technical note*: Total memory size is not enforced at allocation time,
+so your application can request more than its allocation without getting
+an ENOMEM. However, it will be killed shortly after.
+
+### Disk Space
+
+Disk space used by your application is defined as the sum of the files'
+disk space in your application's directory, including the `stdout` and
+`stderr` logged from your application. Each shard is considered
+independently. You should use off-node storage for your application's
+data whenever possible.
+
+In other words, say you specified disk space size of 100MB. Each shard
+would receive 100MB of disk space. If an individual shard's disk space
+demands exceed 100MB, that shard is killed, but the other shards
+continue working.
+
+After your application finishes running, its allocated disk space is
+reclaimed. Thus, your job's final action should move any disk content
+that you want to keep, such as logs, to your home file system or other
+less transitory storage. Disk reclamation takes place an undefined
+period after the application finish time; until then, the disk contents
+are still available but you shouldn't count on them being so.
+
+*Technical note* : Disk space is not enforced at write so your
+application can write above its quota without getting an ENOSPC, but it
+will be killed shortly after. This is subject to change.
+
+### GPU Isolation
+
+GPU isolation will be supported for Nvidia devices starting from Mesos 1.0.
+Access to the allocated units will be exclusive with no sharing between tasks
+allowed (e.g. no fractional GPU allocation). For more details, see the
+[Mesos design document](https://docs.google.com/document/d/10GJ1A80x4nIEo8kfdeo9B11PIbS1xJrrB4Z373Ifkpo/edit#heading=h.w84lz7p4eexl)
+and the [Mesos agent configuration](http://mesos.apache.org/documentation/latest/configuration/).
+
+### Other Resources
+
+Other resources, such as network bandwidth, do not have any performance
+guarantees. For some resources, such as memory bandwidth, there are no
+practical sharing methods so some application combinations collocated on
+the same host may cause contention.
+
+
+Sizing
+-------
+
+### CPU Sizing
+
+To correctly size Aurora-run Mesos tasks, specify a per-shard CPU value
+that lets the task run at its desired performance when at peak load
+distributed across all shards. Include reserve capacity of at least 50%,
+possibly more, depending on how critical your service is (or how
+confident you are about your original estimate :-)), ideally by
+increasing the number of shards to also improve resiliency. When running
+your application, observe its CPU stats over time. If consistently at or
+near your quota during peak load, you should consider increasing either
+per-shard CPU or the number of shards.
+
+### Memory Sizing
+
+Size for your application's peak requirement. Observe the per-instance
+memory statistics over time, as memory requirements can vary over
+different periods. Remember that if your application exceeds its memory
+value, it will be killed, so you should also add a safety margin of
+around 10-20%. If you have the ability to do so, you may also want to
+put alerts on the per-instance memory.
+
+### Disk Space Sizing
+
+Size for your application's peak requirement. Rotate and discard log
+files as needed to stay within your quota. When running a Java process,
+add the maximum size of the Java heap to your disk space requirement, in
+order to account for an out of memory error dumping the heap
+into the application's sandbox space.
+
+### GPU Sizing
+
+GPU is highly dependent on your application requirements and is only limited
+by the number of physical GPU units available on a target box.
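+
+Putting the above together, per-instance sizing is declared through the `Resources` object on the
+task; the values below are purely illustrative:
+
+    resources = Resources(cpu = 2.0, ram = 4*GB, disk = 8*GB)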
+
+
+Oversubscription
+----------------
+
+Mesos supports [oversubscription of machine resources](http://mesos.apache.org/documentation/latest/oversubscription/)
+via the concept of revocable tasks. In contrast to non-revocable tasks, revocable tasks are best-effort.
+Mesos reserves the right to throttle or even kill them if they might affect existing high-priority
+user-facing services.
+
+As of today, the only revocable resources supported by Aurora are CPU and RAM. A job can
+opt in to use those by specifying the `revocable` [Configuration Tier](../../features/multitenancy/#configuration-tiers).
+A revocable job will only be scheduled using revocable resources, even if there are plenty of
+non-revocable resources available.
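+
+As an illustrative sketch, opting in is done by setting the job's tier; the
+job name and task below are placeholders:
+
+```python
+jobs = [
+  Service(
+    cluster = 'devcluster',
+    role = 'www-data',
+    environment = 'devel',
+    name = 'background_worker',   # hypothetical job name
+    tier = 'revocable',           # schedule only on revocable CPU/RAM offers
+    task = task                   # a Task defined as in the other examples
+  )
+]
+```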
+
+The Aurora scheduler must be [configured to receive revocable offers](../../operations/configuration/#resource-isolation)
+from Mesos and accept revocable jobs. If not configured properly, revocable tasks will never get
+assigned to hosts and will stay in `PENDING`.
+
+For details on how to mark a job as being revocable, see the
+[Configuration Reference](../../reference/configuration/).
diff --git a/source/documentation/0.21.0/features/service-discovery.md b/source/documentation/0.21.0/features/service-discovery.md
new file mode 100644
index 0000000..b35a569
--- /dev/null
+++ b/source/documentation/0.21.0/features/service-discovery.md
@@ -0,0 +1,43 @@
+Service Discovery
+=================
+
+It is possible for the Aurora executor to announce tasks into ServerSets for
+the purpose of service discovery.  ServerSets use the Zookeeper [group membership pattern](http://zookeeper.apache.org/doc/trunk/recipes.html#sc_outOfTheBox)
+of which there are several reference implementations:
+
+  - [C++](https://github.com/apache/mesos/blob/master/src/zookeeper/group.cpp)
+  - [Java](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/zookeeper/ServerSetImpl.java#L221)
+  - [Python](https://github.com/twitter/commons/blob/master/src/python/twitter/common/zookeeper/serverset/serverset.py#L51)
+
+These can also be used natively in Finagle using the [ZookeeperServerSetCluster](https://github.com/twitter/finagle/blob/master/finagle-serversets/src/main/scala/com/twitter/finagle/zookeeper/ZookeeperServerSetCluster.scala).
+
+For more information about how to configure announcing, see the [Configuration Reference](../../reference/configuration/).
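+
+As a minimal sketch, announcing is enabled by attaching an `Announcer` to the
+job; the values below are illustrative:
+
+```python
+jobs = [
+  Service(
+    cluster = 'devcluster',
+    role = 'www-data',
+    environment = 'devel',
+    name = 'http_example',
+    announce = Announcer(primary_port = 'http'),
+    task = task   # a Task whose process binds to {{thermos.ports[http]}}
+  )
+]
+```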
+
+Using Mesos DiscoveryInfo
+-------------------------
+Aurora provides experimental support for populating DiscoveryInfo in Mesos. This can be used to build
+a custom service discovery system that does not rely on ZooKeeper. Please see the `Service Discovery` section in the
+[Mesos Framework Development guide](http://mesos.apache.org/documentation/latest/app-framework-development-guide/) for
+an explanation of the protobuf message in Mesos.
+
+To use this feature, please enable the `--populate_discovery_info` flag on the scheduler. All jobs started by the scheduler
+afterwards will have their portmap populated to Mesos and be discoverable via the `/state` endpoint on the Mesos master and agent.
+
+### Using Mesos DNS
+An example is using [Mesos-DNS](https://github.com/mesosphere/mesos-dns), which is able to generate multiple DNS
+records. With the current implementation, the example job with key `devcluster/vagrant/test/http-example` generates at
+least the following:
+
+1. An A record for `http_example.test.vagrant.aurora.mesos` (which only includes IP address);
+2. A [SRV record](https://en.wikipedia.org/wiki/SRV_record) for
+ `_http_example.test.vagrant._tcp.aurora.mesos`, which includes the IP address and every port. This should only
+  be used if the service has one port.
+3. A SRV record `_{port-name}._http_example.test.vagrant._tcp.aurora.mesos` for each port name
+  defined. This should be used when the service has multiple ports. To have this working properly, you need to
+  add `-populate_discovery_info` to the scheduler's configuration.
+
+Things to note:
+
+1. The domain part (".mesos" in the above example) can be configured in [Mesos DNS](http://mesosphere.github.io/mesos-dns/docs/configuration-parameters.html);
+2. Right now, portmap and port aliases in the announcer object are not reflected in DiscoveryInfo and are therefore not visible in
+   Mesos DNS records either. This is because they are only resolved in the Thermos executor.
diff --git a/source/documentation/0.21.0/features/services.md b/source/documentation/0.21.0/features/services.md
new file mode 100644
index 0000000..91889d2
--- /dev/null
+++ b/source/documentation/0.21.0/features/services.md
@@ -0,0 +1,116 @@
+Long-running Services
+=====================
+
+Jobs that are always restarted on completion, whether successful or unsuccessful,
+are called services. This is useful for long-running processes
+such as webservices that should always be running, unless stopped explicitly.
+
+
+Service Specification
+---------------------
+
+A job is identified as a service by the presence of the flag
+`service=True` in the [`Job`](../../reference/configuration/#job-objects) object.
+The `Service` alias can be used as shorthand for `Job` with `service=True`.
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/hello_world.aurora
+    hello = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = SequentialTask(
+      processes = [hello],
+      resources = Resources(cpu = 1.0, ram = 128*MB, disk = 128*MB)
+    )
+
+    jobs = [
+      Service(
+        task = task,
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'prod',
+        name = 'hello'
+      )
+    ]
+
+
+Jobs without the service bit set only restart up to `max_task_failures` times and only if they
+terminated unsuccessfully either due to human error or machine failure (see the
+[`Job`](../../reference/configuration/#job-objects) object for details).
+
+
+Ports
+-----
+
+In order to be useful, most services have to bind to one or more ports. Aurora enables this
+use case via the [`thermos.ports` namespace](../../reference/configuration/#thermos-namespace), which
+allows you to request arbitrarily named ports:
+
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[http]}}'
+    )
+
+
+When this process is included in a job, the job will be allocated a port, and the command line
+will be replaced with something like:
+
+    ./run_nginx.sh -port 42816
+
+Where 42816 happens to be the allocated port.
+
+For details on how to enable clients to discover this dynamically assigned port, see our
+[Service Discovery](../service-discovery/) documentation.
+
+
+Health Checking
+---------------
+
+Typically, the Thermos executor monitors processes within a task only by liveness of the forked
+process. In addition to that, Aurora has support for rudimentary health checking: either via HTTP
+or via custom shell scripts.
+
+For example, simply by requesting a `health` port, a process can request to be health checked
+via repeated calls to the `/health` endpoint:
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[health]}}'
+    )
+
+Please see the
+[configuration reference](../../reference/configuration/#healthcheckconfig-objects)
+for configuration options for this feature.
+
+Starting with the 0.17.0 release, job updates rely only on task health-checks by introducing
+a `min_consecutive_successes` parameter on the HealthCheckConfig object. This parameter represents
+the number of successful health checks needed before a task is moved into the `RUNNING` state. Tasks
+that do not have enough successful health checks within the first `n` attempts are moved to the
+`FAILED` state, where `n = ceil(initial_interval_secs/interval_secs) + max_consecutive_failures +
+min_consecutive_successes`. In order to accommodate variability during task warm up, `initial_interval_secs`
+will act as a grace period. Any health-check failures during the first `m` attempts are ignored and
+do not count towards `max_consecutive_failures`, where `m = ceil(initial_interval_secs/interval_secs)`.
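+
+As a minimal sketch (the concrete values are only illustrative), such a policy
+can be expressed on the job like this:
+
+    health_config = HealthCheckConfig(
+      initial_interval_secs = 15,       # grace period: m = ceil(15/5) = 3 attempts
+      interval_secs = 5,
+      timeout_secs = 1,
+      max_consecutive_failures = 2,
+      min_consecutive_successes = 2     # successful checks needed before RUNNING
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'prod',
+        name = 'hello',
+        task = task,                    # a task requesting a 'health' port
+        health_check_config = health_config
+      )
+    ]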
+
+As [job updates](../job-updates/) are based only on health-checks, it is not necessary to set
+`watch_secs` to the worst-case update time; it can instead be set to 0. The scheduler considers a
+task that is in the `RUNNING` state to be healthy and proceeds to update the next batch of instances.
+For details on how to control health checks, please see the
+[HealthCheckConfig](../../reference/configuration/#healthcheckconfig-objects) configuration object.
+Existing jobs that do not configure a health-check can fall back to using `watch_secs` to
+monitor a task before considering it healthy.
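+
+For example, a job that relies purely on health-check driven updates might use
+an update configuration along these lines (values are illustrative):
+
+    update_config = UpdateConfig(
+      batch_size = 1,   # update one instance at a time
+      watch_secs = 0    # rely on health checks instead of a fixed watch window
+    )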
+
+You can pause health checking by touching a file inside of your sandbox, named `.healthchecksnooze`.
+As long as that file is present, health checks will be disabled, enabling users to gather core
+dumps or other performance measurements without worrying about Aurora's health check killing
+their process.
+
+WARNING: Remember to remove this when you are done, otherwise your instance will have permanently
+disabled health checks.
diff --git a/source/documentation/0.21.0/features/sla-metrics.md b/source/documentation/0.21.0/features/sla-metrics.md
new file mode 100644
index 0000000..bf6497f
--- /dev/null
+++ b/source/documentation/0.21.0/features/sla-metrics.md
@@ -0,0 +1,215 @@
+Aurora SLA Measurement
+======================
+
+- [Overview](#overview)
+- [Metric Details](#metric-details)
+  - [Platform Uptime](#platform-uptime)
+  - [Job Uptime](#job-uptime)
+  - [Median Time To Assigned (MTTA)](#median-time-to-assigned-\(mtta\))
+  - [Median Time To Starting (MTTS)](#median-time-to-starting-\(mtts\))
+  - [Median Time To Running (MTTR)](#median-time-to-running-\(mttr\))
+- [Limitations](#limitations)
+
+## Overview
+
+The primary goal of the feature is collection and monitoring of Aurora job SLA (Service Level
+Agreement) metrics that define a contractual relationship between the Aurora/Mesos platform
+and hosted services.
+
+The Aurora SLA feature is by default only enabled for service (non-cron)
+production jobs (`"production=True"` in your `.aurora` config). It can be enabled for
+non-production services by an operator via the scheduler command line flag `-sla_non_prod_metrics`.
+
+Counters that track SLA measurements are computed periodically within the scheduler.
+The individual instance metrics are refreshed every minute (configurable via
+`sla_stat_refresh_interval`). The instance counters are subsequently aggregated by
+relevant grouping types before being exported to the scheduler's `/vars` endpoint (when using `vagrant`,
+that would be `http://192.168.33.7:8081/vars`).
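+
+As a hedged sketch, the exported counters can be scraped with a few lines of
+Python, assuming the `/vars` endpoint returns one `name value` pair per line
+(adjust the URL for your cluster):
+
+```python
+import urllib.request
+
+VARS_URL = 'http://192.168.33.7:8081/vars'  # the vagrant example from above
+
+with urllib.request.urlopen(VARS_URL) as resp:
+    for line in resp.read().decode('utf-8').splitlines():
+        if line.startswith('sla_'):   # keep only the SLA counters
+            print(line)
+```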
+
+
+## Metric Details
+
+### Platform Uptime
+
+*Aggregate amount of time a job spends in a non-runnable state due to platform unavailability
+or scheduling delays. This metric tracks Aurora/Mesos uptime performance and reflects on any
+system-caused downtime events (tasks LOST or DRAINED). Any user-initiated task kills/restarts
+will not degrade this metric.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_platform_uptime_percent`
+* Per cluster - `sla_cluster_platform_uptime_percent`
+
+**Units:** percent
+
+A fault in the task environment may cause Aurora/Mesos to have different views of the task state
+or lose track of the task's existence. In such cases, the service task is marked as LOST and
+rescheduled by Aurora. For example, this may happen when the task stays in ASSIGNED or STARTING
+for too long or the Mesos agent becomes unhealthy (or disappears completely). The time between
+the task entering LOST and its replacement reaching the RUNNING state is counted towards platform downtime.
+
+Another example of a platform downtime event is the administrator-requested task rescheduling. This
+happens during planned Mesos agent maintenance when all agent tasks are marked as DRAINED and
+rescheduled elsewhere.
+
+To accurately calculate Platform Uptime, we must separate platform incurred downtime from user
+actions that put a service instance in a non-operational state. It is simpler to isolate
+user-incurred downtime and treat all other downtime as platform incurred.
+
+Currently, a user can cause a healthy service (task) downtime in only two ways: via `killTasks`
+or `restartShards` RPCs. For both, their affected tasks leave an audit state transition trail
+relevant to uptime calculations. By applying a special "SLA meaning" to exposed task state
+transition records, we can build a deterministic downtime trace for every given service instance.
+
+A task going through a state transition carries one of three possible SLA meanings
+(see [SlaAlgorithm.java](https://github.com/apache/aurora/blob/rel/0.21.0/src/main/java/org/apache/aurora/scheduler/sla/SlaAlgorithm.java) for
+sla-to-task-state mapping):
+
+* Task is UP: starts a period where the task is considered to be up and running from the Aurora
+  platform standpoint.
+
+* Task is DOWN: starts a period where the task cannot reach the UP state for some
+  non-user-related reason. Counts towards instance downtime.
+
+* Task is REMOVED from SLA: starts a period where the task is not expected to be UP due to
+  user initiated action or failure. We ignore this period for the uptime calculation purposes.
+
+This metric is recalculated over the last sampling period (last minute) to account for
+any UP/DOWN/REMOVED events. It ignores any UP/DOWN events not immediately adjacent to the
+sampling interval as well as adjacent REMOVED events.
+
+### Job Uptime
+
+*Percentage of the job instances considered to be in RUNNING state for the specified duration
+relative to request time. This is a purely application-side metric that considers the aggregate
+uptime of all RUNNING instances. Any user- or platform-initiated restarts directly affect
+this metric.*
+
+**Collection scope:** We currently expose job uptime values at 5 pre-defined
+percentiles (50th,75th,90th,95th and 99th):
+
+* `sla_<job_key>_job_uptime_50_00_sec`
+* `sla_<job_key>_job_uptime_75_00_sec`
+* `sla_<job_key>_job_uptime_90_00_sec`
+* `sla_<job_key>_job_uptime_95_00_sec`
+* `sla_<job_key>_job_uptime_99_00_sec`
+
+**Units:** seconds
+
+You can also get customized real-time stats from the Aurora client. See `aurora sla -h` for
+more details.
+
+### Median Time To Assigned (MTTA)
+
+*Median time a job spends waiting for its tasks to be assigned to a host. This is a combined
+metric that helps track the dependency of scheduling performance on the requested resources
+(user scope) as well as the internal scheduler bin-packing algorithm efficiency (platform scope).*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtta_ms`
+* Per cluster - `sla_cluster_mtta_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.21.0/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mtta_ms`
+    * `sla_cpu_medium_mtta_ms`
+    * `sla_cpu_large_mtta_ms`
+    * `sla_cpu_xlarge_mtta_ms`
+    * `sla_cpu_xxlarge_mtta_ms`
+  * By RAM:
+    * `sla_ram_small_mtta_ms`
+    * `sla_ram_medium_mtta_ms`
+    * `sla_ram_large_mtta_ms`
+    * `sla_ram_xlarge_mtta_ms`
+    * `sla_ram_xxlarge_mtta_ms`
+  * By DISK:
+    * `sla_disk_small_mtta_ms`
+    * `sla_disk_medium_mtta_ms`
+    * `sla_disk_large_mtta_ms`
+    * `sla_disk_xlarge_mtta_ms`
+    * `sla_disk_xxlarge_mtta_ms`
+
+**Units:** milliseconds
+
+MTTA only considers instances that have already reached ASSIGNED state and ignores those
+that are still PENDING. This ensures straggler instances (e.g. with unreasonable resource
+constraints) do not affect metric curves.
+
+### Median Time To Starting (MTTS)
+
+*Median time a job waits for its tasks to reach STARTING state. This is a comprehensive metric
+reflecting on the overall time it takes for Aurora/Mesos to start initializing the sandbox
+for a task.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtts_ms`
+* Per cluster - `sla_cluster_mtts_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.21.0/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mtts_ms`
+    * `sla_cpu_medium_mtts_ms`
+    * `sla_cpu_large_mtts_ms`
+    * `sla_cpu_xlarge_mtts_ms`
+    * `sla_cpu_xxlarge_mtts_ms`
+  * By RAM:
+    * `sla_ram_small_mtts_ms`
+    * `sla_ram_medium_mtts_ms`
+    * `sla_ram_large_mtts_ms`
+    * `sla_ram_xlarge_mtts_ms`
+    * `sla_ram_xxlarge_mtts_ms`
+  * By DISK:
+    * `sla_disk_small_mtts_ms`
+    * `sla_disk_medium_mtts_ms`
+    * `sla_disk_large_mtts_ms`
+    * `sla_disk_xlarge_mtts_ms`
+    * `sla_disk_xxlarge_mtts_ms`
+
+**Units:** milliseconds
+
+MTTS only considers instances in STARTING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+### Median Time To Running (MTTR)
+
+*Median time a job waits for its tasks to reach RUNNING state. This is a comprehensive metric
+reflecting on the overall time it takes for Aurora/Mesos to start executing user content.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mttr_ms`
+* Per cluster - `sla_cluster_mttr_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.21.0/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mttr_ms`
+    * `sla_cpu_medium_mttr_ms`
+    * `sla_cpu_large_mttr_ms`
+    * `sla_cpu_xlarge_mttr_ms`
+    * `sla_cpu_xxlarge_mttr_ms`
+  * By RAM:
+    * `sla_ram_small_mttr_ms`
+    * `sla_ram_medium_mttr_ms`
+    * `sla_ram_large_mttr_ms`
+    * `sla_ram_xlarge_mttr_ms`
+    * `sla_ram_xxlarge_mttr_ms`
+  * By DISK:
+    * `sla_disk_small_mttr_ms`
+    * `sla_disk_medium_mttr_ms`
+    * `sla_disk_large_mttr_ms`
+    * `sla_disk_xlarge_mttr_ms`
+    * `sla_disk_xxlarge_mttr_ms`
+
+**Units:** milliseconds
+
+MTTR only considers instances in RUNNING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+## Limitations
+
+* The availability of Aurora SLA metrics is bound by the scheduler availability.
+
+* All metrics are calculated at a pre-defined interval (currently set at 1 minute).
+  Scheduler restarts may result in missed collections.
diff --git a/source/documentation/0.21.0/features/sla-requirements.md b/source/documentation/0.21.0/features/sla-requirements.md
new file mode 100644
index 0000000..ee876f3
--- /dev/null
+++ b/source/documentation/0.21.0/features/sla-requirements.md
@@ -0,0 +1,185 @@
+SLA Requirements
+================
+
+- [Overview](#overview)
+- [Default SLA](#default-sla)
+- [Custom SLA](#custom-sla)
+  - [Count-based](#count-based)
+  - [Percentage-based](#percentage-based)
+  - [Coordinator-based](#coordinator-based)
+
+## Overview
+
+Aurora guarantees SLA requirements for jobs. These requirements limit the impact of cluster-wide
+maintenance operations on the jobs. For instance, when an operator upgrades
+the OS on all the Mesos agent machines, the tasks scheduled on them need to be drained.
+By specifying SLA requirements, a job can make sure that it has enough instances to
+continue operating safely without incurring downtime.
+
+> SLA is defined as the minimum number of active tasks required for a job over every duration window.
+A task is active if it was in the `RUNNING` state during the last duration window.
+
+There is a [default](#default-sla) SLA guarantee for
+[preferred](../../features/multitenancy/#configuration-tiers) tier jobs and it is also possible to
+specify [custom](#custom-sla) SLA requirements.
+
+## Default SLA
+
+Aurora guarantees a default SLA requirement for tasks in
+[preferred](../../features/multitenancy/#configuration-tiers) tier.
+
+> 95% of tasks in a job will be `active` for every 30 mins.
+
+
+## Custom SLA
+
+For jobs that require different guarantees, Aurora allows them to specify their own
+SLA requirements via `SlaPolicies`. There are 3 different ways to express SLA requirements.
+
+### [Count-based](../../reference/configuration/#countslapolicy-objects)
+
+For jobs that need a minimum `number` of instances to be running all the time,
+[`CountSlaPolicy`](../../reference/configuration/#countslapolicy-objects)
+provides the ability to express the minimum number of required active instances (i.e. number of
+tasks that are `RUNNING` for at least `duration_secs`). For instance, if we have a
+`replicated-service` that has 3 instances and needs at least 2 instances every 30 minutes to be
+treated healthy, the SLA requirement can be expressed with a
+[`CountSlaPolicy`](../../reference/configuration/#countslapolicy-objects) like below,
+
+```python
+Job(
+  name = 'replicated-service',
+  role = 'www-data',
+  instances = 3,
+  sla_policy = CountSlaPolicy(
+    count = 2,
+    duration_secs = 1800
+  )
+  ...
+)
+```
+
+### [Percentage-based](../../reference/configuration/#percentageslapolicy-objects)
+
+For jobs that need a minimum `percentage` of instances to be running all the time,
+[`PercentageSlaPolicy`](../../reference/configuration/#percentageslapolicy-objects) provides the
+ability to express the minimum percentage of required active instances (i.e. percentage of tasks
+that are `RUNNING` for at least `duration_secs`). For instance, if we have a `webservice` that
+has 10000 instances for handling peak load and cannot have more than 0.1% of the instances down
+for every 1 hr, the SLA requirement can be expressed with a
+[`PercentageSlaPolicy`](../../reference/configuration/#percentageslapolicy-objects) like below,
+
+```python
+Job(
+  name = 'frontend-service',
+  role = 'www-data',
+  instances = 10000,
+  sla_policy = PercentageSlaPolicy(
+    percentage = 99.9,
+    duration_secs = 3600
+  )
+  ...
+)
+```
+
+### [Coordinator-based](../../reference/configuration/#coordinatorslapolicy-objects)
+
+When none of the above methods is enough to describe the SLA requirements for a job, the SLA
+calculation can be off-loaded to a custom service called the `Coordinator`. The `Coordinator` needs
+to expose an endpoint that will be called to check if removal of a task will affect the SLA
+requirements for the job. This is useful to control the number of tasks that undergo maintenance
+at a time, without affecting the SLA for the application.
+
+Consider an example where we have a `storage-service` that stores 2 replicas of an object. Each replica
+is distributed across the instances, such that replicas are stored on different hosts. In addition,
+a consistent hash is used for distributing the data across the instances.
+
+When an instance needs to be drained (say for host-maintenance), we have to make sure that at least 1 of
+the 2 replicas remains available. In such a case, a `Coordinator` service can be used to maintain
+the SLA guarantees required for the job.
+
+The job can be configured with a
+[`CoordinatorSlaPolicy`](../../reference/configuration/#coordinatorslapolicy-objects) to specify the
+coordinator endpoint and the field in the response JSON that indicates if the SLA will be affected
+or not affected, when the task is removed.
+
+```python
+Job(
+  name = 'storage-service',
+  role = 'www-data',
+  sla_policy = CoordinatorSlaPolicy(
+    coordinator_url = 'http://coordinator.example.com',
+    status_key = 'drain'
+  )
+  ...
+)
+```
+
+
+#### Coordinator Interface [Experimental]
+
+When a [`CoordinatorSlaPolicy`](../../reference/configuration/#coordinatorslapolicy-objects) is
+specified for a job, any action that requires removing a task
+(such as drains) will be required to get approval from the `Coordinator` before proceeding. The
+coordinator service needs to expose an HTTP endpoint that can take a `task-key` param
+(`<cluster>/<role>/<env>/<name>/<instance>`) and a JSON body describing the task
+details, force maintenance countdown (in ms) and other params, and return a JSON response that
+contains the boolean status for allowing or disallowing the task's removal.
+
+##### Request:
+```javascript
+POST /
+  ?task=<cluster>/<role>/<env>/<name>/<instance>
+
+{
+  "forceMaintenanceCountdownMs": "604755646",
+  "task": "cluster/role/devel/job/1",
+  "taskConfig": {
+    "assignedTask": {
+      "taskId": "taskA",
+      "slaveHost": "a",
+      "task": {
+        "job": {
+          "role": "role",
+          "environment": "devel",
+          "name": "job"
+        },
+        ...
+      },
+      "assignedPorts": {
+        "http": 1000
+      },
+      "instanceId": 1
+      ...
+    },
+    ...
+  }
+}
+```
+
+##### Response:
+```json
+{
+  "drain": true
+}
+```
+
+If the Coordinator allows removal of the task, then the task's
+[termination lifecycle](../../reference/configuration/#httplifecycleconfig-objects)
+is triggered. If the Coordinator does not allow removal, then the request will be retried again in the
+future.
+
+#### Coordinator Actions
+
+Each Coordinator endpoint gets its own lock, which is used to serialize calls to that Coordinator.
+This guarantees that only one concurrent request is sent to a coordinator endpoint. This allows
+coordinators to simply look at the current state of the tasks to determine the SLA (without having
+to worry about in-flight and pending requests). However, if there are multiple coordinators,
+maintenance can be done in parallel across all the coordinators.
+
+_Note: Single concurrent request to a coordinator endpoint does not translate as exactly-once
+guarantee. The coordinator must be able to handle duplicate drain
+requests for the same task._
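+
+As a hedged sketch (not part of Aurora itself), a trivial Coordinator matching
+the request/response shown above could look like this. It unconditionally
+approves removals; a real coordinator would inspect its replica and task state
+before answering:
+
+```python
+import json
+from http.server import BaseHTTPRequestHandler, HTTPServer
+from urllib.parse import urlparse, parse_qs
+
+class CoordinatorHandler(BaseHTTPRequestHandler):
+    def do_POST(self):
+        # Task key arrives as a query parameter, task details as a JSON body.
+        task = parse_qs(urlparse(self.path).query).get('task', [''])[0]
+        length = int(self.headers.get('Content-Length', 0))
+        details = json.loads(self.rfile.read(length)) if length else {}
+        print('drain request for %s (%d detail fields)' % (task, len(details)))
+
+        allow_drain = True  # placeholder SLA decision
+        body = json.dumps({'drain': allow_drain}).encode('utf-8')
+        self.send_response(200)
+        self.send_header('Content-Type', 'application/json')
+        self.send_header('Content-Length', str(len(body)))
+        self.end_headers()
+        self.wfile.write(body)
+
+if __name__ == '__main__':
+    HTTPServer(('0.0.0.0', 8080), CoordinatorHandler).serve_forever()
+```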
+
+
+
diff --git a/source/documentation/0.21.0/features/webhooks.md b/source/documentation/0.21.0/features/webhooks.md
new file mode 100644
index 0000000..286746e
--- /dev/null
+++ b/source/documentation/0.21.0/features/webhooks.md
@@ -0,0 +1,112 @@
+Webhooks
+========
+
+Aurora has an optional feature which allows an operator to specify a file to configure an HTTP webhook
+to receive task state change events. It can be enabled with a scheduler flag, e.g.
+`-webhook_config=/path/to/webhook.json`. At this point, webhooks are still considered *experimental*.
+
+Below is a sample configuration:
+
+```json
+{
+  "headers": {
+    "Content-Type": "application/vnd.kafka.json.v1+json",
+    "Producer-Type": "reliable"
+  },
+  "targetURL": "http://localhost:5000/",
+  "timeoutMsec": 5
+}
+```
+
+And an example of a response that you will get back:
+
+```json
+{
+    "task":
+    {
+        "cachedHashCode":0,
+        "assignedTask": {
+            "cachedHashCode":0,
+            "taskId":"vagrant-test-http_example-8-a6cf7ec5-d793-49c7-b10f-0e14ab80bfff",
+            "task": {
+                "cachedHashCode":-1819348376,
+                "job": {
+                    "cachedHashCode":803049425,
+                    "role":"vagrant",
+                    "environment":"test",
+                    "name":"http_example"
+                    },
+                "owner": {
+                    "cachedHashCode":226895216,
+                    "user":"vagrant"
+                    },
+                "isService":true,
+                "numCpus":0.1,
+                "ramMb":16,
+                "diskMb":8,
+                "priority":0,
+                "maxTaskFailures":1,
+                "production":false,
+                "resources":[
+                    {"cachedHashCode":729800451,"setField":"NUM_CPUS","value":0.1},
+                    {"cachedHashCode":552899914,"setField":"RAM_MB","value":16},
+                    {"cachedHashCode":-1547868317,"setField":"DISK_MB","value":8},
+                    {"cachedHashCode":1957328227,"setField":"NAMED_PORT","value":"http"},
+                    {"cachedHashCode":1954229436,"setField":"NAMED_PORT","value":"tcp"}
+                    ],
+                "constraints":[],
+                "requestedPorts":["http","tcp"],
+                "taskLinks":{"http":"http://%host%:%port:http%"},
+                "contactEmail":"vagrant@localhost",
+                "executorConfig": {
+                    "cachedHashCode":-1194797325,
+                    "name":"AuroraExecutor",
+                    "data": "{\"environment\": \"test\", \"health_check_config\": {\"initial_interval_secs\": 5.0, \"health_checker\": { \"http\": {\"expected_response_code\": 0, \"endpoint\": \"/health\", \"expected_response\": \"ok\"}}, \"max_consecutive_failures\": 0, \"timeout_secs\": 1.0, \"interval_secs\": 1.0}, \"name\": \"http_example\", \"service\": true, \"max_task_failures\": 1, \"cron_collision_policy\": \"KILL_EXISTING\", \"enable_hooks\": false, \"cluster\": \"devcluster\", \"task\": {\"processes\": [{\"daemon\": false, \"name\": \"echo_ports\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"echo \\\"tcp port: {{thermos.ports[tcp]}}; http port: {{thermos.ports[http]}}; alias: {{thermos.ports[alias]}}\\\"\", \"final\": false}, {\"daemon\": false, \"name\": \"stage_server\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"cp /vagrant/src/test/sh/org/apache/aurora/e2e/http_example.py .\", \"final\": false}, {\"daemon\": false, \"name\": \"run_server\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"python http_example.py {{thermos.ports[http]}}\", \"final\": false}], \"name\": \"http_example\", \"finalization_wait\": 30, \"max_failures\": 1, \"max_concurrency\": 0, \"resources\": {\"disk\": 8388608, \"ram\": 16777216, \"cpu\": 0.1}, \"constraints\": [{\"order\": [\"echo_ports\", \"stage_server\", \"run_server\"]}]}, \"production\": false, \"role\": \"vagrant\", \"contact\": \"vagrant@localhost\", \"announce\": {\"primary_port\": \"http\", \"portmap\": {\"alias\": \"http\"}}, \"lifecycle\": {\"http\": {\"graceful_shutdown_endpoint\": \"/quitquitquit\", \"port\": \"health\", \"shutdown_endpoint\": \"/abortabortabort\"}}, \"priority\": 0}"},
+                    "metadata":[],
+                    "container":{
+                        "cachedHashCode":-1955376216,
+                        "setField":"MESOS",
+                        "value":{"cachedHashCode":31}}
+                    },
+                    "assignedPorts":{},
+                    "instanceId":8
+        },
+        "status":"PENDING",
+        "failureCount":0,
+        "taskEvents":[
+            {"cachedHashCode":0,"timestamp":1464992060258,"status":"PENDING","scheduler":"aurora"}]
+        },
+        "oldState":{}}
+```
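+
+As a hedged sketch, a minimal receiver for these events (assuming they arrive
+as HTTP POSTs to the configured `targetURL`) could look like this:
+
+```python
+import json
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+class WebhookHandler(BaseHTTPRequestHandler):
+    def do_POST(self):
+        length = int(self.headers.get('Content-Length', 0))
+        event = json.loads(self.rfile.read(length))
+        # Log the task id and its new status from the payload shown above.
+        task_id = event['task']['assignedTask']['taskId']
+        print('%s -> %s' % (task_id, event['task']['status']))
+        self.send_response(200)
+        self.end_headers()
+
+if __name__ == '__main__':
+    HTTPServer(('0.0.0.0', 5000), WebhookHandler).serve_forever()
+```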
+
+By default, the webhook watches all TaskStateChanges and sends events to the configured endpoint. If you
+are only interested in certain types of TaskStateChange (e.g. transitions to `LOST` or `FAILED` statuses),
+you can specify a whitelist of the desired task statuses in webhook.json. The webhook will only send
+the corresponding events for the whitelisted statuses to the configured endpoint.
+
+```json
+{
+  "headers": {
+    "Content-Type": "application/vnd.kafka.json.v1+json",
+    "Producer-Type": "reliable"
+  },
+  "targetURL": "http://localhost:5000/",
+  "timeoutMsec": 50,
+  "statuses": ["LOST", "FAILED"]
+}
+```
+
+If you want to whitelist all TaskStateChanges, you can add a wildcard character `*` to your whitelist
+like below, or simply leave out the `statuses` field in webhook.json.
+
+```json
+{
+  "headers": {
+    "Content-Type": "application/vnd.kafka.json.v1+json",
+    "Producer-Type": "reliable"
+  },
+  "targetURL": "http://localhost:5000/",
+  "timeoutMsec": 50,
+  "statuses": ["*"]
+}
+```
diff --git a/source/documentation/0.21.0/getting-started/overview.md b/source/documentation/0.21.0/getting-started/overview.md
new file mode 100644
index 0000000..84c5916
--- /dev/null
+++ b/source/documentation/0.21.0/getting-started/overview.md
@@ -0,0 +1,112 @@
+Aurora System Overview
+======================
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+
+Components
+----------
+
+It is important to have an understanding of the components that make up
+a functioning Aurora cluster.
+
+![Aurora Components](../images/components.png)
+
+* **Aurora scheduler**
+  The scheduler is your primary interface to the work you run in your cluster.  You will
+  instruct it to run jobs, and it will manage them in Mesos for you.  You will also frequently use
+  the scheduler's read-only web interface as a heads-up display for what's running in your cluster.
+
+* **Aurora client**
+  The client (`aurora` command) is a command line tool that exposes primitives that you can use to
+  interact with the scheduler. The client operates on
+
+  Aurora also provides an admin client (`aurora_admin` command) that contains commands built for
+  cluster administrators.  You can use this tool to do things like manage user quotas and manage
+  graceful maintenance on machines in the cluster.
+
+* **Aurora executor**
+  The executor (a.k.a. Thermos executor) is responsible for carrying out the workloads described in
+  the Aurora DSL (`.aurora` files).  The executor is what actually executes user processes.  It will
+  also perform health checking of tasks and register tasks in ZooKeeper for the purposes of dynamic
+  service discovery.
+
+* **Aurora observer**
+  The observer provides browser-based access to the status of individual tasks executing on worker
+  machines.  It gives insight into the processes executing, and facilitates browsing of task sandbox
+  directories.
+
+* **ZooKeeper**
+  [ZooKeeper](http://zookeeper.apache.org) is a distributed consensus system.  In an Aurora cluster
+  it is used for reliable election of the leading Aurora scheduler and Mesos master.  It is also
+  used as a vehicle for service discovery, see [Service Discovery](../../features/service-discovery/)
+
+* **Mesos master**
+  The master is responsible for tracking worker machines and performing accounting of their
+  resources.  The scheduler interfaces with the master to control the cluster.
+
+* **Mesos agent**
+  The agent receives work assigned by the scheduler and executes it.  It interfaces with Linux
+  isolation systems like cgroups, namespaces and Docker to manage the resource consumption of tasks.
+  When a user task is launched, the agent will launch the executor (in the context of a Linux cgroup
+  or Docker container depending upon the environment), which will in turn fork user processes.
+
+  In earlier versions of Mesos and Aurora, the Mesos agent was known as the Mesos slave.
+
+
+Jobs, Tasks and Processes
+--------------------------
+
+Aurora is a Mesos framework used to schedule *jobs* onto Mesos. Mesos
+cares about individual *tasks*, but typical jobs consist of dozens or
+hundreds of task replicas. Aurora provides a layer on top of Mesos with
+its `Job` abstraction. An Aurora `Job` consists of a task template and
+instructions for creating near-identical replicas of that task (modulo
+things like "instance id" or specific port numbers which may differ from
+machine to machine).
+
+How many tasks make up a Job is complicated. On a basic level, a Job consists of
+one task template and instructions for creating near-identical replicas of that task
+(otherwise referred to as "instances" or "shards").
+
+A task can merely be a single *process* corresponding to a single
+command line, such as `python2.7 my_script.py`. However, a task can also
+consist of many separate processes, which all run within a single
+sandbox. For example, running multiple cooperating agents together,
+such as `logrotate`, `installer`, master, or agent processes. This is
+where Thermos comes in. While Aurora provides a `Job` abstraction on
+top of Mesos `Tasks`, Thermos provides a `Process` abstraction
+underneath Mesos `Task`s and serves as part of the Aurora framework's
+executor.
+
+You define `Job`s, `Task`s, and `Process`es in a configuration file.
+Configuration files are written in Python, and make use of the
+[Pystachio](https://github.com/wickman/pystachio) templating language,
+along with specific Aurora, Mesos, and Thermos commands and methods.
+The configuration files typically end with a `.aurora` extension.
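+
+A minimal illustrative `.aurora` configuration showing the hierarchy (the
+names and values are placeholders):
+
+```python
+hello = Process(name = 'hello', cmdline = 'echo hello world')
+
+hello_task = SequentialTask(
+  processes = [hello],
+  resources = Resources(cpu = 0.1, ram = 16*MB, disk = 16*MB))
+
+jobs = [
+  Job(cluster = 'devcluster',
+      role = 'www-data',
+      environment = 'devel',
+      name = 'hello',
+      instances = 2,        # two near-identical task replicas
+      task = hello_task)
+]
+```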
+
+Summary:
+
+* Aurora manages jobs made of tasks.
+* Mesos manages tasks made of processes.
+* Thermos manages processes.
+* All that is defined in `.aurora` configuration files
+
+![Aurora hierarchy](../images/aurora_hierarchy.png)
+
+Each `Task` has a *sandbox* created when the `Task` starts and garbage
+collected when it finishes. All of a `Task`'s processes run in its
+sandbox, so processes can share state by using a shared current working
+directory.
+
+The sandbox garbage collection policy considers many factors, most
+importantly age and size. It makes a best-effort attempt to keep
+sandboxes around as long as possible post-task in order for service
+owners to inspect data and logs, should the `Task` have completed
+abnormally. But you can't design your applications assuming sandboxes
+will be around forever; instead, build log saving or other
+checkpointing mechanisms directly into your application or into your
+`Job` description.
+
diff --git a/source/documentation/0.21.0/getting-started/tutorial.md b/source/documentation/0.21.0/getting-started/tutorial.md
new file mode 100644
index 0000000..1f037fc
--- /dev/null
+++ b/source/documentation/0.21.0/getting-started/tutorial.md
@@ -0,0 +1,258 @@
+# Aurora Tutorial
+
+This tutorial shows how to use the Aurora scheduler to run (and "`printf-debug`")
+a hello world program on Mesos. This is the recommended document for new Aurora users
+to start getting up to speed on the system.
+
+- [Prerequisite](#setup-install-aurora)
+- [The Script](#the-script)
+- [Aurora Configuration](#aurora-configuration)
+- [Creating the Job](#creating-the-job)
+- [Watching the Job Run](#watching-the-job-run)
+- [Cleanup](#cleanup)
+- [Next Steps](#next-steps)
+
+
+## Prerequisite
+
+This tutorial assumes you are running [Aurora locally using Vagrant](../vagrant/).
+However, in general the instructions are also applicable to any other
+[Aurora installation](../../operations/installation/).
+
+Unless otherwise stated, all commands are to be run from the root of the aurora
+repository clone.
+
+
+## The Script
+
+Our "hello world" application is a simple Python script that loops
+forever, displaying the time every few seconds. Copy the code below and
+put it in a file named `hello_world.py` in the root of your Aurora repository clone
+(Note: this directory is the same as `/vagrant` inside the Vagrant VMs).
+
+The script has an intentional bug, which we will explain later on.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+import time
+
+def main():
+  SLEEP_DELAY = 10
+  # Python experts - ignore this blatant bug.
+  for i in xrang(100):
+    print("Hello world! The time is now: %s. Sleeping for %d secs" % (
+      time.asctime(), SLEEP_DELAY))
+    time.sleep(SLEEP_DELAY)
+
+if __name__ == "__main__":
+  main()
+```
+
+## Aurora Configuration
+
+Once we have our script/program, we need to create a *configuration
+file* that tells Aurora how to manage and launch our Job. Save the below
+code in the file `hello_world.aurora`.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+pkg_path = '/vagrant/hello_world.py'
+
+# we use a trick here to make the configuration change with
+# the contents of the file, for simplicity.  in a normal setting, packages would be
+# versioned, and the version number would be changed in the configuration.
+import hashlib
+with open(pkg_path, 'rb') as f:
+  pkg_checksum = hashlib.md5(f.read()).hexdigest()
+
+# copy hello_world.py into the local sandbox
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s . && echo %s && chmod +x hello_world.py' % (pkg_path, pkg_checksum))
+
+# run the script
+hello_world = Process(
+  name = 'hello_world',
+  cmdline = 'python -u hello_world.py')
+
+# describe the task
+hello_world_task = SequentialTask(
+  processes = [install, hello_world],
+  resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB))
+
+jobs = [
+  Service(cluster = 'devcluster',
+          environment = 'devel',
+          role = 'www-data',
+          name = 'hello_world',
+          task = hello_world_task)
+]
+```
+
+There is a lot going on in that configuration file:
+
+1. From a "big picture" viewpoint, it first defines two
+Processes. Then it defines a Task that runs the two Processes in the
+order specified in the Task definition, as well as specifying what
+computational and memory resources are available for them.  Finally,
+it defines a Job that will schedule the Task on available and suitable
+machines. This Job is the sole member of a list of Jobs; you can
+specify more than one Job in a config file.
+
+2. At the Process level, it specifies how to get your code into the
+local sandbox in which it will run. It then specifies how the code is
+actually run once the second Process starts.
+
+For more about Aurora configuration files, see the [Configuration
+Tutorial](../../reference/configuration-tutorial/) and the [Configuration
+Reference](../../reference/configuration/) (preferably after finishing this
+tutorial).
+
+
+## Creating the Job
+
+We're ready to launch our job! To do so, we use the Aurora Client to
+issue a Job creation request to the Aurora scheduler.
+
+Many Aurora Client commands take a *job key* argument, which uniquely
+identifies a Job. A job key consists of four parts, each separated by a
+"/". The four parts are  `<cluster>/<role>/<environment>/<jobname>`
+in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts existing on the agent machines. If you
+don't know what accounts are available, contact your sysadmin.
+* Environment names are namespaces; you can count on `test`, `devel`,
+`staging` and `prod` existing.
+* Jobname is the custom name of your job.
+
+When comparing two job keys, if any of the four parts is different from
+its counterpart in the other key, then the two job keys identify two separate
+jobs. If all four values are identical, the job keys identify the same job.
+
+The `clusters.json` [client configuration](../../reference/client-cluster-configuration/)
+for the Aurora scheduler defines the available cluster names.
+For Vagrant, from the top-level of your Aurora repository clone, do:
+
+    $ vagrant ssh
+
+Followed by:
+
+    vagrant@aurora:~$ cat /etc/aurora/clusters.json
+
+You'll see something like the following. The `name` value shown here corresponds to a job key's cluster value.
+
+```javascript
+[{
+  "name": "devcluster",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler",
+  "auth_mechanism": "UNAUTHENTICATED",
+  "slave_run_directory": "latest",
+  "slave_root": "/var/lib/mesos"
+}]
+```
+
+The Aurora Client command that actually runs our Job is `aurora job create`. It creates a Job as
+specified by its job key and configuration file arguments and runs it.
+
+    aurora job create <cluster>/<role>/<environment>/<jobname> <config_file>
+
+Or for our example:
+
+    aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+After entering our virtual machine using `vagrant ssh`, this returns:
+
+    vagrant@aurora:~$ aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Creating job hello_world
+     INFO] Checking status of devcluster/www-data/devel/hello_world
+    Job create succeeded: job url=http://aurora.local:8081/scheduler/www-data/devel/hello_world
+
+
+## Watching the Job Run
+
+Now that our job is running, let's see what it's doing. Access the
+scheduler web interface at `http://$scheduler_hostname:$scheduler_port/scheduler`,
+or, when using `vagrant`, at `http://192.168.33.7:8081/scheduler`.
+First we see what Jobs are scheduled:
+
+![Scheduled Jobs](../images/ScheduledJobs.png)
+
+Click on your user name, which in this case was `www-data`, and we see the Jobs associated
+with that role:
+
+![Role Jobs](../images/RoleJobs.png)
+
+If you click on your `hello_world` Job, you'll see:
+
+![hello_world Job](../images/HelloWorldJob.png)
+
+Oops, looks like our first job didn't quite work! The task is temporarily throttled for
+having failed on every attempt of the Aurora scheduler to run it. We have to figure out
+what is going wrong.
+
+On the Completed tasks tab, we see all past attempts of the Aurora scheduler to run our job.
+
+![Completed tasks tab](../images/CompletedTasks.png)
+
+We can navigate to the Task page of a failed run by clicking on the host link.
+
+![Task page](../images/TaskBreakdown.png)
+
+Once there, we see that the `hello_world` process failed. The Task page
+captures the standard error and standard output streams and makes them available.
+Clicking through to `stderr` on the failed `hello_world` process, we see what happened.
+
+![stderr page](../images/stderr.png)
+
+It looks like we made a typo in our Python script. We wanted `xrange`,
+not `xrang`. Edit the `hello_world.py` script to use the correct function
+and save it as `hello_world_v2.py`. Then update the `hello_world.aurora`
+configuration to the newest version.
+
+In order to try again, we can now instruct the scheduler to update our job:
+
+    vagrant@aurora:~$ aurora update start devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Starting update for: hello_world
+    Job update has started. View your update progress at http://aurora.local:8081/scheduler/www-data/devel/hello_world/update/8ef38017-e60f-400d-a2f2-b5a8b724e95b
+
+This time, the task comes up.
+
+![Running Job](../images/RunningJob.png)
+
+By again clicking on the host, we inspect the Task page, and see that the
+`hello_world` process is running.
+
+![Running Task page](../images/runningtask.png)
+
+We then inspect the output by clicking on `stdout` and see our process'
+output:
+
+![stdout page](../images/stdout.png)
+
+## Cleanup
+
+Now that we're done, we kill the job using the Aurora client:
+
+    vagrant@aurora:~$ aurora job killall devcluster/www-data/devel/hello_world
+     INFO] Killing tasks for job: devcluster/www-data/devel/hello_world
+     INFO] Instances to be killed: [0]
+    Successfully killed instances [0]
+    Job killall succeeded
+
+The job page now shows the `hello_world` tasks as completed.
+
+![Killed Task page](../images/killedtask.png)
+
+## Next Steps
+
+Now that you've finished this Tutorial, you should read or do the following:
+
+- [The Aurora Configuration Tutorial](../../reference/configuration-tutorial/), which provides more examples
+  and best practices for writing Aurora configurations. You should also look at
+  the [Aurora Configuration Reference](../../reference/configuration/).
+- Explore the Aurora Client - use `aurora -h`, and read the
+  [Aurora Client Commands](../../reference/client-commands/) document.
diff --git a/source/documentation/0.21.0/getting-started/vagrant.md b/source/documentation/0.21.0/getting-started/vagrant.md
new file mode 100644
index 0000000..e4d4168
--- /dev/null
+++ b/source/documentation/0.21.0/getting-started/vagrant.md
@@ -0,0 +1,154 @@
+A local Cluster with Vagrant
+============================
+
+This document shows you how to configure a complete cluster using a virtual machine. This setup
+replicates a real cluster in your development machine as closely as possible. After you complete
+the steps outlined here, you will be ready to create and run your first Aurora job.
+
+The following sections describe these steps in detail:
+
+1. [Overview](#overview)
+1. [Install VirtualBox and Vagrant](#install-virtualbox-and-vagrant)
+1. [Clone the Aurora repository](#clone-the-aurora-repository)
+1. [Start the local cluster](#start-the-local-cluster)
+1. [Log onto the VM](#log-onto-the-vm)
+1. [Run your first job](#run-your-first-job)
+1. [Rebuild components](#rebuild-components)
+1. [Shut down or delete your local cluster](#shut-down-or-delete-your-local-cluster)
+1. [Troubleshooting](#troubleshooting)
+
+
+Overview
+--------
+
+The Aurora distribution includes a set of scripts that enable you to create a local cluster in
+your development machine. These scripts use [Vagrant](https://www.vagrantup.com/) and
+[VirtualBox](https://www.virtualbox.org/) to run and configure a virtual machine. Once the
+virtual machine is running, the scripts install and initialize Aurora and any required components
+to create the local cluster.
+
+
+Install VirtualBox and Vagrant
+------------------------------
+
+First, download and install [VirtualBox](https://www.virtualbox.org/) on your development machine.
+
+Then download and install [Vagrant](https://www.vagrantup.com/). To verify that the installation
+was successful, open a terminal window and type the `vagrant` command. You should see a list of
+common commands for this tool.
+
+
+Clone the Aurora repository
+---------------------------
+
+To obtain the Aurora source distribution, clone its Git repository using the following command:
+
+     git clone git://git.apache.org/aurora.git
+
+
+Start the local cluster
+-----------------------
+
+Now change into the `aurora/` directory, which contains the Aurora source code and
+other scripts and tools:
+
+     cd aurora/
+
+To start the local cluster, type the following command:
+
+     vagrant up
+
+This command uses the configuration scripts in the Aurora distribution to:
+
+* Download a Linux system image.
+* Start a virtual machine (VM) and configure it.
+* Install the required build tools on the VM.
+* Install Aurora's requirements (like [Mesos](http://mesos.apache.org/) and
+[Zookeeper](http://zookeeper.apache.org/)) on the VM.
+* Build and install Aurora from source on the VM.
+* Start Aurora's services on the VM.
+
+This process takes several minutes to complete.
+
+You may notice a warning that guest additions in the VM don't match your version of VirtualBox.
+This should generally be harmless, but you may wish to install a vagrant plugin to take care of
+mismatches like this for you:
+
+     vagrant plugin install vagrant-vbguest
+
+With this plugin installed, whenever you `vagrant up` the plugin will upgrade the guest additions
+for you when a version mis-match is detected. You can read more about the plugin
+[here](https://github.com/dotless-de/vagrant-vbguest).
+
+To verify that Aurora is running on the cluster, visit the following URLs:
+
+* Scheduler - http://192.168.33.7:8081
+* Observer - http://192.168.33.7:1338
+* Mesos Master - http://192.168.33.7:5050
+* Mesos Agent - http://192.168.33.7:5051
+
+
+Log onto the VM
+---------------
+
+To SSH into the VM, run the following command in your development machine:
+
+     vagrant ssh
+
+To verify that Aurora is installed in the VM, type the `aurora` command. You should see a list
+of arguments and possible commands.
+
+The `/vagrant` directory on the VM is mapped to the `aurora/` local directory
+from which you started the cluster. You can edit files inside this directory in your development
+machine and access them from the VM under `/vagrant`.
+
+A pre-installed `clusters.json` file refers to your local cluster as `devcluster`, which you
+will use in client commands.
+
+
+Run your first job
+------------------
+
+Now that your cluster is up and running, you are ready to define and run your first job in Aurora.
+For more information, see the [Aurora Tutorial](../tutorial/).
+
+
+Rebuild components
+------------------
+
+If you are changing Aurora code and would like to rebuild a component, you can use the `aurorabuild`
+command on the VM to build and restart a component.  This is considerably faster than destroying
+and rebuilding your VM.
+
+`aurorabuild` accepts a list of components to build and update. To get a list of supported
+components, invoke the `aurorabuild` command with no arguments. For example, to rebuild and restart the client:
+
+     vagrant ssh -c 'aurorabuild client'
+
+
+Shut down or delete your local cluster
+--------------------------------------
+
+To shut down your local cluster, run the `vagrant halt` command in your development machine. To
+start it again, run the `vagrant up` command.
+
+Once you are finished with your local cluster, or if you would otherwise like to start from scratch,
+you can use the command `vagrant destroy` to turn off and delete the virtual file system.
+
+
+Troubleshooting
+---------------
+
+Most Vagrant-related problems can be fixed by the following steps:
+
+* Destroying the vagrant environment with `vagrant destroy`
+* Killing any orphaned VMs (see AURORA-499) with `virtualbox` UI or `VBoxManage` command line tool
+* Cleaning the repository of build artifacts and other intermediate output with `git clean -fdx`
+* Bringing up the vagrant environment with `vagrant up`
+
+If that still doesn't solve your problem, make sure to inspect the log files:
+
+* Scheduler: `/var/log/aurora/scheduler.log` or `sudo journalctl -u aurora-scheduler`
+* Observer: `/var/log/thermos/observer.log` or `sudo journalctl -u thermos-observer`
+* Mesos Master: `/var/log/mesos/mesos-master.INFO` (also see `.WARNING` and `.ERROR`)
+* Mesos Agent: `/var/log/mesos/mesos-slave.INFO` (also see `.WARNING` and `.ERROR`)
diff --git a/publish/documentation/latest/images/CPUavailability.png b/source/documentation/0.21.0/images/CPUavailability.png
similarity index 100%
copy from publish/documentation/latest/images/CPUavailability.png
copy to source/documentation/0.21.0/images/CPUavailability.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/CompletedTasks.png b/source/documentation/0.21.0/images/CompletedTasks.png
new file mode 100644
index 0000000..343e111
--- /dev/null
+++ b/source/documentation/0.21.0/images/CompletedTasks.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/HelloWorldJob.png b/source/documentation/0.21.0/images/HelloWorldJob.png
new file mode 100644
index 0000000..4977227
--- /dev/null
+++ b/source/documentation/0.21.0/images/HelloWorldJob.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/RoleJobs.png b/source/documentation/0.21.0/images/RoleJobs.png
new file mode 100644
index 0000000..95ffa49
--- /dev/null
+++ b/source/documentation/0.21.0/images/RoleJobs.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/RunningJob.png b/source/documentation/0.21.0/images/RunningJob.png
new file mode 100644
index 0000000..4e7a045
--- /dev/null
+++ b/source/documentation/0.21.0/images/RunningJob.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/ScheduledJobs.png b/source/documentation/0.21.0/images/ScheduledJobs.png
new file mode 100644
index 0000000..0be4731
--- /dev/null
+++ b/source/documentation/0.21.0/images/ScheduledJobs.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/TaskBreakdown.png b/source/documentation/0.21.0/images/TaskBreakdown.png
new file mode 100644
index 0000000..fab702c
--- /dev/null
+++ b/source/documentation/0.21.0/images/TaskBreakdown.png
Binary files differ
diff --git a/publish/documentation/latest/images/aurora_hierarchy.png b/source/documentation/0.21.0/images/aurora_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/aurora_hierarchy.png
copy to source/documentation/0.21.0/images/aurora_hierarchy.png
Binary files differ
diff --git a/publish/assets/img/aurora_logo.png b/source/documentation/0.21.0/images/aurora_logo.png
similarity index 100%
copy from publish/assets/img/aurora_logo.png
copy to source/documentation/0.21.0/images/aurora_logo.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/components.odg b/source/documentation/0.21.0/images/components.odg
new file mode 100644
index 0000000..a0f3b28
--- /dev/null
+++ b/source/documentation/0.21.0/images/components.odg
Binary files differ
diff --git a/source/documentation/0.21.0/images/components.png b/source/documentation/0.21.0/images/components.png
new file mode 100644
index 0000000..e8228c0
--- /dev/null
+++ b/source/documentation/0.21.0/images/components.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/debug-client-test.png b/source/documentation/0.21.0/images/debug-client-test.png
new file mode 100644
index 0000000..8b67a10
--- /dev/null
+++ b/source/documentation/0.21.0/images/debug-client-test.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/debugging-client-test.png b/source/documentation/0.21.0/images/debugging-client-test.png
new file mode 100644
index 0000000..df7b7e3
--- /dev/null
+++ b/source/documentation/0.21.0/images/debugging-client-test.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/killedtask.png b/source/documentation/0.21.0/images/killedtask.png
new file mode 100644
index 0000000..bb1bc92
--- /dev/null
+++ b/source/documentation/0.21.0/images/killedtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/lifeofatask.png b/source/documentation/0.21.0/images/lifeofatask.png
similarity index 100%
copy from publish/documentation/latest/images/lifeofatask.png
copy to source/documentation/0.21.0/images/lifeofatask.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png b/source/documentation/0.21.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
new file mode 100644
index 0000000..ea9866e
--- /dev/null
+++ b/source/documentation/0.21.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png b/source/documentation/0.21.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
new file mode 100644
index 0000000..c9539df
--- /dev/null
+++ b/source/documentation/0.21.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png b/source/documentation/0.21.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
new file mode 100644
index 0000000..99674c7
--- /dev/null
+++ b/source/documentation/0.21.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/presentations/02_28_2015_apache_aurora_thumb.png b/source/documentation/0.21.0/images/presentations/02_28_2015_apache_aurora_thumb.png
new file mode 100644
index 0000000..6e0e1fb
--- /dev/null
+++ b/source/documentation/0.21.0/images/presentations/02_28_2015_apache_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png b/source/documentation/0.21.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
new file mode 100644
index 0000000..f81a128
--- /dev/null
+++ b/source/documentation/0.21.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png b/source/documentation/0.21.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
new file mode 100644
index 0000000..6ebce80
--- /dev/null
+++ b/source/documentation/0.21.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png b/source/documentation/0.21.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
new file mode 100644
index 0000000..f8a3db1
--- /dev/null
+++ b/source/documentation/0.21.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/presentations/08_21_2014_past_present_future_thumb.png b/source/documentation/0.21.0/images/presentations/08_21_2014_past_present_future_thumb.png
new file mode 100644
index 0000000..deaebe7
--- /dev/null
+++ b/source/documentation/0.21.0/images/presentations/08_21_2014_past_present_future_thumb.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png b/source/documentation/0.21.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
new file mode 100644
index 0000000..61ea6d4
--- /dev/null
+++ b/source/documentation/0.21.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png b/source/documentation/0.21.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png
new file mode 100644
index 0000000..9f1f4af
--- /dev/null
+++ b/source/documentation/0.21.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png b/source/documentation/0.21.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
new file mode 100644
index 0000000..a8bb199
--- /dev/null
+++ b/source/documentation/0.21.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png b/source/documentation/0.21.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
new file mode 100644
index 0000000..ac5c6dc
--- /dev/null
+++ b/source/documentation/0.21.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/runningtask.png b/source/documentation/0.21.0/images/runningtask.png
new file mode 100644
index 0000000..9e7e31a
--- /dev/null
+++ b/source/documentation/0.21.0/images/runningtask.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/stderr.png b/source/documentation/0.21.0/images/stderr.png
new file mode 100644
index 0000000..6076a84
--- /dev/null
+++ b/source/documentation/0.21.0/images/stderr.png
Binary files differ
diff --git a/source/documentation/0.21.0/images/stdout.png b/source/documentation/0.21.0/images/stdout.png
new file mode 100644
index 0000000..d614097
--- /dev/null
+++ b/source/documentation/0.21.0/images/stdout.png
Binary files differ
diff --git a/publish/documentation/latest/images/storage_hierarchy.png b/source/documentation/0.21.0/images/storage_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/storage_hierarchy.png
copy to source/documentation/0.21.0/images/storage_hierarchy.png
Binary files differ
diff --git a/source/documentation/0.21.0/index.html.md b/source/documentation/0.21.0/index.html.md
new file mode 100644
index 0000000..fe770d6
--- /dev/null
+++ b/source/documentation/0.21.0/index.html.md
@@ -0,0 +1,80 @@
+## Introduction
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+We encourage you to ask questions on the [Aurora user list](http://aurora.apache.org/community/) or
+the `#aurora` IRC channel on `irc.freenode.net`.
+
+
+## Getting Started
+Information for everyone new to Apache Aurora.
+
+ * [Aurora System Overview](getting-started/overview/)
+ * [Hello World Tutorial](getting-started/tutorial/)
+ * [Local cluster with Vagrant](getting-started/vagrant/)
+
+## Features
+Description of important Aurora features.
+
+ * [Containers](features/containers/)
+ * [Cron Jobs](features/cron-jobs/)
+ * [Custom Executors](features/custom-executors/)
+ * [Job Updates](features/job-updates/)
+ * [Multitenancy](features/multitenancy/)
+ * [Resource Isolation](features/resource-isolation/)
+ * [Scheduling Constraints](features/constraints/)
+ * [Services](features/services/)
+ * [Service Discovery](features/service-discovery/)
+ * [SLA Metrics](features/sla-metrics/)
+ * [SLA Requirements](features/sla-requirements/)
+ * [Webhooks](features/webhooks/)
+
+## Operators
+For those that wish to manage and fine-tune an Aurora cluster.
+
+ * [Installation](operations/installation/)
+ * [Configuration](operations/configuration/)
+ * [Upgrades](operations/upgrades/)
+ * [Troubleshooting](operations/troubleshooting/)
+ * [Monitoring](operations/monitoring/)
+ * [Security](operations/security/)
+ * [Storage](operations/storage/)
+ * [Backup](operations/backup-restore/)
+
+## Reference
+The complete reference of commands, configuration options, and scheduler internals.
+
+ * [Task lifecycle](reference/task-lifecycle/)
+ * Configuration (`.aurora` files)
+    - [Configuration Reference](reference/configuration/)
+    - [Configuration Tutorial](reference/configuration-tutorial/)
+    - [Configuration Best Practices](reference/configuration-best-practices/)
+    - [Configuration Templating](reference/configuration-templating/)
+ * Aurora Client
+    - [Client Commands](reference/client-commands/)
+    - [Client Hooks](reference/client-hooks/)
+    - [Client Cluster Configuration](reference/client-cluster-configuration/)
+ * [Scheduler Configuration](reference/scheduler-configuration/)
+ * [Observer Configuration](reference/observer-configuration/)
+ * [Endpoints](reference/scheduler-endpoints/)
+
+## Additional Resources
+ * [Tools integrating with Aurora](additional-resources/tools/)
+ * [Presentation videos and slides](additional-resources/presentations/)
+
+## Developers
+All the information you need to start modifying Aurora and contributing back to the project.
+
+ * [Contributing to the project](contributing/)
+ * [Committer's Guide](development/committers-guide/)
+ * [Design Documents](development/design-documents/)
+ * Developing the Aurora components:
+     - [Client](development/client/)
+     - [Scheduler](development/scheduler/)
+     - [Scheduler UI](development/ui/)
+     - [Thermos](development/thermos/)
+     - [Thrift structures](development/thrift/)
+
+
diff --git a/source/documentation/0.21.0/operations/backup-restore.md b/source/documentation/0.21.0/operations/backup-restore.md
new file mode 100644
index 0000000..2230614
--- /dev/null
+++ b/source/documentation/0.21.0/operations/backup-restore.md
@@ -0,0 +1,80 @@
+# Recovering from a Scheduler Backup
+
+**Be sure to read the entire page before attempting to restore from a backup, as it may have
+unintended consequences.**
+
+## Summary
+
+The restoration procedure replaces the existing (possibly corrupted) Mesos replicated log with an
+earlier, backed up, version and requires all schedulers to be taken down temporarily while
+restoring. Once completed, the scheduler state resets to what it was when the backup was created.
+This means any jobs/tasks created or updated after the backup are unknown to the scheduler and will
+be killed shortly after the cluster restarts. All other tasks continue operating as normal.
+
+Usually, it is a bad idea to restore a backup that is not extremely recent (i.e. older than a few
+hours). This is because the scheduler will expect the cluster to look exactly as the backup does,
+so any tasks that have been rescheduled since the backup was taken will be killed.
+
+The instructions below have been verified in the [Vagrant environment](../../getting-started/vagrant/) and, with minor
+syntax/path changes, should be applicable to any Aurora cluster.
+
+Follow these steps to prepare the cluster for restoring from a backup:
+
+##  Preparation
+
+* Stop all scheduler instances.
+
+* Pick a backup to use for rehydrating the mesos-replicated log. Backups can be found in the
+directory given to the scheduler as the `-backup_dir` argument. Backups are stored in the format
+`scheduler-backup-<yyyy-MM-dd-HH-mm>`.
+
+* If running the Aurora Scheduler in HA mode, pick a single scheduler instance to rehydrate.
+
+* Locate the `recovery-tool` in your setup. If Aurora was installed using a Debian package
+generated by our `aurora-packaging` script, the recovery tool can be found
+in `/usr/share/aurora/bin/recovery-tool`.
+
+## Cleanup
+
+* Delete (or move) the Mesos replicated log path for each scheduler instance. The location of the
+Mesos replicated log file path can be found by looking at the value given to the flag
+`-native_log_file_path` for each instance.
+
+* Initialize the Mesos replicated log files using the mesos-log tool:
+```
+sudo -u <USER> mesos-log initialize --path=<native_log_file_path>
+```
+Where `USER` is the user under which the scheduler instance will be run. For installations using
+Debian packages, the default user will be `aurora`. You may alternatively choose to specify
+a group as well by passing the `-g <GROUP>` option to `sudo`.
+Note that if the user under which the Aurora scheduler instance is run _does not_ have permissions
+to read this directory and the files it contains, the instance will fail to start.
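+
+As an illustration only, the cleanup steps above might look roughly like this in the Vagrant
+environment (the log path and the `aurora` user are assumptions based on the Debian package defaults):
+
+```
+# Hypothetical paths; use the value of -native_log_file_path from your scheduler config.
+sudo mv /var/lib/aurora/scheduler/db /var/lib/aurora/scheduler/db.corrupted
+sudo -u aurora mesos-log initialize --path=/var/lib/aurora/scheduler/db
+```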
+
+## Restore from backup
+
+* Run the `recovery-tool`. Wherever the flags match those used for the scheduler instance,
+use the same values:
+```
+$ recovery-tool -from BACKUP \
+-to LOG \
+-backup=<selected_backup_location> \
+-native_log_zk_group_path=<native_log_zk_group_path> \
+-native_log_file_path=<native_log_file_path> \
+-zk_endpoints=<zk_endpoints>
+```
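+
+For example, a filled-in invocation might look like the following; every value shown here is
+illustrative and must be replaced with the paths and endpoints used by your scheduler:
+
+```
+$ /usr/share/aurora/bin/recovery-tool -from BACKUP -to LOG \
+-backup=/var/lib/aurora/scheduler/backups/scheduler-backup-2018-11-01-10-00 \
+-native_log_zk_group_path=/aurora/replicated-log \
+-native_log_file_path=/var/lib/aurora/scheduler/db \
+-zk_endpoints=localhost:2181
+```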
+
+## Bring scheduler instances back online
+
+### If running in HA Mode
+
+* Start the rehydrated scheduler instance along with enough cleaned up instances to
+meet the `-native_log_quorum_size`. The mesos-replicated log algorithm will replenish
+the "blank" scheduler instances with the information from the rehydrated instance.
+
+* Start any remaining scheduler instances.
+
+### If running in singleton mode
+
+* Start the single scheduler instance.
+
+
diff --git a/source/documentation/0.21.0/operations/configuration.md b/source/documentation/0.21.0/operations/configuration.md
new file mode 100644
index 0000000..27b0b90
--- /dev/null
+++ b/source/documentation/0.21.0/operations/configuration.md
@@ -0,0 +1,380 @@
+# Scheduler Configuration
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+Examples are available under `examples/scheduler/`. For a list of available Aurora flags and their
+documentation, see [Scheduler Configuration Reference](../../reference/scheduler-configuration/).
+
+
+## A Note on Configuration
+Like Mesos, Aurora uses command-line flags for runtime configuration. As such, the Aurora
+"configuration file" is typically a `scheduler.sh` shell script of the following form:
+
+    #!/bin/bash
+    AURORA_HOME=/usr/local/aurora-scheduler
+
+    # Flags controlling the JVM.
+    JAVA_OPTS=(
+      -Xmx2g
+      -Xms2g
+      # GC tuning, etc.
+    )
+
+    # Flags controlling the scheduler.
+    AURORA_FLAGS=(
+      # Port for client RPCs and the web UI
+      -http_port=8081
+      # Log configuration, etc.
+    )
+
+    # Environment variables controlling libmesos
+    export JAVA_HOME=...
+    export GLOG_v=1
+    export LIBPROCESS_PORT=8083
+    export LIBPROCESS_IP=192.168.33.7
+
+    JAVA_OPTS="${JAVA_OPTS[*]}" exec "$AURORA_HOME/bin/aurora-scheduler" "${AURORA_FLAGS[@]}"
+
+That way Aurora's current flags are visible in `ps` and in the `/vars` admin endpoint.
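+
+For instance, a quick way to inspect a running scheduler (the port below is the `-http_port`
+value from the example and may differ in your setup):
+
+    # The exact process name depends on how the scheduler was packaged and launched.
+    ps -eo cmd | grep '[a]urora'
+    # The /vars endpoint confirms the scheduler is up and serving.
+    curl -s localhost:8081/vars | head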
+
+
+## JVM Configuration
+
+JVM settings are dependent on your environment and cluster size. They might require
+custom tuning. As a starting point, we recommend:
+
+* Ensure the initial (`-Xms`) and maximum (`-Xmx`) heap sizes are identical to prevent heap resizing
+  at runtime.
+* Either `-XX:+UseConcMarkSweepGC` or `-XX:+UseG1GC -XX:+UseStringDeduplication` is a
+  sane default for the garbage collector.
+* `-Djava.net.preferIPv4Stack=true` makes sense in most cases as well.
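+
+Purely as an illustration, a JVM flag block in the `scheduler.sh` above might then look like this
+(the 2 GiB heap simply reuses the value from the example script and is not a sizing recommendation):
+
+    JAVA_OPTS=(
+      -Xms2g
+      -Xmx2g
+      -XX:+UseG1GC
+      -XX:+UseStringDeduplication
+      -Djava.net.preferIPv4Stack=true
+    )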
+
+
+## Network Configuration
+
+By default, Aurora binds to all interfaces and auto-discovers its hostname. To reduce ambiguity,
+however, it helps to hardcode them:
+
+    -http_port=8081
+    -ip=192.168.33.7
+    -hostname="aurora1.us-east1.example.org"
+
+Two environment variables control the IP address and port used for communication with the Mesos master
+and for the replicated log used by Aurora:
+
+    export LIBPROCESS_PORT=8083
+    export LIBPROCESS_IP=192.168.33.7
+
+It is important that those can be reached from all Mesos master and Aurora scheduler instances.
+
+
+## Replicated Log Configuration
+
+Aurora schedulers use ZooKeeper to discover log replicas and elect a leader. Only one scheduler is
+leader at a given time - the other schedulers follow log writes and prepare to take over as leader
+but do not communicate with the Mesos master. Either 3 or 5 schedulers are recommended in a
+production deployment depending on failure tolerance and they must have persistent storage.
+
+Below is a summary of scheduler storage configuration flags that either don't have default values
+or require attention before deploying in a production environment.
+
+### `-native_log_quorum_size`
+Defines the Mesos replicated log quorum size. In a cluster with `N` schedulers, the flag
+`-native_log_quorum_size` should be set to `floor(N/2) + 1`. So in a cluster with 1 scheduler
+it should be set to `1`, in a cluster with 3 it should be set to `2`, and in a cluster of 5 it
+should be set to `3`.
+
+  Number of schedulers (N) | ```-native_log_quorum_size``` setting (```floor(N/2) + 1```)
+  ------------------------ | -------------------------------------------------------------
+  1                        | 1
+  3                        | 2
+  5                        | 3
+  7                        | 4
+
+*Incorrectly setting this flag will cause data corruption to occur!*
+
+### `-native_log_file_path`
+Location of the Mesos replicated log files. For optimal and consistent performance, consider
+allocating a dedicated disk (preferably SSD) for the replicated log. Ensure that this disk is not
+used by anything else (e.g. no process logging) and in particular that it is a real disk
+and not just a partition.
+
+Even when a dedicated disk is used, switching the Linux kernel I/O scheduler from `CFQ` to `deadline`
+can further help with storage performance in Aurora ([see this ticket for details](https://issues.apache.org/jira/browse/AURORA-1211)).
+
+### `-native_log_zk_group_path`
+ZooKeeper path used for Mesos replicated log quorum discovery.
+
+See [code](https://github.com/apache/aurora/blob/rel/0.21.0/src/main/java/org/apache/aurora/scheduler/log/mesos/MesosLogStreamModule.java) for
+other available Mesos replicated log configuration options and default values.
+
+### Changing the Quorum Size
+Special care needs to be taken when changing the size of the Aurora scheduler quorum.
+Since Aurora uses a Mesos replicated log, similar steps need to be followed as when
+[changing the Mesos quorum size](http://mesos.apache.org/documentation/latest/operational-guide).
+
+As a preparation, increase `-native_log_quorum_size` on each existing scheduler and restart them.
+When updating from 3 to 5 schedulers, the quorum size would grow from 2 to 3.
+
+When starting the new schedulers, set `-native_log_quorum_size` to the new value. Failing to
+first increase the quorum size on running schedulers can in some cases result in corruption
+or truncating of the replicated log used by Aurora. In that case, see the documentation on
+[recovering from backup](../backup-restore/).
+
+
+## Backup Configuration
+
+Configuration options for the Aurora scheduler backup manager.
+
+* `-backup_interval`: The interval on which the scheduler writes local storage backups.
+   The default is every hour.
+* `-backup_dir`: Directory to write backups to. As stated above, this should not be co-located on the
+   same disk as the replicated log.
+* `-max_saved_backups`: Maximum number of backups to retain before deleting the oldest backup(s).
+
+
+## Resource Isolation
+
+For proper CPU, memory, and disk isolation as mentioned in our [end-user documentation](../../features/resource-isolation/),
+we recommend adding the following isolators to the `--isolation` flag of the Mesos agent:
+
+* `cgroups/cpu`
+* `cgroups/mem`
+* `disk/du`
+
+In addition, we recommend setting the following [agent flags](http://mesos.apache.org/documentation/latest/configuration/):
+
+* `--cgroups_limit_swap` to enforce limits on both memory and swap instead of just memory.
+  Alternatively, you could disable swap on your agent hosts.
+* `--cgroups_enable_cfs` to enable hard limits on CPU resources via the CFS bandwidth limiting
+  feature.
+* `--enforce_container_disk_quota` to enable disk quota enforcement for containers.
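+
+Taken together, a sketch of the corresponding agent invocation could look roughly like this
+(merge these with whatever isolators and flags your agents already use):
+
+    mesos-slave --isolation=cgroups/cpu,cgroups/mem,disk/du \
+      --cgroups_limit_swap \
+      --cgroups_enable_cfs \
+      --enforce_container_disk_quota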
+
+To enable the optional GPU support in Mesos, please see the GPU related flags in the
+[Mesos configuration](http://mesos.apache.org/documentation/latest/configuration/).
+To enable the corresponding feature in Aurora, you have to start the scheduler with the
+flag
+
+    -allow_gpu_resource=true
+
+If you want to use revocable resources, first follow the
+[Mesos oversubscription documentation](http://mesos.apache.org/documentation/latest/oversubscription/)
+and then set this Aurora scheduler flag to allow receiving revocable Mesos offers:
+
+    -receive_revocable_resources=true
+
+Both CPUs and RAM are supported as revocable resources. The former is enabled by default;
+the latter needs to be enabled via:
+
+    -enable_revocable_ram=true
+
+Unless you want to use the [default](https://github.com/apache/aurora/blob/rel/0.21.0/src/main/resources/org/apache/aurora/scheduler/tiers.json)
+tier configuration, you will also have to specify a file path:
+
+    -tier_config=path/to/tiers/config.json
+
+
+## Multi-Framework Setup
+
+Aurora holds onto Mesos offers in order to provide efficient scheduling and
+[preemption](../../features/multitenancy/#preemption). This is problematic in multi-framework
+environments as Aurora might starve other frameworks.
+
+At the cost of increased scheduling latency, Aurora can be configured to be more cooperative:
+
+* Lowering `-min_offer_hold_time` (e.g. to `1mins`) can ensure unused offers are returned back to
+  Mesos more frequently.
+* Increasing `-offer_filter_duration` (e.g. to `30secs`) will instruct Mesos
+  not to re-offer rejected resources for the given duration.
+
+Setting a [minimum amount of resources](http://mesos.apache.org/documentation/latest/quota/) for
+each Mesos role can furthermore help to ensure no framework is starved entirely.
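+
+For example, a more cooperative configuration based on the suggestions above might add the
+following scheduler flags (the exact values should be tuned for your environment):
+
+    -min_offer_hold_time=1mins
+    -offer_filter_duration=30secs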
+
+
+## Containers
+
+Both the Mesos and Docker containerizers require configuration of the Mesos agent.
+
+### Mesos Containerizer
+
+The minimal agent configuration requires enabling Docker and Appc image support for the Mesos
+containerizer:
+
+    --containerizers=mesos
+    --image_providers=appc,docker
+    --isolation=filesystem/linux,docker/runtime  # as an addition to your other isolators
+
+Further details can be found in the corresponding [Mesos documentation](http://mesos.apache.org/documentation/latest/container-image/).
+
+### Docker Containerizer
+
+The [Docker containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/)
+requires that the Docker engine be installed on each agent host. In addition, it must be enabled on the
+Mesos agents by launching them with the option:
+
+    --containerizers=mesos,docker
+
+If you would like to run a container with a read-only filesystem, it may also be necessary to use
+the scheduler flag `-thermos_home_in_sandbox` in order to set HOME to the sandbox
+before the executor runs. This will make sure that the executor/runner PEX extraction happens
+inside the sandbox instead of the container filesystem root.
+
+If you would like to supply your own parameters to `docker run` when launching jobs in docker
+containers, you may use the following flags:
+
+    -allow_docker_parameters
+    -default_docker_parameters
+
+`-allow_docker_parameters` controls whether or not users may pass their own configuration parameters
+through the job configuration files. If set to `false` (the default), the scheduler will reject
+jobs with custom parameters. *NOTE*: this setting should be used with caution as it allows any job
+owner to specify any parameters they wish, including those that may introduce security concerns
+(`privileged=true`, for example).
+
+`-default_docker_parameters` allows a cluster operator to specify a universal set of parameters that
+should be used for every container that does not have parameters explicitly configured at the job
+level. The argument accepts a multimap format:
+
+    -default_docker_parameters="read-only=true,tmpfs=/tmp,tmpfs=/run"
+
+### Common Options
+
+The following Aurora options work for both containerizers.
+
+The scheduler flag `-global_container_mounts` allows mounting paths from the host (i.e. the agent machine)
+into all containers on that host. The format is a comma-separated list of `host_path:container_path[:mode]`
+tuples. For example `-global_container_mounts=/opt/secret_keys_dir:/mnt/secret_keys_dir:ro` mounts
+`/opt/secret_keys_dir` from the agents into all launched containers. Valid modes are `ro` and `rw`.
+
+
+## Thermos Process Logs
+
+### Log destination
+By default, Thermos will write process stdout/stderr to log files in the sandbox. Process object
+configuration allows specifying alternate log file destinations like streamed stdout/stderr or
+suppression of all log output. Default behavior can be configured for the entire cluster with the
+following flag (through the `-thermos_executor_flags` argument to the Aurora scheduler):
+
+    --runner-logger-destination=both
+
+The `both` setting sends logs to files and also streams them to the parent stdout/stderr.
+
+See [Configuration Reference](../../reference/configuration/#logger) for all destination options.
+
+### Log rotation
+By default, Thermos will not rotate the stdout/stderr logs from child processes and they will grow
+without bound. An individual user may change this behavior via configuration on the Process object,
+but it may also be desirable to change the default configuration for the entire cluster.
+In order to enable rotation by default, the following flags can be applied to Thermos (through the
+`-thermos_executor_flags` argument to the Aurora scheduler):
+
+    --runner-logger-mode=rotate
+    --runner-rotate-log-size-mb=100
+    --runner-rotate-log-backups=10
+
+In the above example, each instance of the Thermos runner will rotate stderr/stdout logs once they
+reach 100 MiB in size and keep a maximum of 10 backups. If a user has provided a custom setting for
+their process, it will override these default settings.
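+
+As a sketch, these settings could be passed through the scheduler flag mentioned above roughly
+like so (the quoting shown here is illustrative):
+
+    -thermos_executor_flags="--runner-logger-mode=rotate --runner-rotate-log-size-mb=100 --runner-rotate-log-backups=10"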
+
+
+## Thermos Executor Wrapper
+
+If you need to do computation before starting the Thermos executor (for example, setting a different
+`--announcer-hostname` parameter for every executor), then the Thermos executor should be invoked
+inside a wrapper script. In such a case, the Aurora scheduler should be started with
+`-thermos_executor_path` pointing to the wrapper script and `-thermos_executor_resources` set to a
+comma-separated string of all the resources that should be copied into the sandbox (including the
+original Thermos executor). Ensure the wrapper script does not access resources outside of the
+sandbox, as when the script is run from within a Docker container those resources may not exist.
+
+For example, to wrap the executor inside a simple wrapper, the scheduler would be started with flags like
+`-thermos_executor_path=/path/to/wrapper.sh -thermos_executor_resources=/usr/share/aurora/bin/thermos_executor.pex`.
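+
+A minimal sketch of such a wrapper is shown below; it assumes the executor PEX listed in
+`-thermos_executor_resources` is copied into the sandbox next to the wrapper, and the
+`--announcer-hostname` override is only an example of a per-host computation:
+
+    #!/bin/bash
+    # Hypothetical wrapper: compute a per-host value, then hand off to the copied executor,
+    # forwarding any arguments supplied by the scheduler.
+    exec ./thermos_executor.pex --announcer-hostname="$(hostname -f)" "$@"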
+
+## Custom Executors
+
+The scheduler can be configured to utilize a custom executor by specifying the `-custom_executor_config` flag.
+The flag must be set to the path of a valid executor configuration file.
+
+For more information on this feature please see the custom executors [documentation](../../features/custom-executors/).
+
+## A note on increasing executor overhead
+
+Increasing executor overhead on an existing cluster, whether it be for custom executors or for Thermos,
+will result in degraded preemption performance until all tasks that began life under the previous,
+lower-overhead executor configuration are preempted/restarted.
+
+## Controlling MTTA via Update Affinity
+
+When there is high resource contention in your cluster you may experience noticeably elevated job update
+times, as well as high task churn across the cluster. This is due to Aurora's first-fit scheduling
+algorithm. To alleviate this, you can enable update affinity where the Scheduler will make a best-effort
+attempt to reuse the same agent for the updated task (so long as the resources for the job are not being
+increased).
+
+To enable this in the Scheduler, you can set the following options:
+
+    -enable_update_affinity=true
+    -update_affinity_reservation_hold_time=3mins
+
+You will need to tune the hold time to match the behavior you see in your cluster. If you have extremely
+high update throughput, you might have to extend it as processing updates could easily add significant
+delays between scheduling attempts. You may also have to tune scheduling parameters to achieve the
+throughput you need in your cluster. Some relevant settings (with defaults) are:
+
+    -max_schedule_attempts_per_sec=40
+    -initial_schedule_penalty=1secs
+    -max_schedule_penalty=1mins
+    -scheduling_max_batch_size=3
+    -max_tasks_per_schedule_attempt=5
+
+There are metrics exposed by the Scheduler which can provide guidance on where the bottleneck is.
+Example metrics to look at:
+
+    - schedule_attempts_blocks (if this number is greater than 0, then task throughput is hitting
+                                limits controlled by -max_schedule_attempts_per_sec)
+    - scheduled_task_penalty_* (metrics around scheduling penalties for tasks, if the numbers here are high
+                                then you could have high contention for resources)
+
+Most likely you'll hit the limit on the number of update instances that can be processed per minute
+before you hit any other limit. So if your total work done per minute starts to exceed 2k instances,
+you may need to extend `-update_affinity_reservation_hold_time`.
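+
+One illustrative way to spot-check the metrics above on the leading scheduler (host and port are
+assumptions; adjust to your deployment):
+
+    curl -s localhost:8081/vars | grep -E 'schedule_attempts|scheduled_task_penalty'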
+
+## Cluster Maintenance
+
+Aurora performs maintenance-related task drains. How often the scheduler polls for maintenance
+work can be controlled via:
+
+    -host_maintenance_polling_interval=1min
+
+## Enforcing SLA limitations
+
+Since tasks can specify their own `SLAPolicy`, the cluster needs to limit these SLA requirements.
+Too aggressive a requirement can permanently block any type of maintenance work
+(e.g. OS/kernel/security upgrades) on a host and hold it hostage.
+
+An operator can control the limits for SLA requirements via these scheduler configuration options:
+
+    -max_sla_duration_secs=2hrs
+    -min_required_instances_for_sla_check=20
+
+_Note: These limits only apply for `CountSlaPolicy` and `PercentageSlaPolicy`._
+
+### Limiting Coordinator SLA
+
+With `CoordinatorSlaPolicy` the SLA calculation is off-loaded to an external HTTP service. Some
+relevant scheduler configuration options are:
+
+    -sla_coordinator_timeout=1min
+    -max_parallel_coordinated_maintenance=10
+
+Handing off the SLA calculation to an external service can potentially block maintenance
+on hosts for an indefinite amount of time (either due to a misconfigured coordinator or
+a legitimately degraded service). In those situations, the following metrics will be helpful
+for identifying the offending tasks.
+
+    sla_coordinator_user_errors_*     (counter tracking number of times the coordinator for the task
+                                       returned a bad response.)
+    sla_coordinator_errors_*          (counter tracking number of times the scheduler was not able
+                                       to communicate with the coordinator of the task.)
+    sla_coordinator_lock_starvation_* (counter tracking number of times the scheduler was not able to
+                                       get the lock for the coordinator of the task.)
+
diff --git a/source/documentation/0.21.0/operations/installation.md b/source/documentation/0.21.0/operations/installation.md
new file mode 100644
index 0000000..12db004
--- /dev/null
+++ b/source/documentation/0.21.0/operations/installation.md
@@ -0,0 +1,256 @@
+# Installing Aurora
+
+Source and binary distributions can be found on our
+[downloads](https://aurora.apache.org/downloads/) page.  Installing from binary packages is
+recommended for most.
+
+- [Installing the scheduler](#installing-the-scheduler)
+- [Installing worker components](#installing-worker-components)
+- [Installing the client](#installing-the-client)
+- [Installing Mesos](#installing-mesos)
+- [Troubleshooting](#troubleshooting)
+
+If our binary packages don't suit you, our package build toolchain makes it easy to build your
+own packages. See the [instructions](https://github.com/apache/aurora-packaging) to learn how.
+
+
+## Machine profiles
+
+Given that many of these components communicate over the network, there are numerous ways you could
+assemble them to create an Aurora cluster.  The simplest way is to think in terms of three machine
+profiles:
+
+### Coordinator
+**Components**: ZooKeeper, Aurora scheduler, Mesos master
+
+A small number of machines (typically 3 or 5) responsible for cluster orchestration.  In most cases
+it is fine to co-locate these components in anything but very large clusters (> 1000 machines).
+Beyond that point, operators will likely want to manage these services on separate machines.
+In particular, you will want to use separate ZooKeeper ensembles for leader election and
+service discovery. Otherwise a service discovery error or outage can take down the entire cluster.
+
+In practice, 5 coordinators have been shown to reliably manage clusters with tens of thousands of
+machines.
+
+### Worker
+**Components**: Aurora executor, Aurora observer, Mesos agent
+
+The bulk of the cluster, where services will actually run.
+
+### Client
+**Components**: Aurora client, Aurora admin client
+
+Any machines that users submit jobs from.
+
+
+## Installing the scheduler
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        sudo start mesos-master
+
+2. Install ZooKeeper
+
+        sudo apt-get install -y zookeeperd
+
+3. Install the Aurora scheduler
+
+        sudo add-apt-repository -y ppa:openjdk-r/ppa
+        sudo apt-get update
+        sudo apt-get install -y openjdk-8-jre-headless wget
+
+        sudo update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-scheduler_0.17.0_amd64.deb
+        sudo dpkg -i aurora-scheduler_0.17.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-master
+
+2. Install ZooKeeper
+
+        sudo rpm -Uvh https://archive.cloudera.com/cdh4/one-click-install/redhat/6/x86_64/cloudera-cdh-4-0.x86_64.rpm
+        sudo yum install -y java-1.8.0-openjdk-headless zookeeper-server
+
+        sudo service zookeeper-server init
+        sudo systemctl start zookeeper-server
+
+3. Install the Aurora scheduler
+
+        sudo yum install -y wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-scheduler-0.17.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-scheduler-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Finalizing
+By default, the scheduler will start in an uninitialized mode.  This is because external
+coordination is necessary to be certain operator error does not result in a quorum of schedulers
+starting up and believing their databases are empty when in fact they should be re-joining a
+cluster.
+
+Because of this, a fresh install of the scheduler will need intervention to start up.  First,
+stop the scheduler service.
+Ubuntu: `sudo stop aurora-scheduler`
+CentOS: `sudo systemctl stop aurora`
+
+Now initialize the database:
+
+    sudo -u aurora mkdir -p /var/lib/aurora/scheduler/db
+    sudo -u aurora mesos-log initialize --path=/var/lib/aurora/scheduler/db
+
+Now you can start the scheduler back up.
+Ubuntu: `sudo start aurora-scheduler`
+CentOS: `sudo systemctl start aurora`
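+
+To sanity-check that the scheduler came back up, you can poll its vars endpoint (8081 is the
+default `-http_port`; adjust if you changed it):
+
+    curl -s localhost:8081/vars | grep framework_registered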
+
+
+## Installing worker components
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo apt-get install -y python2.7 wget
+
+        # NOTE: This appears to be a missing dependency of the mesos deb package and is needed
+        # for the python mesos native bindings.
+        sudo apt-get -y install libcurl4-nss-dev
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-executor_0.17.0_amd64.deb
+        sudo dpkg -i aurora-executor_0.17.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo yum install -y python2 wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-executor-0.17.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-executor-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Worker Configuration
+The executor typically does not require configuration.  Command line arguments can
+be passed to the executor via a command line argument on the scheduler (`-thermos_executor_flags`).
+
+The observer needs to be configured to look at the correct mesos directory in order to find task
+sandboxes. You should first find the Mesos working directory by looking for the Mesos agent
+`--work_dir` flag. You should see something like:
+
+        ps -eocmd | grep "mesos-slave" | grep -v grep | tr ' ' '\n' | grep "\--work_dir"
+        --work_dir=/var/lib/mesos
+
+If the flag is not set, you can view the default value like so:
+
+        mesos-slave --help
+        Usage: mesos-slave [options]
+
+          ...
+          --work_dir=VALUE      Directory path to place framework work directories
+                                (default: /tmp/mesos)
+          ...
+
+The value you find for `--work_dir`, `/var/lib/mesos` in this example, should match the Aurora
+observer value for `--mesos-root`.  You can look for that setting in a similar way on a worker
+node by grepping for `thermos_observer` and `--mesos-root`.  If the flag is not set, you can view
+the default value like so:
+
+        thermos_observer -h
+        Options:
+          ...
+          --mesos-root=MESOS_ROOT
+                                The mesos root directory to search for Thermos
+                                executor sandboxes [default: /var/lib/mesos]
+          ...
+
+In this case the default is `/var/lib/mesos` and we have a match. If there is no match, you can
+either adjust the Mesos agent start script(s) and restart the agent(s) or else adjust the
+Aurora observer start scripts and restart the observers.  To adjust the Aurora observer:
+
+#### Ubuntu Trusty
+
+    sudo sh -c 'echo "MESOS_ROOT=/tmp/mesos" >> /etc/default/thermos'
+
+#### CentOS 7
+
+Make an edit to add the `--mesos-root` flag resulting in something like:
+
+    grep -A5 OBSERVER_ARGS /etc/sysconfig/thermos
+    OBSERVER_ARGS=(
+      --port=1338
+      --mesos-root=/tmp/mesos
+      --log_to_disk=NONE
+      --log_to_stderr=google:INFO
+    )
+
+
+## Installing the client
+### Ubuntu Trusty
+
+    sudo apt-get install -y python2.7 wget
+
+    wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-tools_0.17.0_amd64.deb
+    sudo dpkg -i aurora-tools_0.17.0_amd64.deb
+
+### CentOS 7
+
+    sudo yum install -y python2 wget
+
+    wget -c https://apache.bintray.com/aurora/centos-7/aurora-tools-0.17.0-1.el7.centos.aurora.x86_64.rpm
+    sudo yum install -y aurora-tools-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Mac OS X
+
+    brew upgrade
+    brew install aurora-cli
+
+### Client Configuration
+Client configuration lives in a json file that describes the clusters available and how to reach
+them.  By default this file is at `/etc/aurora/clusters.json`.
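+
+A minimal sketch of such a file is shown below. The field names mirror those used in the Vagrant
+environment; treat the values as placeholders for your own cluster:
+
+    % cat /etc/aurora/clusters.json
+    [{
+      "name": "devcluster",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler",
+      "auth_mechanism": "UNAUTHENTICATED"
+    }]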
+
+Jobs may be submitted to the scheduler using the client, and are described with
+[job configurations](../../reference/configuration/) expressed in `.aurora` files.  Typically you will
+maintain a single job configuration file to describe one or more deployment environments (e.g.
+dev, test, prod) for a production job.
+
+
+## Installing Mesos
+Mesos uses a single package for the Mesos master and agent.  As a result, the package dependencies
+are identical for both.
+
+### Mesos on Ubuntu Trusty
+
+    sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF
+    DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]')
+    CODENAME=$(lsb_release -cs)
+
+    echo "deb http://repos.mesosphere.io/${DISTRO} ${CODENAME} main" | \
+      sudo tee /etc/apt/sources.list.d/mesosphere.list
+    sudo apt-get -y update
+
+    # Use `apt-cache showpkg mesos | grep [version]` to find the exact version.
+    sudo apt-get -y install mesos=1.1.0-2.0.107.ubuntu1404
+
+### Mesos on CentOS 7
+
+    sudo rpm -Uvh https://repos.mesosphere.io/el/7/noarch/RPMS/mesosphere-el-repo-7-1.noarch.rpm
+    sudo yum -y install mesos-1.1.0
+
+
+## Troubleshooting
+
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions in our [Troubleshooting guide](../troubleshooting/) to help get you moving.
diff --git a/source/documentation/0.21.0/operations/monitoring.md b/source/documentation/0.21.0/operations/monitoring.md
new file mode 100644
index 0000000..2ceda62
--- /dev/null
+++ b/source/documentation/0.21.0/operations/monitoring.md
@@ -0,0 +1,181 @@
+# Monitoring your Aurora cluster
+
+Before you start running important services in your Aurora cluster, it's important to set up
+monitoring and alerting of Aurora itself.  Most of your monitoring can be against the scheduler,
+since it will give you a global view of what's going on.
+
+## Reading stats
+The scheduler exposes a *lot* of instrumentation data via its HTTP interface. You can get a quick
+peek at the first few of these in our vagrant image:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars | head'
+    async_tasks_completed 1004
+    attribute_store_fetch_all_events 15
+    attribute_store_fetch_all_events_per_sec 0.0
+    attribute_store_fetch_all_nanos_per_event 0.0
+    attribute_store_fetch_all_nanos_total 3048285
+    attribute_store_fetch_all_nanos_total_per_sec 0.0
+    attribute_store_fetch_one_events 3391
+    attribute_store_fetch_one_events_per_sec 0.0
+    attribute_store_fetch_one_nanos_per_event 0.0
+    attribute_store_fetch_one_nanos_total 454690753
+
+These values are served as `Content-Type: text/plain`, with each line containing a space-separated metric
+name and value. Values may be integers, doubles, or strings (note: strings are static, others
+may be dynamic).
+
+If your monitoring infrastructure prefers JSON, the scheduler exports that as well:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars.json | python -mjson.tool | head'
+    {
+        "async_tasks_completed": 1009,
+        "attribute_store_fetch_all_events": 15,
+        "attribute_store_fetch_all_events_per_sec": 0.0,
+        "attribute_store_fetch_all_nanos_per_event": 0.0,
+        "attribute_store_fetch_all_nanos_total": 3048285,
+        "attribute_store_fetch_all_nanos_total_per_sec": 0.0,
+        "attribute_store_fetch_one_events": 3409,
+        "attribute_store_fetch_one_events_per_sec": 0.0,
+        "attribute_store_fetch_one_nanos_per_event": 0.0,
+
+This will be the same data as above, served with `Content-Type: application/json`.
+
+## Viewing live stat samples on the scheduler
+The scheduler uses the Twitter commons stats library, which keeps an internal time-series database
+of exported variables - nearly everything in `/vars` is available for instant graphing.  This is
+useful for debugging, but is not a replacement for an external monitoring system.
+
+You can view these graphs on a scheduler at `/graphview`.  It supports some composition and
+aggregation of values, which can be invaluable when triaging a problem.  For example, if you have
+the scheduler running in vagrant, check out these links:
+[simple graph](http://192.168.33.7:8081/graphview?query=jvm_uptime_secs)
+[complex composition](http://192.168.33.7:8081/graphview?query=rate\(scheduler_log_native_append_nanos_total\)%2Frate\(scheduler_log_native_append_events\)%2F1e6)
+
+### Counters and gauges
+Among numeric stats, there are two fundamental types of stats exported: _counters_ and _gauges_.
+Counters are guaranteed to be monotonically-increasing for the lifetime of a process, while gauges
+may decrease in value.  Aurora uses counters to represent things like the number of times an event
+has occurred, and gauges to capture things like the current length of a queue.  Counters are a
+natural fit for accurate composition into [rate ratios](http://en.wikipedia.org/wiki/Rate_ratio)
+(useful for sample-resistant latency calculation), while gauges are not.
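+
+As a rough illustration of why counters compose well, you can approximate a rate by sampling a
+counter twice (the host, port, and one-minute window below are arbitrary):
+
+    a=$(curl -s localhost:8081/vars | awk '/^scheduler_resource_offers /{print $2}')
+    sleep 60
+    b=$(curl -s localhost:8081/vars | awk '/^scheduler_resource_offers /{print $2}')
+    echo "offers per minute: $((b - a))"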
+
+# Alerting
+
+## Quickstart
+If you are looking for just bare-minimum alerting to get something in place quickly, set up alerting
+on `framework_registered` and `task_store_LOST`. These will give you a decent picture of overall
+health.
+
+## A note on thresholds
+One of the most difficult things in monitoring is choosing alert thresholds. With many of these
+stats, there is no value we can offer as a threshold that will be guaranteed to work for you. It
+will depend on the size of your cluster, number of jobs, churn of tasks in the cluster, etc. We
+recommend you start with a strict value after viewing a small amount of collected data, and then
+adjust thresholds as you see fit. Feel free to ask us if you would like to validate that your alerts
+and thresholds make sense.
+
+## Important stats
+
+### `jvm_uptime_secs`
+Type: integer counter
+
+The number of seconds the JVM process has been running. Comes from
+[RuntimeMXBean#getUptime()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/RuntimeMXBean.html#getUptime\(\))
+
+Detecting resets (decreasing values) on this stat will tell you that the scheduler is failing to
+stay alive.
+
+Look at the scheduler logs to identify the reason the scheduler is exiting.
+
+### `system_load_avg`
+Type: double gauge
+
+The current load average of the system for the last minute. Comes from
+[OperatingSystemMXBean#getSystemLoadAverage()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/OperatingSystemMXBean.html?is-external=true#getSystemLoadAverage\(\)).
+
+A high sustained value suggests that the scheduler machine may be over-utilized.
+
+Use standard unix tools like `top` and `ps` to track down the offending process(es).
+
+### `process_cpu_cores_utilized`
+Type: double gauge
+
+The current number of CPU cores in use by the JVM process. This should not exceed the number of
+logical CPU cores on the machine. Derived from
+[OperatingSystemMXBean#getProcessCpuTime()](http://docs.oracle.com/javase/7/docs/jre/api/management/extension/com/sun/management/OperatingSystemMXBean.html)
+
+A high sustained value indicates that the scheduler is overworked. Due to current internal design
+limitations, if this value is sustained at `1`, there is a good chance the scheduler is under water.
+
+There are two main inputs that tend to drive this figure: task scheduling attempts and status
+updates from Mesos.  You may see activity in the scheduler logs to give an indication of where
+time is being spent.  Beyond that, it really takes good familiarity with the code to effectively
+triage this.  We suggest engaging with an Aurora developer.
+
+### `task_store_LOST`
+Type: integer gauge
+
+The number of tasks stored in the scheduler that are in the `LOST` state, and have been rescheduled.
+
+If this value is increasing at a high rate, it is a sign of trouble.
+
+There are many sources of `LOST` tasks in Mesos: the scheduler, master, agent, and executor can all
+trigger this.  The first step is to look in the scheduler logs for `LOST` to identify where the
+state changes are originating.
+
+### `scheduler_resource_offers`
+Type: integer counter
+
+The number of resource offers that the scheduler has received.
+
+For a healthy scheduler, this value must be increasing over time.
+
+Assuming the scheduler is up and otherwise healthy, you will want to check if the master thinks it
+is sending offers. You should also look at the master's web interface to see if it has a large
+number of outstanding offers that it is waiting to be returned.
+
+### `framework_registered`
+Type: binary integer counter
+
+Will be `1` for the leading scheduler that is registered with the Mesos master, `0` for passive
+schedulers.
+
+A sustained period without a `1` (or where `sum() != 1`) warrants investigation.
+
+If there is no leading scheduler, look in the scheduler and master logs for why.  If there are
+multiple schedulers claiming leadership, this suggests a split brain and warrants filing a critical
+bug.
+
+### `rate(scheduler_log_native_append_nanos_total)/rate(scheduler_log_native_append_events)`
+Type: rate ratio of integer counters
+
+This composes two counters to compute a windowed figure for the latency of replicated log writes.
+
+A hike in this value suggests disk bandwidth contention.
+
+Look in scheduler logs for any reported oddness with saving to the replicated log. Also use
+standard tools like `vmstat` and `iotop` to identify whether the disk has become slow or
+over-utilized. We suggest using a dedicated disk for the replicated log to mitigate this.
+
+### `timed_out_tasks`
+Type: integer counter
+
+Tracks the number of times the scheduler has given up while waiting
+(for `-transient_task_state_timeout`) to hear back about a task that is in a transient state
+(e.g. `ASSIGNED`, `KILLING`), and has moved to `LOST` before rescheduling.
+
+This value is currently known to increase occasionally when the scheduler fails over
+([AURORA-740](https://issues.apache.org/jira/browse/AURORA-740)). However, any large spike in this
+value warrants investigation.
+
+The scheduler will log when it times out a task. You should trace the task ID of the timed out
+task into the master, agent, and/or executors to determine where the message was dropped.
+
+### `http_500_responses_events`
+Type: integer counter
+
+The total number of HTTP 500 status responses sent by the scheduler. Includes API and asset serving.
+
+An increase warrants investigation.
+
+Look in the scheduler logs to identify why the scheduler returned a 500; there should be a stack trace.
diff --git a/source/documentation/0.21.0/operations/security.md b/source/documentation/0.21.0/operations/security.md
new file mode 100644
index 0000000..1fdf10f
--- /dev/null
+++ b/source/documentation/0.21.0/operations/security.md
@@ -0,0 +1,362 @@
+Securing your Aurora Cluster
+============================
+
+Aurora integrates with [Apache Shiro](http://shiro.apache.org/) to provide security
+controls for its API. In addition to providing some useful features out of the box, Shiro
+also allows Aurora cluster administrators to adapt the security system to their organization’s
+existing infrastructure. The announcer in the Aurora thermos executor also supports security
+controls for talking to ZooKeeper.
+
+
+- [Enabling Security](#enabling-security)
+- [Authentication](#authentication)
+	- [HTTP Basic Authentication](#http-basic-authentication)
+		- [Server Configuration](#server-configuration)
+		- [Client Configuration](#client-configuration)
+	- [HTTP SPNEGO Authentication (Kerberos)](#http-spnego-authentication-kerberos)
+		- [Server Configuration](#server-configuration-1)
+		- [Client Configuration](#client-configuration-1)
+- [Authorization](#authorization)
+	- [Using an INI file to define security controls](#using-an-ini-file-to-define-security-controls)
+		- [Caveats](#caveats)
+- [Implementing a Custom Realm](#implementing-a-custom-realm)
+	- [Packaging a realm module](#packaging-a-realm-module)
+- [Announcer Authentication](#announcer-authentication)
+    - [ZooKeeper authentication configuration](#zookeeper-authentication-configuration)
+    - [Executor settings](#executor-settings)
+- [Scheduler HTTPS](#scheduler-https)
+- [Known Issues](#known-issues)
+
+# Enabling Security
+
+There are two major components of security:
+[authentication and authorization](http://en.wikipedia.org/wiki/Authentication#Authorization).  A
+cluster administrator may choose the approach used for each, and may also implement custom
+mechanisms for either.  Later sections describe the options available. To enable authentication
+ for the announcer, see [Announcer Authentication](#announcer-authentication)
+
+
+# Authentication
+
+The scheduler must be configured with instructions for how to process authentication
+credentials at a minimum.  There are currently two built-in authentication schemes -
+[HTTP Basic Authentication](http://en.wikipedia.org/wiki/Basic_access_authentication), and
+[SPNEGO](http://en.wikipedia.org/wiki/SPNEGO) (Kerberos).
+
+## HTTP Basic Authentication
+
+Basic Authentication is a very quick way to add *some* security.  It is supported
+by all major browsers and HTTP client libraries with minimal work.  However,
+before relying on Basic Authentication you should be aware of the [security
+considerations](http://tools.ietf.org/html/rfc2617#section-4).
+
+### Server Configuration
+
+At a minimum you need to set the following command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_realm_modules=INI_AUTHNZ
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+[users]
+sally = apple, admin
+
+[roles]
+admin = *
+```
+
+The details of the security.ini file are explained below. Note that this file contains plaintext,
+unhashed passwords.
+
+### Client Configuration
+
+To configure the client for HTTP Basic authentication, add an entry to ~/.netrc with your credentials
+
+```
+% cat ~/.netrc
+# ...
+
+machine aurora.example.com
+login sally
+password apple
+
+# ...
+```
+
+No changes are required to `clusters.json`.
+
+## HTTP SPNEGO Authentication (Kerberos)
+
+### Server Configuration
+At a minimum you need to set the following command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=NEGOTIATE
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+% cat path/to/security.ini
+[users]
+sally = _, admin
+
+[roles]
+admin = *
+```
+
+What's going on here? First, Aurora must be configured to request Kerberos credentials when presented with an
+unauthenticated request. This is achieved by setting
+
+```
+-http_authentication_mechanism=NEGOTIATE
+```
+
+Next, a Realm module must be configured to **authenticate** the current request using the Kerberos
+credentials that were requested. Aurora ships with a realm module that can do this
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN[,...]
+```
+
+The Kerberos5Realm requires a keytab file and a server principal name. The principal name will usually
+be in the form `HTTP/aurora.example.com@EXAMPLE.COM`.
+
+```
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+```
+
+The Kerberos5 realm module is authentication-only. For scheduler security to work you must also
+enable a realm module that provides an Authorizer implementation. For example, to do this using the
+IniShiroRealmModule:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+```
+
+You can then configure authorization using a security.ini file as described below
+(the password field is ignored). You must configure the realm module with the path to this file:
+
+```
+-shiro_ini_path=path/to/security.ini
+```
+
+### Client Configuration
+To use Kerberos on the client-side you must build Kerberos-enabled client binaries. Do this with
+
+```
+./pants binary src/main/python/apache/aurora/kerberos:kaurora
+./pants binary src/main/python/apache/aurora/kerberos:kaurora_admin
+```
+
+You must also configure each cluster where you've enabled Kerberos on the scheduler
+to use Kerberos authentication. Do this by setting `auth_mechanism` to `KERBEROS`
+in `clusters.json`.
+
+```
+% cat ~/.aurora/clusters.json
+{
+    "devcluser": {
+        "auth_mechanism": "KERBEROS",
+        ...
+    },
+    ...
+}
+```
+
+# Authorization
+Given a means to authenticate the identity a client claims, we need to define what privileges they have.
+
+## Using an INI file to define security controls
+
+The simplest security configuration for Aurora is an INI file on the scheduler.  For small
+clusters, or clusters where the users and access controls change relatively infrequently, this is
+likely the preferred approach.  However you may want to avoid this approach if access permissions
+are rapidly changing, or if your access control information already exists in another system.
+
+You can enable INI-based configuration with following scheduler command line arguments:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_ini_path=path/to/security.ini
+```
+
+*note* As the argument name reveals, this is using Shiro’s
+[IniRealm](http://shiro.apache.org/configuration.html#Configuration-INIConfiguration) behind
+the scenes.
+
+The INI file will contain two sections - users and roles.  Here’s an example for what might
+be in security.ini:
+
+```
+[users]
+sally = apple, admin
+jim = 123456, accounting
+becky = letmein, webapp
+larry = 654321, accounting
+steve = password
+
+[roles]
+admin = *
+accounting = thrift.AuroraAdmin:setQuota
+webapp = thrift.AuroraSchedulerManager:*:webapp
+```
+
+The users section defines user credentials and the role(s) they are members of.  These lines
+are of the format `<user> = <password>[, <role>...]`.  As you probably noticed, the passwords are
+in plaintext, so read access to this file should be restricted.
+
+In this configuration, each user has different privileges for actions in the cluster because
+of the roles they are a part of:
+
+* admin is granted all privileges
+* accounting may adjust the amount of resource quota for any role
+* webapp represents a collection of jobs that represents a service, and its members may create and modify any jobs owned by it
+
+### Caveats
+You might find documentation on the Internet suggesting there are additional sections in `shiro.ini`,
+like `[main]` and `[urls]`. These are not supported by Aurora as it uses a different mechanism to configure
+those parts of Shiro. Think of Aurora's `security.ini` as a subset with only `[users]` and `[roles]` sections.
+
+## Implementing Delegated Authorization
+
+It is possible to leverage Shiro's `runAs` feature by implementing a custom Servlet Filter that provides
+the capability and passing its fully qualified class name to the command line argument
+`-shiro_after_auth_filter`. The filter is registered in the same filter chain as the Shiro auth filters
+and is placed after them, which ensures it is invoked only after the Shiro filters have had a chance
+to authenticate the request.
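+
+As a minimal sketch of what such a filter could look like (the class name, the `X-Aurora-Run-As`
+header, and the realm label below are purely illustrative and not part of Aurora), an after-auth
+filter might check for an already authenticated caller and assume the requested identity via
+Shiro's `runAs`:
+
+```java
+package com.example;
+
+import java.io.IOException;
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+import org.apache.shiro.SecurityUtils;
+import org.apache.shiro.subject.SimplePrincipalCollection;
+import org.apache.shiro.subject.Subject;
+
+public class RunAsFilter implements Filter {
+  // Hypothetical header naming the identity the caller wants to act as.
+  private static final String DELEGATE_HEADER = "X-Aurora-Run-As";
+
+  @Override
+  public void init(FilterConfig filterConfig) {
+    // No initialization required for this sketch.
+  }
+
+  @Override
+  public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
+      throws IOException, ServletException {
+
+    String delegate = ((HttpServletRequest) request).getHeader(DELEGATE_HEADER);
+    Subject subject = SecurityUtils.getSubject();
+
+    // Only allow an already authenticated caller to assume another identity.
+    if (delegate != null && subject.isAuthenticated()) {
+      subject.runAs(new SimplePrincipalCollection(delegate, "delegated"));
+      try {
+        chain.doFilter(request, response);
+      } finally {
+        subject.releaseRunAs();
+      }
+    } else {
+      chain.doFilter(request, response);
+    }
+  }
+
+  @Override
+  public void destroy() {
+    // Nothing to clean up.
+  }
+}
+```
+
+Such a class would then be passed to the scheduler via
+`-shiro_after_auth_filter=com.example.RunAsFilter`. In a real deployment you would also want to
+authorize who may delegate to whom before calling `runAs`.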
+
+# Implementing a Custom Realm
+
+Since Aurora’s security is backed by [Apache Shiro](https://shiro.apache.org), you can implement a
+custom [Realm](http://shiro.apache.org/realm.html) to define organization-specific security behavior.
+
+In addition to using Shiro's standard APIs to implement a Realm you can link against Aurora to
+access the type-safe Permissions Aurora uses. See the Javadoc for `org.apache.aurora.scheduler.spi`
+for more information.
+
+## Packaging a realm module
+Package your custom Realm(s) with a Guice module that exposes a `Set<Realm>` multibinding.
+
+```java
+package com.example;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.multibindings.Multibinder;
+import org.apache.shiro.realm.Realm;
+
+public class MyRealmModule extends AbstractModule {
+  @Override
+  public void configure() {
+    Realm myRealm = new MyRealm();
+
+    Multibinder.newSetBinder(binder(), Realm.class).addBinding().toInstance(myRealm);
+  }
+
+  static class MyRealm implements Realm {
+    // Realm implementation.
+  }
+}
+```
+
+To use your module in the scheduler, include it as a realm module based on its fully-qualified
+class name:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ,com.example.MyRealmModule
+```
+
+
+# Announcer Authentication
+The Thermos executor can be configured to authenticate with ZooKeeper and include
+an [ACL](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl)
+on the nodes it creates, which will specify
+the privileges of clients to perform different actions on these nodes.  This
+feature is enabled by specifying an ACL configuration file to the executor with the
+`--announcer-zookeeper-auth-config` command line argument.
+
+When this feature is _not_ enabled, nodes created by the executor will have 'world/all' permission
+(`ZOO_OPEN_ACL_UNSAFE`).  In most production environments, operators should specify an ACL and
+limit access.
+
+## ZooKeeper Authentication Configuration
+The configuration file must be formatted as JSON with the following schema:
+
+```json
+{
+  "auth": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>"
+    }
+  ],
+  "acl": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>",
+      "permissions": {
+        "read": <bool>,
+        "write": <bool>,
+        "create": <bool>,
+        "delete": <bool>,
+        "admin": <bool>
+      }
+    }
+  ]
+}
+```
+
+The `scheme` defines the encoding of the credential field.  Note that these fields are passed
+directly to ZooKeeper (except in the case of the _digest_ scheme, where the executor will hash and
+encode the credential appropriately before passing it to ZooKeeper). In addition to `acl`, a list
+of authentication credentials must be provided in `auth` to use for the connection.
+
+All properties of the `permissions` object default to `false` if not provided.
+
+## Executor settings
+To enable the executor to authenticate against ZK, `--announcer-zookeeper-auth-config` should be
+set to the configuration file.
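+
+For illustration, a config that authenticates with the _digest_ scheme and grants full access to
+that identity while leaving the nodes world-readable might look like the following (the username
+and password are placeholders):
+
+```json
+{
+  "auth": [
+    {
+      "scheme": "digest",
+      "credential": "aurora:some-secret"
+    }
+  ],
+  "acl": [
+    {
+      "scheme": "digest",
+      "credential": "aurora:some-secret",
+      "permissions": {
+        "read": true,
+        "write": true,
+        "create": true,
+        "delete": true,
+        "admin": true
+      }
+    },
+    {
+      "scheme": "world",
+      "credential": "anyone",
+      "permissions": {
+        "read": true
+      }
+    }
+  ]
+}
+```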
+
+
+# Scheduler HTTPS
+
+The Aurora scheduler does not provide native HTTPS support ([AURORA-343](https://issues.apache.org/jira/browse/AURORA-343)).
+It is therefore recommended to deploy it behind an HTTPS capable reverse proxy such as nginx or Apache2.
+
+A simple setup is to launch both the reverse proxy and the Aurora scheduler on the same port, but
+bind the reverse proxy to the public IP of the host and the scheduler to localhost:
+
+    -ip=127.0.0.1
+    -http_port=8081
+
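+For illustration, a minimal nginx server block matching this setup might look like the following
+(the hostname and certificate paths are placeholders):
+
+    server {
+      listen 443 ssl;
+      server_name aurora.example.com;
+
+      ssl_certificate     /etc/ssl/certs/aurora.example.com.pem;
+      ssl_certificate_key /etc/ssl/private/aurora.example.com.key;
+
+      location / {
+        proxy_pass http://127.0.0.1:8081;
+        proxy_set_header Host $host;
+        proxy_set_header X-Forwarded-Proto https;
+      }
+    }
+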
+If your clients connect to the scheduler via [`proxy_url`](../../reference/scheduler-configuration/),
+you can update it to `https`. If you use the ZooKeeper based discovery instead, the scheduler
+needs to be launched via
+
+    -serverset_endpoint_name=https
+
+in order to announce its HTTPS support within ZooKeeper.
+
+
+# Known Issues
+
+While the APIs and SPIs we ship with are stable as of 0.8.0, we are aware of several incremental
+improvements. Please follow, vote, or send patches.
+
+Relevant tickets:
+* [AURORA-1248](https://issues.apache.org/jira/browse/AURORA-1248): Client retries 4xx errors
+* [AURORA-1279](https://issues.apache.org/jira/browse/AURORA-1279): Remove kerberos-specific build targets
+* [AURORA-1291](https://issues.apache.org/jira/browse/AURORA-1291): Consider defining a JSON format in place of INI
+* [AURORA-1179](https://issues.apache.org/jira/browse/AURORA-1179): Support hashed passwords in security.ini
+* [AURORA-1295](https://issues.apache.org/jira/browse/AURORA-1295): Support security for the ReadOnlyScheduler service
diff --git a/source/documentation/0.21.0/operations/storage.md b/source/documentation/0.21.0/operations/storage.md
new file mode 100644
index 0000000..fc3a1c3
--- /dev/null
+++ b/source/documentation/0.21.0/operations/storage.md
@@ -0,0 +1,96 @@
+# Aurora Scheduler Storage
+
+- [Overview](#overview)
+- [Storage Semantics](#storage-semantics)
+  - [Reads, writes, modifications](#reads-writes-modifications)
+    - [Read lifecycle](#read-lifecycle)
+    - [Write lifecycle](#write-lifecycle)
+  - [Atomicity, consistency and isolation](#atomicity-consistency-and-isolation)
+  - [Population on restart](#population-on-restart)
+
+
+## Overview
+
+The Aurora scheduler maintains data that needs to be persisted to survive failovers and restarts.
+For example:
+
+* Task configurations and scheduled task instances
+* Job update configurations and update progress
+* Production resource quotas
+* Mesos resource offer host attributes
+
+Aurora solves its persistence needs by leveraging the
+[Mesos implementation of a Paxos replicated log](http://mesos.apache.org/documentation/latest/replicated-log-internals/)
+[[1]](https://ramcloud.stanford.edu/~ongaro/userstudy/paxos.pdf)
+[[2]](http://en.wikipedia.org/wiki/State_machine_replication) with a key-value
+[LevelDB](https://github.com/google/leveldb) storage as persistence media.
+
+Conceptually, it can be represented by the following major components:
+
+* Volatile storage: in-memory cache of all available data. Implemented via in-memory
+[H2 Database](http://www.h2database.com/html/main.html) and accessed via
+[MyBatis](http://mybatis.github.io/mybatis-3/).
+* Log manager: interface between Aurora storage and Mesos replicated log. The default schema format
+is [thrift](https://github.com/apache/thrift). Data is stored in serialized binary form.
+* Snapshot manager: all data is periodically persisted to the Mesos replicated log in a single snapshot.
+This helps establish periodic recovery checkpoints and speeds up volatile storage recovery on
+restart.
+* Backup manager: as a precaution, snapshots are periodically written out into backup files.
+This solves a [disaster recovery problem](../backup-restore/)
+in case of a complete loss or corruption of Mesos log files.
+
+![Storage hierarchy](../images/storage_hierarchy.png)
+
+
+## Storage Semantics
+
+This section covers implementation details of the Aurora storage system. Understanding them can be
+useful when investigating performance issues.
+
+### Reads, writes, modifications
+
+All services in Aurora access data via a set of predefined store interfaces (aka stores) logically
+grouped by the type of data they serve. Every interface defines a specific set of operations allowed
+on the data, thus abstracting away the storage access and the actual persistence implementation. The
+latter is especially important given the general immutability of persisted data: with the Mesos
+replicated log as the underlying persistence solution, data can be read and written easily but not
+modified in place. All modifications are simulated by saving new versions of modified objects. This,
+together with general performance considerations, justifies the existence of the volatile in-memory store.
+
+#### Read lifecycle
+
+There are two types of reads available in Aurora: consistent and weakly-consistent. The difference
+is explained [below](#atomicity-consistency-and-isolation).
+
+All reads are served from the volatile storage, making them generally cheap operations from a
+performance standpoint. The majority of the volatile stores are backed by the in-memory H2
+database. This allows for rich schema definitions, queries, and relationships that key-value
+storage is unable to match.
+
+#### Write lifecycle
+
+Writes are more involved operations since, in addition to updating the volatile store, data has to
+be appended to the replicated log. Data is not available for reads until fully acknowledged by both
+the replicated log and the volatile storage.
+
+### Atomicity, consistency and isolation
+
+Aurora uses [write-ahead logging](http://en.wikipedia.org/wiki/Write-ahead_logging) to ensure
+consistency between replicated and volatile storage. In Aurora, data is first written into the
+replicated log and only then updated in the volatile store.
+
+Aurora storage uses read-write locks to serialize data mutations and provide a consistent view of
+the available data. The `Storage` interface exposes three major types of operations:
+* `consistentRead` - access is locked using the reader's lock and provides a consistent view on read
+* `weaklyConsistentRead` - access is lock-less. Delivers the best contention performance but may
+result in stale reads
+* `write` - access is fully serialized using the writer's lock. Operation success requires both the
+volatile and replicated writes to succeed.
+
+The consistency of the volatile store is enforced via H2 transactional isolation.
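+
+As a purely illustrative sketch of the ordering described above (the class and interface names
+below are hypothetical and simplified, not Aurora's actual storage classes), a write takes the
+writer's lock, appends to the durable log first, and only then mutates the in-memory store:
+
+```java
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.function.Function;
+
+final class WriteAheadStorage<S> {
+  /** Minimal stand-in for the replicated, append-only log. */
+  interface Log { void append(byte[] entry); }
+
+  private final ReadWriteLock lock = new ReentrantReadWriteLock();
+  private final Log replicatedLog;
+  private final S volatileStore;
+
+  WriteAheadStorage(Log replicatedLog, S volatileStore) {
+    this.replicatedLog = replicatedLog;
+    this.volatileStore = volatileStore;
+  }
+
+  <T> T write(byte[] logEntry, Function<S, T> mutation) {
+    lock.writeLock().lock();
+    try {
+      replicatedLog.append(logEntry);        // 1. durable append to the replicated log
+      return mutation.apply(volatileStore);  // 2. only then apply to the volatile store
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  <T> T consistentRead(Function<S, T> query) {
+    lock.readLock().lock();                  // consistent reads take the reader's lock
+    try {
+      return query.apply(volatileStore);
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  <T> T weaklyConsistentRead(Function<S, T> query) {
+    return query.apply(volatileStore);       // lock-free; may observe a slightly stale view
+  }
+}
+```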
+
+### Population on restart
+
+Any time a scheduler restarts, it restores its volatile state from the most recent position recorded
+in the replicated log by restoring the snapshot and replaying individual log entries on top to fully
+recover the state up to the last write.
diff --git a/source/documentation/0.21.0/operations/troubleshooting.md b/source/documentation/0.21.0/operations/troubleshooting.md
new file mode 100644
index 0000000..a7d4ef2
--- /dev/null
+++ b/source/documentation/0.21.0/operations/troubleshooting.md
@@ -0,0 +1,106 @@
+# Troubleshooting
+
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions here to help get you moving.
+
+## Replicated log not initialized
+
+### Symptoms
+- Scheduler RPCs and web interface claim `Storage is not READY`
+- Scheduler log repeatedly prints messages like
+
+  ```
+  I1016 16:12:27.234133 26081 replica.cpp:638] Replica in EMPTY status
+  received a broadcasted recover request
+  I1016 16:12:27.234256 26084 recover.cpp:188] Received a recover response
+  from a replica in EMPTY status
+  ```
+
+### Solution
+When you create a new cluster, you need to inform a quorum of schedulers that they are safe to
+consider their database to be empty by [initializing](../installation/#finalizing) the
+replicated log. This is done to prevent the scheduler from modifying the cluster state in the event
+of multiple simultaneous disk failures or, more likely, misconfiguration of the replicated log path.
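+
+Following the linked installation instructions, initialization on each scheduler host looks roughly
+like this (assuming the default log path `/var/lib/aurora/scheduler/db` and an `aurora` system
+user; adjust to match your `-native_log_file_path` setting):
+
+    sudo -u aurora mkdir -p /var/lib/aurora/scheduler/db
+    sudo -u aurora mesos-log initialize --path=/var/lib/aurora/scheduler/db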
+
+
+## No distinct leader elected
+
+### Symptoms
+Either no scheduler or multiple schedulers believe themselves to be leading.
+
+### Solution
+Verify the [network configuration](../configuration/#network-configuration) of the Aurora
+scheduler is correct:
+
+* The `LIBPROCESS_IP:LIBPROCESS_PORT` endpoints must be reachable from all coordinator nodes running
+  a scheduler or a Mesos master.
+* Hostname lookups have to resolve to public IPs rather than local ones that cannot be reached
+  from another node.
+
+In addition, double-check the [quota settings](../configuration/#replicated-log-configuration) of the
+replicated log.
+
+
+## Scheduler not registered
+
+### Symptoms
+Scheduler log contains
+
+    Framework has not been registered within the tolerated delay.
+
+### Solution
+Double-check that the scheduler is configured correctly to reach the Mesos master. If you are registering
+the master in ZooKeeper, make sure the command line argument passed to the master:
+
+    --zk=zk://$ZK_HOST:2181/mesos/master
+
+is the same as the one on the scheduler:
+
+    -mesos_master_address=zk://$ZK_HOST:2181/mesos/master
+
+
+## Scheduler not running
+
+### Symptoms
+The scheduler process commits suicide regularly. This happens under error conditions, but
+also on purpose at regular intervals.
+
+### Solution
+Aurora is meant to be run under supervision. You have to configure a supervisor like
+[Monit](http://mmonit.com/monit/), [supervisord](http://supervisord.org/), or systemd to run the
+scheduler and restart it whenever it fails or exits on purpose.
+
+Aurora supports an active health checking protocol on its admin HTTP interface - if a `GET /health`
+times out or returns anything other than `200 OK` the scheduler process is unhealthy and should be
+restarted.
+
+For example, monit can be configured with
+
+    if failed port 8081 send "GET /health HTTP/1.0\r\n" expect "OK\n" with timeout 2 seconds for 10 cycles then restart
+
+assuming you set `-http_port=8081`.
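+
+Similarly, a minimal systemd unit could handle the restarts (the `ExecStart` path is a placeholder
+for however your packaging launches the scheduler):
+
+    [Unit]
+    Description=Apache Aurora scheduler
+
+    [Service]
+    ExecStart=/usr/local/bin/aurora-scheduler-startup.sh
+    Restart=always
+    RestartSec=5
+
+    [Install]
+    WantedBy=multi-user.target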
+
+
+## Executor crashing or hanging
+
+### Symptoms
+Launched task instances never transition to `STARTING` or `RUNNING` but immediately transition
+to `FAILED` or `LOST`.
+
+### Solution
+The executor might be failing due to unknown internal errors such as a missing native dependency
+of the Mesos executor library. Open the Mesos UI and navigate to the failing
+task in question. Inspect the various log files in order to learn about what is going on.
+
+
+## Observer does not discover tasks
+
+### Symptoms
+The observer UI does not list any tasks. When navigating from the scheduler UI to the state of
+a particular task instance the observer returns `Error: 404 Not Found`.
+
+### Solution
+The observer refreshes its internal state every couple of seconds. If waiting a few seconds
+does not resolve the issue, check that the `--mesos-root` setting of the observer and the
+`--work_dir` option of the Mesos agent are in sync. For details, see our
+[Install instructions](../installation/#worker-configuration).
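+
+For example, assuming the agent's work directory is `/var/lib/mesos` (an illustrative path), the
+two settings should line up, however the processes are launched in your environment:
+
+    mesos-agent       --work_dir=/var/lib/mesos ...
+    thermos_observer  --mesos-root=/var/lib/mesos ...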
diff --git a/source/documentation/0.21.0/operations/upgrades.md b/source/documentation/0.21.0/operations/upgrades.md
new file mode 100644
index 0000000..635faf5
--- /dev/null
+++ b/source/documentation/0.21.0/operations/upgrades.md
@@ -0,0 +1,41 @@
+# Upgrading Aurora
+
+Aurora can be updated from one version to the next without any downtime or restarts of running
+jobs. The same holds true for Mesos.
+
+Generally speaking, Mesos and Aurora strive for a +1/-1 version compatibility, i.e. all components
+are meant to be forward and backwards compatible for at least one version. This implies it
+does not really matter in which order updates are carried out.
+
+Exceptions to this rule are documented in the [Aurora release-notes](../../../RELEASE-NOTES/)
+and the [Mesos upgrade instructions](https://mesos.apache.org/documentation/latest/upgrades/).
+
+
+## Instructions
+
+To upgrade Aurora, follow these steps:
+
+1. Update the first scheduler instance by updating its software and restarting its process.
+2. Wait until the scheduler is up and its [Replicated Log](../configuration/#replicated-log-configuration)
+   has caught up with the other schedulers in the cluster. The log has caught up if `log/recovered` has
+   the value `1`. You can check the metric via `curl LIBPROCESS_IP:LIBPROCESS_PORT/metrics/snapshot`
+   (see the example command after this list), where the IP and port refer to the
+   [libmesos configuration](../configuration/#network-configuration) settings of the scheduler instance.
+3. Proceed with the next scheduler until all instances are updated.
+4. Update the Aurora executor deployed to the compute nodes of your cluster. Jobs will continue
+   running with the old version of the executor, and will only be launched by the new one once
+   they are restarted eventually due to natural cluster churn.
+5. Distribute the new Aurora client to your users.
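+
+For step 2, the check might look like the following, where `$LIBPROCESS_IP` and `$LIBPROCESS_PORT`
+stand for the libprocess settings of the scheduler instance being verified:
+
+    curl -s "$LIBPROCESS_IP:$LIBPROCESS_PORT/metrics/snapshot" | grep '"log/recovered"'
+
+Proceed once the reported value is `1`.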
+
+
+## Best Practices
+
+Even though not absolutely mandatory, we advise adhering to the following rules:
+
+* Never skip any major or minor releases when updating. If you have to catch up several releases you
+  have to deploy all intermediary versions. Skipping bugfix releases is acceptable though.
+* Verify all updates on a test cluster before touching your production deployments.
+* To minimize the number of failovers during updates, update the currently leading scheduler
+  instance last.
+* Update the Aurora executor on a subset of compute nodes as a canary before deploying the change to
+  the whole fleet.
diff --git a/source/documentation/0.21.0/reference/client-cluster-configuration.md b/source/documentation/0.21.0/reference/client-cluster-configuration.md
new file mode 100644
index 0000000..ff7a3ac
--- /dev/null
+++ b/source/documentation/0.21.0/reference/client-cluster-configuration.md
@@ -0,0 +1,99 @@
+# Client Cluster Configuration
+
+A cluster configuration file is used by the Aurora client to describe the Aurora clusters with
+which it can communicate. Ultimately this allows client users to reference clusters with short names
+like us-east and eu.
+
+A cluster configuration is formatted as JSON.  The simplest cluster configuration is one that
+communicates with a single (non-leader-elected) scheduler.  For example:
+
+    [{
+      "name": "example",
+      "scheduler_uri": "http://localhost:55555",
+    }]
+
+
+A configuration for a leader-elected scheduler would contain something like:
+
+    [{
+      "name": "example",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler"
+    }]
+
+
+The following properties may be set:
+
+  **Property**             | **Type** | **Description**
+  :------------------------| :------- | :--------------
+   **name**                | String   | Cluster name (Required)
+   **slave_root**          | String   | Path to Mesos agent work dir (Required)
+   **slave_run_directory** | String   | Name of Mesos agent run dir (Required)
+   **zk**                  | String   | Hostname of ZooKeeper instance used to resolve Aurora schedulers.
+   **zk_port**             | Integer  | Port of ZooKeeper instance used to locate Aurora schedulers (Default: 2181)
+   **scheduler_zk_path**   | String   | ZooKeeper path under which scheduler instances are registered.
+   **scheduler_uri**       | String   | URI of Aurora scheduler instance.
+   **proxy_url**           | String   | Used by the client to format URLs for display.
+   **auth_mechanism**      | String   | The authentication mechanism to use when communicating with the scheduler. (Default: UNAUTHENTICATED)
+   **docker_registry**     | String   | Used by the client to resolve docker tags.
+
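+A fuller example combining ZooKeeper-based discovery with the optional display and authentication
+settings might look like the following (all values are illustrative):
+
+    [{
+      "name": "us-east",
+      "zk": "zookeeper.example.com",
+      "zk_port": 2181,
+      "scheduler_zk_path": "/aurora/scheduler",
+      "slave_root": "/var/lib/mesos",
+      "slave_run_directory": "latest",
+      "proxy_url": "https://aurora.example.com",
+      "auth_mechanism": "KERBEROS",
+      "docker_registry": "https://registry.example.com"
+    }]
+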
+
+## Details
+
+### `name`
+
+The name of the Aurora cluster represented by this entry. This name will be the `cluster` portion of
+any job keys identifying jobs running within the cluster.
+
+### `slave_root`
+
+The path on the Mesos agents where executing tasks can be found. It is used in combination with the
+`slave_run_directory` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This value should match the value passed to `mesos-slave`
+as `-work_dir`.
+
+### `slave_run_directory`
+
+The name of the directory where the task run can be found. This is used in combination with the
+`slave_root` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This should almost always be set to `latest`.
+
+### `zk`
+
+The hostname of the ZooKeeper instance used to resolve the Aurora scheduler. Aurora uses ZooKeeper
+to elect a leader. The client will connect to this ZooKeeper instance to determine the current
+leader. This host should match the host passed to the scheduler as `-zk_endpoints`.
+
+### `zk_port`
+
+The port on which the ZooKeeper instance is running. If not set this will default to the standard
+ZooKeeper port of 2181. This port should match the port in the host passed to the scheduler as
+`-zk_endpoints`.
+
+### `scheduler_zk_path`
+
+The path on the ZooKeeper instance under which the Aurora serverset is registered. This value should
+match the value passed to the scheduler as `-serverset_path`.
+
+### `scheduler_uri`
+
+The URI of the scheduler. This would be used in place of the ZooKeeper related configuration above
+in circumstances where direct communication with a single scheduler is needed (e.g. testing
+environments). It is strongly advised to **never** use this property for production deploys.
+
+### `proxy_url`
+
+Instead of using the hostname of the leading scheduler as the base URL, if `proxy_url` is set, its
+value will be used instead. In that scenario the value for `proxy_url` would be, for example, the
+URL of a VIP behind a load balancer or a round-robin DNS name.
+
+### `auth_mechanism`
+
+The identifier of an authentication mechanism that the client should use when communicating with the
+scheduler. Support for values other than `UNAUTHENTICATED` requires a matching scheduler-side
+[security configuration](../../operations/security/).
+
+### `docker_registry`
+
+The URI of the Docker Registry that will be used by the Aurora client to resolve docker tags to concrete
+image ids, when using the docker binding helper, like `{{docker.image[name][tag]}}`.
diff --git a/source/documentation/0.21.0/reference/client-commands.md b/source/documentation/0.21.0/reference/client-commands.md
new file mode 100644
index 0000000..49b45ca
--- /dev/null
+++ b/source/documentation/0.21.0/reference/client-commands.md
@@ -0,0 +1,339 @@
+Aurora Client Commands
+======================
+
+- [Introduction](#introduction)
+- [Cluster Configuration](#cluster-configuration)
+- [Job Keys](#job-keys)
+- [Modifying Aurora Client Commands](#modifying-aurora-client-commands)
+- [Regular Jobs](#regular-jobs)
+    - [Creating and Running a Job](#creating-and-running-a-job)
+    - [Running a Command On a Running Job](#running-a-command-on-a-running-job)
+    - [Killing a Job](#killing-a-job)
+    - [Adding Instances](#adding-instances)
+    - [Updating a Job](#updating-a-job)
+        - [Coordinated job updates](#coordinated-job-updates)
+    - [Renaming a Job](#renaming-a-job)
+    - [Restarting Jobs](#restarting-jobs)
+- [Cron Jobs](#cron-jobs)
+- [Comparing Jobs](#comparing-jobs)
+- [Viewing/Examining Jobs](#viewingexamining-jobs)
+    - [Listing Jobs](#listing-jobs)
+    - [Inspecting a Job](#inspecting-a-job)
+    - [Versions](#versions)
+    - [Checking Your Quota](#checking-your-quota)
+    - [Finding a Job on Web UI](#finding-a-job-on-web-ui)
+    - [Getting Job Status](#getting-job-status)
+    - [Opening the Web UI](#opening-the-web-ui)
+    - [SSHing to a Specific Task Machine](#sshing-to-a-specific-task-machine)
+    - [SCPing with Specific Task Machines](#scping-with-specific-task-machines)
+    - [Templating Command Arguments](#templating-command-arguments)
+
+Introduction
+------------
+
+Once you have written an `.aurora` configuration file that describes
+your Job and its parameters and functionality, you interact with Aurora
+using Aurora Client commands. This document describes all of these commands
+and how and when to use them. All Aurora Client commands start with
+`aurora`, followed by the name of the specific command and its
+arguments.
+
+*Job keys* are a very common argument to Aurora commands, as well as the
+gateway to useful information about a Job. Before using Aurora, you
+should read the next section which describes them in detail. The section
+after that briefly describes how you can modify the behavior of certain
+Aurora Client commands, linking to a detailed document about how to do
+that.
+
+This is followed by the Regular Jobs section, which describes the basic
+Client commands for creating, running, and manipulating Aurora Jobs.
+After that are sections on Comparing Jobs and Viewing/Examining Jobs. In
+other words, various commands for getting information and metadata about
+Aurora Jobs.
+
+Cluster Configuration
+---------------------
+
+The client must be able to find a configuration file that specifies available clusters. This file
+declares shorthand names for clusters, which are in turn referenced by job configuration files
+and client commands.
+
+The client will load at most two configuration files, making both of their defined clusters
+available. The first is intended to be a system-installed cluster, using the path specified in
+the environment variable `AURORA_CONFIG_ROOT`, defaulting to `/etc/aurora/clusters.json` if the
+environment variable is not set. The second is a user-installed file, located at
+`~/.aurora/clusters.json`.
+
+For more details on cluster configuration see the
+[Client Cluster Configuration](../client-cluster-configuration/) documentation.
+
+Job Keys
+--------
+
+A job key is a unique system-wide identifier for an Aurora-managed
+Job, for example `cluster1/web-team/test/experiment204`. It is a 4-tuple
+consisting of, in order, *cluster*, *role*, *environment*, and
+*jobname*, separated by `/`s. Cluster is the name of an Aurora
+cluster. Role is the Unix service account under which the Job
+runs. Environment is a namespace component like `devel`, `test`,
+`prod`, or `stagingN`. Jobname is the Job's name.
+
+The combination of all four values uniquely specifies the Job. If any
+one value is different from that of another job key, the two job keys
+refer to different Jobs. For example, job key
+`cluster1/tyg/prod/workhorse` is different from
+`cluster1/tyg/prod/workcamel` is different from
+`cluster2/tyg/prod/workhorse` is different from
+`cluster2/foo/prod/workhorse` is different from
+`cluster1/tyg/test/workhorse`.
+
+Role names are user accounts existing on the agent machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
+Modifying Aurora Client Commands
+--------------------------------
+
+For certain Aurora Client commands, you can define hook methods that run
+either before or after an action that takes place during the command's
+execution, as well as based on whether the action finished successfully or failed
+during execution. Basically, a hook is code that lets you extend the
+command's actions. The hook executes on the client side, specifically on
+the machine executing Aurora commands.
+
+Hooks can be associated with these Aurora Client commands:
+
+  - `job create`
+  - `job kill`
+  - `job restart`
+
+The process for writing and activating them is complex enough
+that we explain it in a devoted document, [Hooks for Aurora Client API](../client-hooks/).
+
+Regular Jobs
+------------
+
+This section covers Aurora commands related to running, killing,
+renaming, updating, and restarting a basic Aurora Job.
+
+### Creating and Running a Job
+
+    aurora job create <job key> <configuration file>
+
+Creates and then runs a Job with the specified job key based on a `.aurora` configuration file.
+The configuration file may also contain and activate hook definitions.
+
+### Running a Command On a Running Job
+
+    aurora task run CLUSTER/ROLE/ENV/NAME[/INSTANCES] <cmd>
+
+Runs a shell command on all machines currently hosting shards of a
+single Job.
+
+`run` supports the same command line wildcards used to populate a Job's
+commands; i.e. anything in the `{{mesos.*}}` and `{{thermos.*}}`
+namespaces.
+
+### Killing a Job
+
+    aurora job killall CLUSTER/ROLE/ENV/NAME
+
+Kills all Tasks associated with the specified Job, blocking until all
+are terminated. Defaults to killing all instances in the Job.
+
+The `<configuration file>` argument for `kill` is optional. Use it only
+if it contains hook definitions and activations that affect the
+kill command.
+
+### Adding Instances
+
+    aurora job add CLUSTER/ROLE/ENV/NAME/INSTANCE <count>
+
+Adds `<count>` instances to the existing job. The configuration of the new instances is derived from
+an active job instance pointed to by the `/INSTANCE` part of the job specification. This command is
+a simpler way to scale out an existing job when an instance with the desired task configuration
+already exists. Use `aurora update start` to add instances with a new (updated) configuration.
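+
+For example, to add five instances modeled on instance 0 of a hypothetical job:
+
+    aurora job add devcluster/www-data/prod/hello/0 5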
+
+### Updating a Job
+
+You can manage job updates using the `aurora update` command.  Please see
+[the Job Update documentation](../../features/job-updates/) for more details.
+
+
+### Renaming a Job
+
+Renaming is a tricky operation as downstream clients must be informed of
+the new name. A conservative approach
+to renaming suitable for production services is:
+
+1.  Modify the Aurora configuration file to change the role,
+    environment, and/or name as appropriate to the standardized naming
+    scheme.
+2.  Check that only these naming components have changed
+    with `aurora diff`.
+
+        aurora job diff CLUSTER/ROLE/ENV/NAME <job_configuration>
+
+3.  Create the (identical) job at the new key. You may need to request a
+    temporary quota increase.
+
+        aurora job create CLUSTER/ROLE/ENV/NEW_NAME <job_configuration>
+
+4.  Migrate all clients over to the new job key. Update all links and
+    dashboards. Ensure that both job keys run identical versions of the
+    code while in this state.
+5.  After verifying that all clients have successfully moved over, kill
+    the old job.
+
+        aurora job killall CLUSTER/ROLE/ENV/NAME
+
+6.  If you received a temporary quota increase, be sure to let the
+    powers that be know you no longer need the additional capacity.
+
+### Restarting Jobs
+
+`restart` restarts all shards of the Job identified by the given job key:
+
+    aurora job restart CLUSTER/ROLE/ENV/NAME[/INSTANCES]
+
+Restarts are controlled on the client side, so aborting
+the `job restart` command halts the restart operation.
+
+**Note**: `job restart` only applies its command line arguments and does not
+use, nor is it affected by, `update.config`. Restarting
+does ***not*** involve a configuration change. To update the
+configuration, use `update.config`.
+
+The `--config` argument for restart is optional. Use it only
+if it contains hook definitions and activations that affect the
+`job restart` command.
+
+Cron Jobs
+---------
+
+You can manage cron jobs using the `aurora cron` command.  Please see
+[the Cron Jobs Feature](../../features/cron-jobs/) for more details.
+
+Comparing Jobs
+--------------
+
+    aurora job diff CLUSTER/ROLE/ENV/NAME <job configuration>
+
+Compares a job configuration against a running job. By default the diff
+is determined using `diff`, though you may choose an alternate
+diff program by specifying the `DIFF_VIEWER` environment variable.
+
+Viewing/Examining Jobs
+----------------------
+
+Above we discussed creating, killing, and updating Jobs. Here we discuss
+how to view and examine Jobs.
+
+### Listing Jobs
+
+    aurora config list <job configuration>
+
+Lists all Jobs registered with the Aurora scheduler in the named cluster for the named role.
+
+### Inspecting a Job
+
+    aurora job inspect CLUSTER/ROLE/ENV/NAME <job configuration>
+
+`inspect` verifies that its specified job can be parsed from a
+configuration file, and displays the parsed configuration.
+
+### Checking Your Quota
+
+    aurora quota get CLUSTER/ROLE
+
+Prints the production quota allocated to the role at the given
+cluster. Only non-[dedicated](../../features/constraints/#dedicated-attribute)
+[production](../configuration/#job-objects) jobs consume quota.
+
+### Finding a Job on Web UI
+
+When you create a job, part of the output response contains a URL that goes
+to the job's scheduler UI page. For example:
+
+    vagrant@precise64:~$ aurora job create devcluster/www-data/prod/hello /vagrant/examples/jobs/hello_world.aurora
+    INFO] Creating job hello
+    INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+    INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+You can go to the scheduler UI page for this job via `http://precise64:8081/scheduler/www-data/prod/hello`.
+You can go to the overall scheduler UI page by trimming that URL down to the part that ends at `scheduler`: `http://precise64:8081/scheduler`.
+
+Once you click through to a role page, you see Jobs arranged
+separately by pending jobs, active jobs and finished jobs.
+Jobs are arranged by role, typically a service account for
+production jobs and user accounts for test or development jobs.
+
+### Getting Job Status
+
+    aurora job status <job_key>
+
+Returns the status of recent tasks associated with the
+`job_key` specified Job in its supplied cluster. Typically this includes
+a mix of active tasks (running or assigned) and inactive tasks
+(successful, failed, and lost.)
+
+### Opening the Web UI
+
+Use the Job's web UI scheduler URL or the `aurora status` command to find out on which
+machines individual tasks are scheduled. You can open the web UI via the
+`open` command line command if invoked from your machine:
+
+    aurora job open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+If only the cluster is specified, it goes directly to that cluster's
+scheduler main page. If the role is specified, it goes to the top-level
+role page. If the full job key is specified, it goes directly to the job
+page where you can inspect individual tasks.
+
+### SSHing to a Specific Task Machine
+
+    aurora task ssh <job_key> <shard number>
+
+You can have the Aurora client ssh directly to the machine that has been
+assigned a particular Job/shard number. This may be useful for quickly
+diagnosing issues such as performance issues or abnormal behavior on a
+particular machine.
+
+### SCPing with Specific Task Machines
+
+    aurora task scp [<cluster>/<role>/<env>/<job_name>/<instance_id>]:source [<cluster>/<role>/<env>/<job_name>/<instance_id>]:dest
+
+You can have the Aurora client copy file(s)/folder(s) to, from, and between
+individual tasks. The sandbox folder serves as the relative root and is the
+same folder you see when you browse `chroot` from the Scheduler task UI. You
+can also use absolute paths (like for `/tmp`), but tilde expansion is not
+supported. Currently, this command is only fully supported for Mesos
+containers. Users may use this to copy files from Docker containers but they
+cannot copy files to them.
+
+### Templating Command Arguments
+
+    aurora task run [-e] [-t THREADS] <job_key> -- <<command-line>>
+
+Given a job specification, run the supplied command on all hosts and
+return the output. You may use the standard Mustache templating rules:
+
+- `{{thermos.ports[name]}}` substitutes the specific named port of the
+  task assigned to this machine
+- `{{mesos.instance}}` substitutes the shard id of the job's task
+  assigned to this machine
+- `{{thermos.task_id}}` substitutes the task id of the job's task
+  assigned to this machine
+
+For example, the following type of pattern can be a powerful diagnostic
+tool:
+
+    aurora task run -t5 cluster1/tyg/devel/seizure -- \
+      'curl -s -m1 localhost:{{thermos.ports[http]}}/vars | grep uptime'
+
+By default, the command runs in the Task's sandbox. The `-e` option can
+run the command in the executor's sandbox. This is mostly useful for
+Aurora administrators.
+
+You can parallelize the runs by using the `-t` option.
diff --git a/source/documentation/0.21.0/reference/client-hooks.md b/source/documentation/0.21.0/reference/client-hooks.md
new file mode 100644
index 0000000..ee64193
--- /dev/null
+++ b/source/documentation/0.21.0/reference/client-hooks.md
@@ -0,0 +1,228 @@
+# Hooks for Aurora Client API
+
+You can execute hook methods around Aurora API Client methods when they are called by the Aurora Command Line commands.
+
+Explaining how hooks work is a bit tricky because of some indirection about what they apply to. Basically, a hook is code that executes when a particular Aurora Client API method runs, letting you extend the method's actions. The hook executes on the client side, specifically on the machine executing Aurora commands.
+
+The catch is that hooks are associated with Aurora Client API methods, which users don't directly call. Instead, users call Aurora Command Line commands, which call Client API methods during their execution. Since which hooks run depend on which Client API methods get called, you will need to know which Command Line commands call which API methods. Later on, there is a table showing the various associations.
+
+**Terminology Note**: From now on, "method(s)" refer to Client API methods, and "command(s)" refer to Command Line commands.
+
+- [Hook Types](#hook-types)
+- [Execution Order](#execution-order)
+- [Hookable Methods](#hookable-methods)
+- [Activating and Using Hooks](#activating-and-using-hooks)
+- [.aurora Config File Settings](#aurora-config-file-settings)
+- [Command Line](#command-line)
+- [Hooks Protocol](#hooks-protocol)
+  - [pre_ Methods](#pre_-methods)
+  - [err_ Methods](#err_-methods)
+  - [post_ Methods](#post_-methods)
+- [Generic Hooks](#generic-hooks)
+- [Hooks Process Checklist](#hooks-process-checklist)
+
+
+## Hook Types
+
+Hooks have three basic types, differing by when they run with respect to their associated method.
+
+`pre_<method_name>`: When its associated method is called, the `pre_` hook executes first, then the called method. If the `pre_` hook fails, the method never runs. Later code that expected the method to succeed may be affected by this, which can result in the Aurora client terminating.
+
+Note that a `pre_` hook can error-trap internally so it does not
+return `False`. Designers/contributors of new `pre_` hooks should
+consider whether or not to error-trap them. You can error trap at the
+highest level very generally and always pass the `pre_` hook by
+returning `True`. For example:
+
+    def pre_create(...):
+      do_something()  # if do_something fails with an exception, the create_job is not attempted!
+      return True
+
+    # However...
+    def pre_create(...):
+      try:
+        do_something()  # may cause exception
+      except Exception:  # generic error trap will catch it
+        pass  # and ignore the exception
+      return True  # create_job will run in any case!
+
+`post_<method_name>`: A `post_` hook executes after its associated method successfully finishes running. If it fails, the already executed method is unaffected. A `post_` hook's error is trapped, and any later operations are unaffected.
+
+`err_<method_name>`: Executes only when its associated method returns a status other than OK or throws an exception. If an `err_` hook fails, the already executed method is unaffected. An `err_` hook's error is trapped, and any later operations are unaffected.
+
+## Execution Order
+
+A command with `pre_`, `post_`, and `err_` hooks defined and activated for its called method executes in the following order when the method successfully executes:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and successfully finishes
+6. `post_` method hook runs
+7. Command code executes
+8. Command execution ends
+
+The following is what happens when, for the same command and hooks, the method associated with the command suffers an error and does not successfully finish executing:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and fails
+6. `err_` method hook runs
+7. Command Code executes (if `err_` method does not end the command execution)
+8. Command execution ends
+
+Note that the `post_` and `err_` hooks for the same method can never both run for a single execution of that method.
+
+## Hookable Methods
+
+You can associate `pre_`, `post_`, and `err_` hooks with the following methods. Since you do not directly interact with the methods, but rather the Aurora Command Line commands that call them, for each method we also list the command(s) that can call the method. Note that a different method or methods may be called by a command depending on how the command's other code executes. Similarly, multiple commands can call the same method. We also list the methods' argument signatures, which are used by their associated hooks. <a name="Chart"></a>
+
+  Aurora Client API Method | Client API Method Argument Signature | Aurora Command Line Command
+  -------------------------| ------------------------------------- | ---------------------------
+  ```create_job``` | ```self```, ```config``` | ```job create```, ```runtask```
+  ```restart``` | ```self```, ```job_key```, ```shards```, ```update_config```, ```health_check_interval_seconds``` | ```job restart```
+  ```kill_job``` | ```self```, ```job_key```, ```shards=None``` |  ```job kill```
+  ```start_cronjob``` | ```self```, ```job_key``` | ```cron start```
+  ```start_job_update``` | ```self```, ```config```, ```instances=None``` | ```update start```
+
+Some specific examples:
+
+* `pre_create_job` executes when a `create_job` method is called, and before the `create_job` method itself executes.
+
+* `post_cancel_update` executes after a `cancel_update` method has successfully finished running.
+
+* `err_kill_job` executes when the `kill_job` method is called, but doesn't successfully finish running.
+
+## Activating and Using Hooks
+
+By default, hooks are inactive. If you do not want to use hooks, you do not need to make any changes to your code. If you do want to use hooks, you will need to alter your `.aurora` config file to activate them both for the configuration as a whole as well as for individual `Job`s. And, of course, you will need to define in your config file what happens when a particular hook executes.
+
+## .aurora Config File Settings
+
+You can define a top-level `hooks` variable in any `.aurora` config file. `hooks` is a list of all objects that define hooks used by `Job`s defined in that config file. If you do not want to define any hooks for a configuration, `hooks` is optional.
+
+    hooks = [Object_with_defined_hooks1, Object_with_defined_hooks2]
+
+Be careful when assembling a config file using `include` on multiple smaller config files. If there are multiple files that assign a value to `hooks`, only the last assignment made will stick. For example, if `x.aurora` has `hooks = [a, b, c]` and `y.aurora` has `hooks = [d, e, f]` and `z.aurora` has, in this order, `include x.aurora` and `include y.aurora`, the `hooks` value will be `[d, e, f]`.
+
+Also, for any `Job` that you want to use hooks with, its `Job` definition in the `.aurora` config file must set an `enable_hooks` flag to `True` (it defaults to `False`). By default, hooks are disabled and you must enable them for `Job`s of your choice.
+
+To summarize, to use hooks for a particular job, you must both activate hooks for your config file as a whole, and for that job. Activating hooks only for individual jobs won't work, nor will only activating hooks for your config file as a whole. You must also specify the hooks' defining object in the `hooks` variable.
+
+Recall that `.aurora` config files are written in Pystachio. So the following turns on hooks for production jobs at cluster1 and cluster2, but leaves them off for similar jobs with a defined user role. Of course, you also need to list the objects that define the hooks in your config file's `hooks` variable.
+
+    jobs = [
+            Job(enable_hooks = True, cluster = c, env = 'prod') for c in ('cluster1', 'cluster2')
+           ]
+    jobs.extend(
+       Job(cluster = c, env = 'prod', role = getpass.getuser()) for c in ('cluster1', 'cluster2'))
+       # Hooks disabled for these jobs
+
+## Command Line
+
+All Aurora Command Line commands now accept an `.aurora` config file as an optional parameter (some, of course, accept it as a required parameter). Whenever a command has a `.aurora` file parameter, any hooks specified and activated in the `.aurora` file can be used. For example:
+
+    aurora job restart cluster1/role/env/app myapp.aurora
+
+The command activates any hooks specified and activated in `myapp.aurora`. For the `restart` command, that is the only thing the `myapp.aurora` parameter does. So, if the command was the following, since there is no `.aurora` config file to specify any hooks, no hooks on the `restart` command can run.
+
+    aurora job restart cluster1/role/env/app
+
+## Hooks Protocol
+
+Any object defined in the `.aurora` config file can define hook methods. You should define your hook methods within a class, and then use the class name as a value in the `hooks` list in your config file.
+
+Note that you can define other methods in the class that its hook methods can call; all the logic of a hook does not have to be in its definition.
+
+The following example defines a class containing a `pre_kill_job` hook definition that calls another method defined in the class.
+
+    # Defines a method pre_kill_job
+    class KillConfirmer(object):
+      def confirm(self, msg):
+        return raw_input(msg).lower() == 'yes'
+
+      def pre_kill_job(self, job_key, shards=None):
+        shards = ('shards %s' % shards) if shards is not None else 'all shards'
+        return self.confirm('Are you sure you want to kill %s (%s)? (yes/no): '
+                            % (job_key, shards))
+
+### pre_ Methods
+
+`pre_` methods have the signature:
+
+    pre_<API method name>(self, <associated method's signature>)
+
+`pre_` methods have the same signature as their associated method, with the addition of `self` as the first parameter. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `pre_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+If this method returns False, the API command call aborts.
+
+### err_ Methods
+
+`err_` methods have the signature:
+
+    err_<API method name>(self, exc, <associated method's signature>)
+
+`err_` methods have the same signature as their associated method, with the addition of a first parameter `self` and a second parameter `exc`. `exc` is either a result with responseCode other than `ResponseCode.OK` or an `Exception`. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `err_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+`err_` method return codes are ignored.
+
+### post_ Methods
+
+`post_` methods have the signature:
+
+    post_<API method name>(self, result, <associated method signature>)
+
+`post_` method parameters are `self`, then `result`, followed by the same parameter signature as their associated method. `result` is the result of the associated method call. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `post_` methods, you can use the `*` and `**` syntax to designate that all unspecified arguments are passed in a list to the `*`ed parameter and all unspecified named arguments with values are passed as name/value pairs to the `**`ed parameter.
+
+`post_` method return codes are ignored.
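+
+A sketch of a `post_` hook in the same style as the `KillConfirmer` example above (the class and
+message are illustrative; remember the object must also be listed in `hooks` and the job must set
+`enable_hooks = True`):
+
+    class CreateLogger(object):
+      def post_create_job(self, result, *args, **kw):
+        # 'result' is the value returned by create_job; the remaining arguments mirror
+        # create_job's signature from the chart above.
+        print('create_job finished with result: %s' % (result,))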
+
+## Generic Hooks
+
+There are seven Aurora API Methods which any of the three hook types can attach to. Thus, there are 21 possible hook/method combinations for a single `.aurora` config file. Say that you define `pre_` and `post_` hooks for the `restart` method. That leaves 19 undefined hook/method combinations; `err_restart` and the 3 `pre_`, `post_`, and `err_` hooks for each of the other 6 hookable methods. You can define what happens when any of these otherwise undefined 19 hooks execute via a generic hook, whose signature is:
+
+    generic_hook(self, hook_config, event, method_name, result_or_err, args*, kw**)
+
+where:
+
+* `hook_config` is a named tuple of `config` (the Pystachio `config` object) and `job_key`.
+
+* `event` is one of `pre`, `err`, or `post`, indicating which type of hook the generic hook is standing in for. For example, assume no specific hooks were defined for the `restart` API command. If `generic_hook` is defined and activated, and `restart` is called, `generic_hook` will effectively run as `pre_restart`, `post_restart`, and `err_restart`. You can use a selection statement on this value so that `generic_hook` will act differently based on whether it is standing in for a `pre_`, `post_`, or `err_` hook.
+
+* `method_name` is the Client API method name whose execution is causing this execution of the `generic_hook`.
+
+* `args*`, `kw**` are the API method arguments and keyword arguments respectively.
+* `result_or_err` is a tri-state parameter taking one of these three values:
+  1. `None` for `pre_` hooks
+  2. `result` for `post_` hooks
+  3. `exc` for `err_` hooks
+
+Example:
+
+    # Overrides the standard do-nothing generic_hook by adding a log writing operation.
+    from twitter.common import log
+
+    class Logger(object):
+      '''Adds to the log every time a hookable API method is called'''
+      def generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw):
+        log.info('%s: %s_%s of %s'
+                 % (self.__class__.__name__, event, method_name, hook_config.job_key))
+
+## Hooks Process Checklist
+
+1. In your `.aurora` config file, add a `hooks` variable. Note that you may want to define a `.aurora` file only for hook definitions and then include this file in multiple other config files that you want to use the same hooks.
+
+    hooks = []
+
+2. In the `hooks` variable, list all objects that define hooks used by `Job`s defined in this config:
+
+    hooks = [Object_hook_definer1, Object_hook_definer2]
+
+3. For each job that uses hooks in this config file, add `enable_hooks = True` to the `Job` definition. Note that this is necessary even if you only want to use the generic hook.
+
+4. Write your `pre_`, `post_`, and `err_` hook definitions as part of an object definition in your `.aurora` config file.
+
+5. If desired, write your `generic_hook` definition as part of an object definition in your `.aurora` config file. Remember, the object must be listed as a member of `hooks`.
+
+6. If your Aurora command line command does not otherwise take an `.aurora` config file argument, add the appropriate `.aurora` file as an argument in order to define and activate the configuration's hooks.
diff --git a/source/documentation/0.21.0/reference/configuration-best-practices.md b/source/documentation/0.21.0/reference/configuration-best-practices.md
new file mode 100644
index 0000000..9ce7fcb
--- /dev/null
+++ b/source/documentation/0.21.0/reference/configuration-best-practices.md
@@ -0,0 +1,187 @@
+Aurora Configuration Best Practices
+===================================
+
+Use As Few .aurora Files As Possible
+------------------------------------
+
+When creating your `.aurora` configuration, try to keep all versions of
+a particular job within the same `.aurora` file. For example, if you
+have separate jobs for `cluster1`, `cluster1` staging, `cluster1`
+testing, and `cluster2`, keep them as close together as possible.
+
+Constructs shared across multiple jobs owned by your team (e.g.
+team-level defaults or structural templates) can be split into separate
+`.aurora` files and included via the `include` directive.
+
+
+Avoid Boilerplate
+------------------
+
+If you see repetition or find yourself copying and pasting any parts of
+your configuration, it's likely an opportunity for templating. Take the
+example below:
+
+`redundant.aurora` contains:
+
+    download = Process(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'rm -rf Python-2.7.3 && tar xjf Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    build = Process(
+      name = 'build',
+      cmdline = 'pushd Python-2.7.3 && ./configure && make && popd',
+      max_failures = 1)
+
+    email = Process(
+      name = 'email',
+      cmdline = 'echo Success | mail feynman@tmc.com',
+      max_failures = 5,
+      min_duration = 1)
+
+    build_python = Task(
+      name = 'build_python',
+      processes = [download, unpack, build, email],
+      constraints = [Constraint(order = ['download', 'unpack', 'build', 'email'])])
+
+As you'll notice, there's a lot of repetition in the `Process`
+definitions. For example, almost every process sets a `max_failures`
+limit to 5 and a `min_duration` to 1. This is an opportunity for factoring
+into a common process template.
+
+Furthermore, the Python version is repeated everywhere. This can be
+bound via structural templating as described in the [Advanced Binding](../configuration-templating/#AdvancedBinding)
+section.
+
+`less_redundant.aurora` contains:
+
+    class Python(Struct):
+      version = Required(String)
+      base = Default(String, 'Python-{{version}}')
+      package = Default(String, '{{base}}.tar.bz2')
+
+    ReliableProcess = Process(
+      max_failures = 5,
+      min_duration = 1)
+
+    download = ReliableProcess(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/{{python.version}}/{{python.package}}')
+
+    unpack = ReliableProcess(
+      name = 'unpack',
+      cmdline = 'rm -rf {{python.base}} && tar xjf {{python.package}}')
+
+    build = ReliableProcess(
+      name = 'build',
+      cmdline = 'pushd {{python.base}} && ./configure && make && popd',
+      max_failures = 1)
+
+    email = ReliableProcess(
+      name = 'email',
+      cmdline = 'echo Success | mail {{role}}@foocorp.com')
+
+    build_python = SequentialTask(
+      name = 'build_python',
+      processes = [download, unpack, build, email]).bind(python = Python(version = "2.7.3"))
+
+
+Thermos Uses bash, But Thermos Is Not bash
+-------------------------------------------
+
+#### Bad
+
+Many tiny Processes make for harder-to-manage configurations.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'rcp user@my_machine:my_application .'
+    )
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+#### Good
+
+Each `cmdline` runs in a bash subshell, so you have the full power of
+bash. Chaining commands with `&&` or `||` is almost always the right
+thing to do.
+
+Also for Tasks that are simply a list of processes that run one after
+another, consider using the `SequentialTask` helper which applies a
+linear ordering constraint for you.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'rcp user@my_machine:my_application . && unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+
+Rarely Use Functions In Your Configurations
+-------------------------------------------
+
+90% of the time you define a function in a `.aurora` file, you're
+probably Doing It Wrong(TM).
+
+#### Bad
+
+    def get_my_task(name, user, cpu, ram, disk):
+      return Task(
+        name = name,
+        user = user,
+        processes = [STAGE_PROCESS, RUN_PROCESS],
+        constraints = order(STAGE_PROCESS, RUN_PROCESS),
+        resources = Resources(cpu = cpu, ram = ram, disk = disk)
+     )
+
+     task_one = get_my_task('task_one', 'feynman', 1.0, 32*MB, 1*GB)
+     task_two = get_my_task('task_two', 'feynman', 2.0, 64*MB, 1*GB)
+
+#### Good
+
+This one is more idiomatic. Forced keyword arguments prevent accidents,
+e.g. constructing a task where `32*MB` ends up as the disk value when you
+meant 32 MB of RAM. Less proliferation of task-construction techniques
+makes for an easier-to-read, quicker-to-understand, and more composable
+configuration.
+
+    TASK_TEMPLATE = SequentialTask(
+      user = 'wickman',
+      processes = [STAGE_PROCESS, RUN_PROCESS],
+    )
+
+    task_one = TASK_TEMPLATE(
+      name = 'task_one',
+      resources = Resources(cpu = 1.0, ram = 32*MB, disk = 1*GB)
+    )
+
+    task_two = TASK_TEMPLATE(
+      name = 'task_two',
+      resources = Resources(cpu = 2.0, ram = 64*MB, disk = 1*GB)
+    )
diff --git a/source/documentation/0.21.0/reference/configuration-templating.md b/source/documentation/0.21.0/reference/configuration-templating.md
new file mode 100644
index 0000000..16d5c9d
--- /dev/null
+++ b/source/documentation/0.21.0/reference/configuration-templating.md
@@ -0,0 +1,306 @@
+Aurora Configuration Templating
+===============================
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a templating library called
+*Pystachio*, a powerful tool for configuration specification and reuse.
+
+[Aurora Configuration Reference](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the *Aurora Configuration
+Reference*, without `import` statements - the Aurora config loader
+injects them automatically. Other than that, the `.aurora` format
+works like any other Python script.
+
+
+Templating 1: Binding in Pystachio
+----------------------------------
+
+Pystachio uses the visually distinctive {{}} to indicate template
+variables. These are often called "mustache variables" after the
+similarly appearing variables in the Mustache templating system and
+because the curly braces resemble mustaches.
+
+If you are familiar with the Mustache system, note that templates in
+Pystachio have significant differences. They have no nesting, joining, or
+inheritance semantics. On the other hand, templates are evaluated
+iteratively, so this affords some level of indirection.
+
+Let's start with the simplest template: text with one
+variable, in this case `name`:
+
+    Hello {{name}}
+
+If we evaluate this as is, we'd get back:
+
+    Hello
+
+If a template variable doesn't have a value, it's replaced with nothing
+when evaluated. If we add a binding to give it a value:
+
+    { "name" : "Tom" }
+
+We'd get back:
+
+    Hello Tom
+
+Every Pystachio object has an associated `.bind` method that can bind
+values to {{}} variables. Bindings are not immediately evaluated.
+Instead, they are evaluated only when the interpolated value of the
+object is necessary, e.g. for performing equality or serializing a
+message over the wire.
+
+Objects with and without mustache templated variables behave
+differently:
+
+    >>> Float(1.5)
+    Float(1.5)
+
+    >>> Float('{{x}}.5')
+    Float({{x}}.5)
+
+    >>> Float('{{x}}.5').bind(x = 1)
+    Float(1.5)
+
+    >>> Float('{{x}}.5').bind(x = 1) == Float(1.5)
+    True
+
+    >>> contextual_object = String('{{metavar{{number}}}}').bind(
+    ... metavar1 = "first", metavar2 = "second")
+
+    >>> contextual_object
+    String({{metavar{{number}}}})
+
+    >>> contextual_object.bind(number = 1)
+    String(first)
+
+    >>> contextual_object.bind(number = 2)
+    String(second)
+
+You usually bind simple key-to-value pairs, but you can also bind three
+other kinds of objects: lists, dictionaries, and structurals. These are
+described in detail later.
+
+
+### Structurals in Pystachio / Aurora
+
+Most Aurora/Thermos users don't ever (knowingly) interact with `String`,
+`Float`, or `Integer` Pystachio objects directly. Instead they interact
+with derived structural (`Struct`) objects that are collections of
+fundamental and structural objects. The structural object components are
+called *attributes*. Aurora's most used structural objects are `Job`,
+`Task`, and `Process`:
+
+    class Process(Struct):
+      cmdline = Required(String)
+      name = Required(String)
+      max_failures = Default(Integer, 1)
+      daemon = Default(Boolean, False)
+      ephemeral = Default(Boolean, False)
+      min_duration = Default(Integer, 5)
+      final = Default(Boolean, False)
+
+Construct default objects by following the object's type with (). If you
+want an attribute to have a value different from its default, include
+the attribute name and value inside the parentheses.
+
+    >>> Process()
+    Process(daemon=False, max_failures=1, ephemeral=False,
+      min_duration=5, final=False)
+
+Attribute values can be template variables, which then receive specific
+values when creating the object.
+
+    >>> Process(cmdline = 'echo {{message}}')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo {{message}}, final=False)
+
+    >>> Process(cmdline = 'echo {{message}}').bind(message = 'hello world')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+A powerful binding property is that all of an object's children inherit its
+bindings:
+
+    >>> List(Process)([
+    ... Process(name = '{{prefix}}_one'),
+    ... Process(name = '{{prefix}}_two')
+    ... ]).bind(prefix = 'hello')
+    ProcessList(
+      Process(daemon=False, name=hello_one, max_failures=1, ephemeral=False, min_duration=5, final=False),
+      Process(daemon=False, name=hello_two, max_failures=1, ephemeral=False, min_duration=5, final=False)
+      )
+
+Remember that an Aurora Job contains Tasks which contain Processes. A
+Job level binding is inherited by its Tasks and all their Processes.
+Similarly a Task level binding is available to that Task and its
+Processes but is *not* visible at the Job level (inheritance is a
+one-way street.)
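+
+For instance, a minimal sketch of this inheritance: a binding applied at the
+Task level reaches a mustache variable used inside one of its Processes'
+`cmdline` (the greeting value is purely illustrative):
+
+    >>> t = Task(
+    ...   name = 'greeter',
+    ...   processes = [Process(name = 'greeter', cmdline = 'echo {{greeting}}')]
+    ... ).bind(greeting = 'hello world')
+
+When `t` is interpolated, the inner Process's `cmdline` becomes
+`echo hello world`, even though the binding was made on the Task.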
+
+#### Mustaches Within Structurals
+
+When you define a `Struct` schema, one powerful, but confusing, feature
+is that all of that structure's attributes are Mustache variables within
+the enclosing scope *once they have been populated*.
+
+For example, when `Process` is defined above, all its attributes, such as
+{{`name`}}, {{`cmdline`}}, {{`max_failures`}} and so on, are immediately
+defined as Mustache variables, implicitly bound into the `Process`, and
+inherited by all child objects once they are defined.
+
+Thus, you can do the following:
+
+    >>> Process(name = "installer", cmdline = "echo {{name}} is running")
+    Process(daemon=False, name=installer, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo installer is running, final=False)
+
+WARNING: This binding only takes place in one direction. For example,
+the following does NOT work and does not set the `Process` `name`
+attribute's value.
+
+    >>> Process().bind(name = "installer")
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5, final=False)
+
+The following is also not possible and results in an infinite loop that
+attempts to resolve `Process.name`.
+
+    >>> Process(name = '{{name}}').bind(name = 'installer')
+
+Do not confuse Structural attributes with bound Mustache variables.
+Attributes are implicitly converted to Mustache variables but not vice
+versa.
+
+### Templating 2: Structurals Are Factories
+
+#### A Second Way of Templating
+
+A second templating method is as powerful as the first and is often
+confused with it. This method stems from the automatic conversion of
+Struct attributes to Mustache variables described above.
+
+Suppose you create a Process object:
+
+    >>> p = Process(name = "process_one", cmdline = "echo hello world")
+
+    >>> p
+    Process(daemon=False, name=process_one, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+This `Process` object, "`p`", can be used wherever a `Process` object is
+needed. It can also be reused by changing the value(s) of its
+attribute(s). Here we change its `name` attribute from `process_one` to
+`process_two`.
+
+    >>> p(name = "process_two")
+    Process(daemon=False, name=process_two, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+Template creation is a common use for this technique:
+
+    >>> Daemon = Process(daemon = True)
+    >>> logrotate = Daemon(name = 'logrotate', cmdline = './logrotate conf/logrotate.conf')
+    >>> mysql = Daemon(name = 'mysql', cmdline = 'bin/mysqld --safe-mode')
+
+### Advanced Binding
+
+As described above, `.bind()` binds simple strings or numbers to
+Mustache variables. In addition to Structural types formed by combining
+atomic types, Pystachio has two container types, `List` and `Map`, which
+can also be bound via `.bind()`.
+
+#### Bind Syntax
+
+The `bind()` function can take Python dictionaries or `kwargs`
+interchangeably (when "`kwargs`" is in a function definition, `kwargs`
+receives a Python dictionary containing all keyword arguments after the
+formal parameter list).
+
+    >>> String('{{foo}}').bind(foo = 'bar') == String('{{foo}}').bind({'foo': 'bar'})
+    True
+
+Bindings done "closer" to the object in question take precedence:
+
+    >>> p = Process(name = '{{context}}_process')
+    >>> t = Task().bind(context = 'global')
+    >>> t(processes = [p, p.bind(context = 'local')])
+    Task(processes=ProcessList(
+      Process(daemon=False, name=global_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5),
+      Process(daemon=False, name=local_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5)
+    ))
+
+#### Binding Complex Objects
+
+##### Lists
+
+    >>> fibonacci = List(Integer)([1, 1, 2, 3, 5, 8, 13])
+    >>> String('{{fib[4]}}').bind(fib = fibonacci)
+    String(5)
+
+##### Maps
+
+    >>> first_names = Map(String, String)({'Kent': 'Clark', 'Wayne': 'Bruce', 'Prince': 'Diana'})
+    >>> String('{{first[Kent]}}').bind(first = first_names)
+    String(Clark)
+
+##### Structurals
+
+    >>> String('{{p.cmdline}}').bind(p = Process(cmdline = "echo hello world"))
+    String(echo hello world)
+
+### Structural Binding
+
+Use structural templates when binding more than two or three individual
+values at the Job or Task level. For fewer values, standard
+key-to-string binding is sufficient.
+
+Structural binding is a very powerful pattern and is most useful in
+Aurora/Thermos for doing Structural configuration. For example, you can
+define a job profile. The following profile uses `HDFS`, the Hadoop
+Distributed File System, to designate a file's location. `HDFS` does
+not come with Aurora, so you'll need to either install it separately
+or change the way the dataset is designated.
+
+    class Profile(Struct):
+      version = Required(String)
+      environment = Required(String)
+      dataset = Default(String, 'hdfs://home/aurora/data/{{environment}}')
+
+    PRODUCTION = Profile(version = 'live', environment = 'prod')
+    DEVEL = Profile(version = 'latest',
+                    environment = 'devel',
+                    dataset = 'hdfs://home/aurora/data/test')
+    TEST = Profile(version = 'latest', environment = 'test')
+
+    JOB_TEMPLATE = Job(
+      name = 'application',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = '{{profile.environment}}',
+      task = SequentialTask(
+        name = 'task',
+        resources = Resources(cpu = 2, ram = 4*GB, disk = 8*GB),
+        processes = [
+          Process(name = 'main',
+                  cmdline = 'java -jar application.jar -hdfsPath {{profile.dataset}}')
+        ]
+      )
+    )
+
+    jobs = [
+      JOB_TEMPLATE(instances = 100).bind(profile = PRODUCTION),
+      JOB_TEMPLATE.bind(profile = DEVEL),
+      JOB_TEMPLATE.bind(profile = TEST),
+    ]
+
+In this case, a custom structural "Profile" is created to self-document
+the configuration to some degree. This also allows some schema
+"type-checking", as well as default self-substitution, e.g. in
+`Profile.dataset` above.
+
+So rather than a `.bind()` with a half-dozen substituted variables, you
+can bind a single object that has sensible defaults stored in a single
+place.
diff --git a/source/documentation/0.21.0/reference/configuration-tutorial.md b/source/documentation/0.21.0/reference/configuration-tutorial.md
new file mode 100644
index 0000000..c170d47
--- /dev/null
+++ b/source/documentation/0.21.0/reference/configuration-tutorial.md
@@ -0,0 +1,531 @@
+Aurora Configuration Tutorial
+=============================
+
+This document describes how to write Aurora configuration files, including
+feature descriptions and best practices. When writing a configuration file,
+make use of `aurora job inspect`. It takes the same job key and configuration
+file arguments as `aurora job create` or `aurora update start`. It first
+ensures the configuration parses, then outputs it in human-readable form.
+
+You should read this after going through the general [Aurora Tutorial](../../getting-started/tutorial/).
+
+- [The Basics](#the-basics)
+	- [Use Bottom-To-Top Object Ordering](#use-bottom-to-top-object-ordering)
+- [An Example Configuration File](#an-example-configuration-file)
+- [Defining Process Objects](#defining-process-objects)
+- [Getting Your Code Into The Sandbox](#getting-your-code-into-the-sandbox)
+- [Defining Task Objects](#defining-task-objects)
+	- [SequentialTask: Running Processes in Parallel or Sequentially](#sequentialtask-running-processes-in-parallel-or-sequentially)
+	- [SimpleTask](#simpletask)
+	- [Combining tasks](#combining-tasks)
+- [Defining Job Objects](#defining-job-objects)
+- [The jobs List](#the-jobs-list)
+- [Basic Examples](#basic-examples)
+
+
+The Basics
+----------
+
+To run a job on Aurora, you must specify a configuration file that tells
+Aurora what it needs to know to schedule the job, what Mesos needs to
+run the tasks the job is made up of, and what Thermos needs to run the
+processes that make up the tasks. This file must have
+a `.aurora` suffix.
+
+A configuration file defines a collection of objects, along with parameter
+values for their attributes. An Aurora configuration file contains the
+following three types of objects:
+
+- Job
+- Task
+- Process
+
+A configuration also specifies a list of `Job` objects assigned
+to the variable `jobs`.
+
+- jobs (list of defined Jobs to run)
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a type-checked dictionary
+templating library called *Pystachio*, a powerful tool for
+configuration specification and reuse. Pystachio objects are tailored
+via templates surrounded by {{}}.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the [*Aurora Configuration
+Reference*](../configuration/), without `import` statements - the
+Aurora config loader injects them automatically. Other than that, an `.aurora`
+file works like any other Python script.
+
+[*Aurora Configuration Reference*](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+### Use Bottom-To-Top Object Ordering
+
+A well-structured configuration starts with structural templates (if
+any). Structural templates encapsulate in their attributes all the
+differences between Jobs in the configuration that are not directly
+manipulated at the `Job` level, but typically at the `Process` or `Task`
+level - for example, when certain processes are invoked with slightly
+different settings or input.
+
+After structural templates, define, in order, `Process`es, `Task`s, and
+`Job`s.
+
+Structural template names should be *UpperCamelCased* and their
+instantiations are typically *UPPER\_SNAKE\_CASED*. `Process`, `Task`,
+and `Job` names are typically *lower\_snake\_cased*. Indentation is typically 2
+spaces.
+
+An Example Configuration File
+-----------------------------
+
+The following is a typical configuration file. Don't worry if there are
+parts you don't understand yet, but you may want to refer back to this
+as you read about its individual parts. Note that names surrounded by
+curly braces {{}} are template variables, which the system replaces with
+bound values for the variables.
+
+    # --- templates here ---
+    class Profile(Struct):
+      package_version = Default(String, 'live')
+      java_binary = Default(String, '/usr/lib/jvm/java-1.7.0-openjdk/bin/java')
+      extra_jvm_options = Default(String, '')
+      parent_environment = Default(String, 'prod')
+      parent_serverset = Default(String,
+                                 '/foocorp/service/bird/{{parent_environment}}/bird')
+
+    # --- processes here ---
+    main = Process(
+      name = 'application',
+      cmdline = '{{profile.java_binary}} -server -Xmx1792m '
+                '{{profile.extra_jvm_options}} '
+                '-jar application.jar '
+                '-upstreamService {{profile.parent_serverset}}'
+    )
+
+    # --- tasks ---
+    base_task = SequentialTask(
+      name = 'application',
+      processes = [
+        Process(
+          name = 'fetch',
+          cmdline = 'curl -O https://packages.foocorp.com/{{profile.package_version}}/application.jar'),
+      ]
+    )
+
+    # not always necessary but often useful to have separate task
+    # resource classes
+    staging_task = base_task(resources =
+                     Resources(cpu = 1.0,
+                               ram = 2048*MB,
+                               disk = 1*GB))
+    production_task = base_task(resources =
+                        Resources(cpu = 4.0,
+                                  ram = 2560*MB,
+                                  disk = 10*GB))
+
+    # --- job template ---
+    job_template = Job(
+      name = 'application',
+      role = 'myteam',
+      contact = 'myteam-team@foocorp.com',
+      instances = 20,
+      service = True,
+      task = production_task
+    )
+
+    # -- profile instantiations (if any) ---
+    PRODUCTION = Profile()
+    STAGING = Profile(
+      extra_jvm_options = '-Xloggc:gc.log',
+      parent_environment = 'staging'
+    )
+
+    # -- job instantiations --
+    jobs = [
+      job_template(cluster = 'cluster1', environment = 'prod')
+        .bind(profile = PRODUCTION),
+
+      job_template(cluster = 'cluster2', environment = 'prod')
+        .bind(profile = PRODUCTION),
+
+      job_template(cluster = 'cluster1',
+                   environment = 'staging',
+                   service = False,
+                   task = staging_task,
+                   instances = 2)
+        .bind(profile = STAGING),
+    ]
+
+## Defining Process Objects
+
+Processes are handled by the Thermos system. A process is a single
+executable step run as part of an Aurora task; it consists of a single
+bash-executable statement.
+
+The key (and required) `Process` attributes are:
+
+-   `name`: Any string which is a valid Unix filename (no slashes,
+    NULLs, or leading periods). The `name` value must be unique relative
+    to other Processes in a `Task`.
+-   `cmdline`: A command line run in a bash subshell, so you can use
+    bash scripts. Nothing is supplied for command-line arguments,
+    so `$*` is unspecified.
+
+Many tiny processes make managing configurations more difficult. For
+example, the following is a bad way to define processes.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip'
+    )
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+Since `cmdline` runs in a bash subshell, you can chain commands
+with `&&` or `||`.
+
+When defining a `Task` that is just a list of Processes run in a
+particular order, use `SequentialTask`, as described in the [*Defining*
+`Task` *Objects*](#defining-task-objects) section. The following simplifies and combines the
+above multiple `Process` definitions into just two.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip && '
+                'unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+`Process` also has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#process-objects).
+
+
+## Getting Your Code Into The Sandbox
+
+When using Aurora, you need to get your executable code into its "sandbox", specifically
+the Task sandbox where the code executes for the Processes that make up that Task.
+
+Each Task has a sandbox created when the Task starts and garbage
+collected when it finishes. All of a Task's processes run in its
+sandbox, so processes can share state by using a shared current
+working directory.
+
+Typically, you save this code somewhere. You then need to define a Process
+in your `.aurora` configuration file that fetches the code from that location
+to where the agent can see it. For a public cloud, that can be anywhere public on
+the Internet, such as S3. For a private cloud or internal storage, you need to put
+it on an accessible HDFS cluster or similar storage.
+
+The template for this Process is:
+
+    <name> = Process(
+      name = '<name>',
+      cmdline = '<command to copy and extract code archive into current working directory>'
+    )
+
+Note: Be sure the extracted code archive has an executable.
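+
+For example, a sketch of such a fetch-and-extract Process (the package URL is a
+placeholder for wherever your build artifacts actually live):
+
+    fetch_app = Process(
+      name = 'fetch_app',
+      cmdline = 'curl -O https://packages.example.com/myapp/app.zip && unzip app.zip')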
+
+## Getting Environment Variables Into The Sandbox
+
+Every time a process is forked, the Thermos executor checks for the existence of a
+`.thermos_profile` file; if it exists, it is sourced.
+You can use this mechanism to pass environment variables to the sandbox.
+
+An example for this Process is:
+
+    setup_env = Process(
+      name = 'setup',
+      cmdline = '''cat <<EOF > .thermos_profile
+                   export RESULT=hello
+                   EOF'''
+    )
+
+    read_env = Process(
+      name = 'read',
+      cmdline = 'echo $RESULT'
+    )
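+
+Because each process sources `.thermos_profile` when it is forked, `setup_env`
+must finish before `read_env` starts. A sketch of one way to enforce that
+ordering:
+
+    env_example = SequentialTask(processes = [setup_env, read_env])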
+
+## Defining Task Objects
+
+Tasks are handled by Mesos. A task is a collection of processes that
+runs in a shared sandbox. It's the fundamental unit Aurora uses to
+schedule the datacenter; essentially what Aurora does is find places
+in the cluster to run tasks.
+
+The key (and required) parts of a Task are:
+
+-   `name`: A string giving the Task's name. By default, if a Task is
+    not given a name, it inherits the name of the first Process in its
+    `processes` list.
+
+-   `processes`: An unordered list of Process objects bound to the Task.
+    The value of the optional `constraints` attribute affects the
+    contents as a whole. Currently, the only constraint, `order`, determines if
+    the processes run in parallel or sequentially.
+
+-   `resources`: A `Resource` object defining the Task's resource
+    footprint. A `Resource` object has three attributes:
+    -   `cpu`: A Float, the fractional number of cores the Task requires.
+    -   `ram`: An Integer, RAM bytes the Task requires.
+    -   `disk`: An Integer, disk bytes the Task requires.
+
+A basic Task definition looks like:
+
+    Task(
+        name="hello_world",
+        processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+        resources=Resources(cpu = 1.0,
+                            ram = 1*GB,
+                            disk = 1*GB))
+
+A Task has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#task-object).
+
+
+### SequentialTask: Running Processes in Parallel or Sequentially
+
+By default, a Task with several Processes runs them in parallel. There
+are two ways to run Processes sequentially:
+
+-   Include an `order` constraint in the Task definition's `constraints`
+    attribute whose arguments specify the processes' run order:
+
+        Task( ... processes=[process1, process2, process3],
+              constraints = order(process1, process2, process3), ...)
+
+-   Use `SequentialTask` instead of `Task`; it automatically runs
+    processes in the order specified in the `processes` attribute. No
+    `constraint` parameter is needed:
+
+        SequentialTask( ... processes=[process1, process2, process3] ...)
+
+### SimpleTask
+
+For quickly creating simple tasks, use the `SimpleTask` helper. It
+creates a basic task from a provided name and command line using a
+default set of resources. For example, in a `.aurora` configuration
+file:
+
+    SimpleTask(name="hello_world", command="echo hello world")
+
+is equivalent to
+
+    Task(name="hello_world",
+         processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+         resources=Resources(cpu = 1.0,
+                             ram = 1*GB,
+                             disk = 1*GB))
+
+The simplest idiomatic Job configuration thus becomes:
+
+    import os
+    hello_world_job = Job(
+      task=SimpleTask(name="hello_world", command="echo hello world"),
+      role=os.getenv('USER'),
+      cluster="cluster1")
+
+When written to `hello_world.aurora`, you invoke it with a simple
+`aurora job create cluster1/$USER/test/hello_world hello_world.aurora`.
+
+### Combining tasks
+
+`Tasks.concat` (synonym: `concat_tasks`) and
+`Tasks.combine` (synonym: `combine_tasks`) merge multiple Task definitions
+into a single Task. It may be easier to define complex Jobs
+as smaller constituent Tasks. But since a Job only includes a single
+Task, the subtasks must be combined before using them in a Job.
+Smaller Tasks can also be reused between Jobs, instead of having to
+repeat their definition for multiple Jobs.
+
+With both methods, the merged Task takes the first Task's name. The
+difference between the two is the resulting Task's process ordering.
+
+-   `Tasks.combine` runs its subtasks' processes in no particular order.
+    The new Task's resource consumption is the sum of all its subtasks'
+    consumption.
+
+-   `Tasks.concat` runs its subtasks in the order supplied, with each
+    subtask's processes run serially between tasks. It is analogous to
+    the `order` constraint helper, except at the Task level instead of
+    the Process level. The new Task's resource consumption is the
+    maximum value specified by any subtask for each Resource attribute
+    (cpu, ram and disk).
+
+For example, given the following:
+
+    setup_task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper],
+      # It is important to note that {{Tasks.concat}} has
+      # no effect on the ordering of the processes within a task;
+      # hence the necessity of the {{order}} statement below
+      # (otherwise, the order in which {{download_interpreter}}
+      # and {{update_zookeeper}} run will be non-deterministic)
+      constraints=order(download_interpreter, update_zookeeper),
+      ...
+    )
+
+    run_task = SequentialTask(
+      ...
+      processes=[download_application, start_application],
+      ...
+    )
+
+    combined_task = Tasks.concat(setup_task, run_task)
+
+The `Tasks.concat` command merges the two Tasks into a single Task and
+ensures all processes in `setup_task` run before the processes
+in `run_task`. Conceptually, the task is reduced to:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper,
+                        download_application, start_application),
+      ...
+    )
+
+In the case of `Tasks.combine`, the two schedules run in parallel:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper) +
+                        order(download_application, start_application),
+      ...
+    )
+
+In the latter case, each of the two sequences may operate in parallel.
+Of course, this may not be the intended behavior (for example, if
+the `start_application` Process implicitly relies
+upon `download_interpreter`). Make sure you understand the difference
+between using one or the other.
+
+## Defining Job Objects
+
+A job is a group of identical tasks that Aurora can run in a Mesos cluster.
+
+A `Job` object is defined by the values of several attributes, some
+required and some optional. The required attributes are:
+
+-   `task`: Task object to bind to this job. Note that a Job can
+    only take a single Task.
+
+-   `role`: Job's role account; in other words, the user account to run
+    the job as on a Mesos cluster machine. A common value is
+    `os.getenv('USER')`, which uses a Python call to get the user who
+    submits the job request. The other common value is the service
+    account that runs the job, e.g. `www-data`.
+
+-   `environment`: Job's environment, typical values
+    are `devel`, `test`, or `prod`.
+
+-   `cluster`: Aurora cluster to schedule the job in, defined in
+    `/etc/aurora/clusters.json` or `~/.clusters.json`. You can specify
+    jobs where the only difference is the `cluster`, then at run time
+    only run the Job whose job key includes your desired cluster's name.
+
+You usually see a `name` parameter. By default, `name` inherits its
+value from the Job's associated Task object, but you can override this
+default. Adding `name` to the four required parameters, a Job definition
+might look like:
+
+    foo_job = Job( name = 'foo', cluster = 'cluster1',
+              role = os.getenv('USER'), environment = 'prod',
+              task = foo_task)
+
+In addition to the required attributes, there are several optional
+attributes. Details can be found in the [Aurora Configuration Reference](../configuration/#job-objects).
+
+
+## The jobs List
+
+At the end of your `.aurora` file, you need to specify a list of the
+file's defined Jobs. For example, the following exports the jobs `job1`,
+`job2`, and `job3`.
+
+    jobs = [job1, job2, job3]
+
+This allows the aurora client to invoke commands on those jobs, such as
+starting, updating, or killing them.
+
+
+
+Basic Examples
+==============
+
+These are provided to give a basic understanding of simple Aurora jobs.
+
+### hello_world.aurora
+
+Put the following in a file named `hello_world.aurora`, substituting your own
+values where appropriate, for example the `cluster` name.
+
+    import os
+    hello_world_process = Process(name = 'hello_world', cmdline = 'echo hello world')
+
+    hello_world_task = Task(
+      resources = Resources(cpu = 0.1, ram = 16 * MB, disk = 16 * MB),
+      processes = [hello_world_process])
+
+    hello_world_job = Job(
+      cluster = 'cluster1',
+      role = os.getenv('USER'),
+      task = hello_world_task)
+
+    jobs = [hello_world_job]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world hello_world.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world
+
+### Environment Tailoring
+
+Put the following in a file named `hello_world_productionized.aurora`, substituting your own
+values where appropriate, for example the `cluster` names.
+
+    include('hello_world.aurora')
+
+    production_resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 2 * GB)
+    staging_resources = Resources(cpu = 0.1, ram = 32 * MB, disk = 512 * MB)
+    hello_world_template = hello_world_job(
+        name = "hello_world-{{cluster}}",
+        task = hello_world_task(resources = production_resources))
+
+    jobs = [
+      # production jobs
+      hello_world_template(cluster = 'cluster1', instances = 25),
+      hello_world_template(cluster = 'cluster2', instances = 15),
+
+      # staging jobs
+      hello_world_template(
+        cluster = 'local',
+        instances = 1,
+        task = hello_world_task(resources = staging_resources)),
+    ]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world-cluster1 hello_world_productionized.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world-cluster1
\ No newline at end of file
diff --git a/source/documentation/0.21.0/reference/configuration.md b/source/documentation/0.21.0/reference/configuration.md
new file mode 100644
index 0000000..6e1bb68
--- /dev/null
+++ b/source/documentation/0.21.0/reference/configuration.md
@@ -0,0 +1,704 @@
+Aurora Configuration Reference
+==============================
+
+Don't know where to start? The Aurora configuration schema is very
+powerful, and configurations can become quite complex for advanced use
+cases.
+
+For examples of simple configurations to get something up and running
+quickly, check out the [Tutorial](../../getting-started/tutorial/). When you feel comfortable with the basics, move
+on to the [Configuration Tutorial](../configuration-tutorial/) for more in-depth coverage of
+configuration design.
+
+- [Process Schema](#process-schema)
+    - [Process Objects](#process-objects)
+- [Task Schema](#task-schema)
+    - [Task Object](#task-object)
+    - [Constraint Object](#constraint-object)
+    - [Resource Object](#resource-object)
+- [Job Schema](#job-schema)
+    - [Job Objects](#job-objects)
+    - [UpdateConfig Objects](#updateconfig-objects)
+    - [HealthCheckConfig Objects](#healthcheckconfig-objects)
+    - [Announcer Objects](#announcer-objects)
+    - [Container Objects](#container)
+    - [LifecycleConfig Objects](#lifecycleconfig-objects)
+    - [SlaPolicy Objects](#slapolicy-objects)
+- [Specifying Scheduling Constraints](#specifying-scheduling-constraints)
+- [Template Namespaces](#template-namespaces)
+    - [mesos Namespace](#mesos-namespace)
+    - [thermos Namespace](#thermos-namespace)
+
+
+Process Schema
+==============
+
+Process objects consist of required `name` and `cmdline` attributes. You can customize Process
+behavior with its optional attributes. Remember, Processes are handled by Thermos.
+
+### Process Objects
+
+  **Attribute Name**  | **Type**    | **Description**
+  ------------------- | :---------: | ---------------------------------
+   **name**           | String      | Process name (Required)
+   **cmdline**        | String      | Command line (Required)
+   **max_failures**   | Integer     | Maximum process failures (Default: 1)
+   **daemon**         | Boolean     | When True, this is a daemon process. (Default: False)
+   **ephemeral**      | Boolean     | When True, this is an ephemeral process. (Default: False)
+   **min_duration**   | Integer     | Minimum duration between process restarts in seconds. (Default: 5)
+   **final**          | Boolean     | When True, this process is a finalizing one that should run last. (Default: False)
+   **logger**         | Logger      | Struct defining the log behavior for the process. (Default: Empty)
+
+#### name
+
+The name is any valid UNIX filename string (specifically no
+slashes, NULLs or leading periods). Within a Task object, each Process name
+must be unique.
+
+#### cmdline
+
+The command line run by the process. The command line is invoked in a bash
+subshell, so it can involve full-blown bash scripts. However, nothing is
+supplied for command-line arguments, so `$*` is unspecified.
+
+#### max_failures
+
+The maximum number of failures (non-zero exit statuses) this process can
+have before being marked permanently failed and not retried. If a
+process permanently fails, Thermos looks at the failure limit of the task
+containing the process (usually 1) to determine if the task has
+failed as well.
+
+Setting `max_failures` to 0 makes the process retry
+indefinitely until it achieves a successful (zero) exit status.
+It retries at most once every `min_duration` seconds to prevent
+an effective denial of service attack on the coordinating Thermos scheduler.
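+
+For example, a sketch of a process that retries until it succeeds, waiting at
+least 30 seconds between attempts (the command is a placeholder):
+
+    wait_for_db = Process(
+      name = 'wait_for_db',
+      cmdline = 'nc -z localhost 5432',   # hypothetical readiness check
+      max_failures = 0,
+      min_duration = 30)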
+
+#### daemon
+
+By default, Thermos processes are non-daemon. If `daemon` is set to True, a
+successful (zero) exit status does not prevent future process runs.
+Instead, the process reinvokes after `min_duration` seconds.
+However, the maximum failure limit still applies. A combination of
+`daemon=True` and `max_failures=0` causes a process to retry
+indefinitely regardless of exit status. This should be avoided
+for very short-lived processes because of the accumulation of
+checkpointed state for each process run. When running in Mesos
+specifically, `max_failures` is capped at 100.
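+
+As an illustration (the script name is a placeholder), a process that is
+re-run after every exit, successful or not, at most once per minute:
+
+    pusher = Process(
+      name = 'metrics_pusher',
+      cmdline = './push_metrics.sh',   # hypothetical helper script
+      daemon = True,
+      max_failures = 0,
+      min_duration = 60)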
+
+#### ephemeral
+
+By default, Thermos processes are non-ephemeral. If `ephemeral` is set to
+True, the process' status is not used to determine if its containing task
+has completed. For example, consider a task with a non-ephemeral
+webserver process and an ephemeral logsaver process
+that periodically checkpoints its log files to a centralized data store.
+The task is considered finished once the webserver process has
+completed, regardless of the logsaver's current status.
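+
+A sketch of that example (command lines are placeholders):
+
+    webserver = Process(name = 'webserver', cmdline = './run_server.sh')
+
+    logsaver = Process(
+      name = 'logsaver',
+      cmdline = './checkpoint_logs.sh',
+      daemon = True,
+      ephemeral = True)
+
+    # The task is complete when 'webserver' finishes, whatever 'logsaver' is doing.
+    webserver_task = Task(
+      name = 'webserver',
+      processes = [webserver, logsaver],
+      resources = Resources(cpu = 1.0, ram = 1*GB, disk = 1*GB))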
+
+#### min_duration
+
+Processes may succeed or fail multiple times during a single task's
+duration. Each of these is called a *process run*. `min_duration` is
+the minimum number of seconds the scheduler waits before running the
+same process.
+
+#### final
+
+Processes can be grouped into two classes: ordinary processes and
+finalizing processes. By default, Thermos processes are ordinary. They
+run as long as the task is considered healthy (i.e., no failure
+limits have been reached.) But once all regular Thermos processes
+finish or the task reaches a certain failure threshold, it
+moves into a "finalization" stage and runs all finalizing
+processes. These are typically processes necessary for cleaning up the
+task, such as log checkpointers, or perhaps e-mail notifications that
+the task completed.
+
+Finalizing processes may not depend upon ordinary processes or
+vice versa; however, finalizing processes may depend upon other
+finalizing processes and otherwise follow a typical process
+schedule.
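+
+For example, a sketch of a finalizing process that archives logs after the rest
+of the task has finished (the command is a placeholder):
+
+    upload_logs = Process(
+      name = 'upload_logs',
+      cmdline = './upload_logs_to_archive.sh',   # hypothetical archiving script
+      final = True)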
+
+#### logger
+
+The default behavior of Thermos is to store stderr/stdout logs in files which grow unbounded.
+In the event that you have large log volume, you may want to configure Thermos to automatically
+rotate logs after they grow to a certain size, which can prevent your job from using more than its
+allocated disk space.
+
+Logger objects specify a `destination` for Process logs which is, by default, `file` - a pair of
+`stdout` and `stderr` files. It's also possible to specify `console` to get logs output to
+the Process stdout and stderr streams, `none` to suppress any logs output or `both` to send logs to
+files and console streams.
+
+The default Logger `mode` is `standard` which lets the stdout and stderr streams grow without bound.
+
+  **Attribute Name**  | **Type**          | **Description**
+  ------------------- | :---------------: | ---------------------------------
+   **destination**    | LoggerDestination | Destination of logs. (Default: `file`)
+   **mode**           | LoggerMode        | Mode of the logger. (Default: `standard`)
+   **rotate**         | RotatePolicy      | An optional rotation policy. (Default: `Empty`)
+
+A RotatePolicy describes log rotation behavior when `mode` is set to `rotate`; it is ignored
+otherwise. If `rotate` is `Empty` or `RotatePolicy()` while `mode` is set to `rotate`, the
+defaults below are used.
+
+  **Attribute Name**  | **Type**     | **Description**
+  ------------------- | :----------: | ---------------------------------
+   **log_size**       | Integer      | Maximum size (in bytes) of an individual log file. (Default: 100 MiB)
+   **backups**        | Integer      | The maximum number of backups to retain. (Default: 5)
+
+An example process configuration is as follows:
+
+        process = Process(
+          name='process',
+          logger=Logger(
+            destination=LoggerDestination('both'),
+            mode=LoggerMode('rotate'),
+            rotate=RotatePolicy(log_size=5*MB, backups=5)
+          )
+        )
+
+Task Schema
+===========
+
+Tasks fundamentally consist of a `name` and a list of Process objects stored as the
+value of the `processes` attribute. Processes can be further constrained with
+`constraints`. By default, `name`'s value inherits from the first Process in the
+`processes` list, so for simple `Task` objects with one Process, `name`
+can be omitted. In Mesos, `resources` is also required.
+
+### Task Object
+
+   **param**               | **type**                         | **description**
+   ---------               | :---------:                      | ---------------
+   ```name```              | String                           | Task name (Required) (Default: ```processes[0].name```)
+   ```processes```         | List of ```Process``` objects    | List of ```Process``` objects bound to this task. (Required)
+   ```constraints```       | List of ```Constraint``` objects | List of ```Constraint``` objects constraining processes.
+   ```resources```         | ```Resource``` object            | Resource footprint. (Required)
+   ```max_failures```      | Integer                          | Maximum process failures before being considered failed (Default: 1)
+   ```max_concurrency```   | Integer                          | Maximum number of concurrent processes (Default: 0, unlimited concurrency.)
+   ```finalization_wait``` | Integer                          | Amount of time allocated for finalizing processes, in seconds. (Default: 30)
+
+#### name
+`name` is a string denoting the name of this task. It defaults to the name of the first Process in
+the list of Processes associated with the `processes` attribute.
+
+#### processes
+
+`processes` is an unordered list of `Process` objects. To constrain the order
+in which they run, use `constraints`.
+
+##### constraints
+
+A list of `Constraint` objects. Currently it supports only one type,
+the `order` constraint. `order` is a list of process names
+that should run in the order given. For example,
+
+        process = Process(cmdline = "echo hello {{name}}")
+        task = Task(name = "echoes",
+                    processes = [process(name = "jim"), process(name = "bob")],
+                    constraints = [Constraint(order = ["jim", "bob"])])
+
+Constraints can be supplied ad-hoc and in duplicate. Not all
+Processes need be constrained; however, Tasks with cycles are
+rejected by the Thermos scheduler.
+
+Use the `order` function as shorthand to generate `Constraint` lists.
+The following:
+
+        order(process1, process2)
+
+is shorthand for
+
+        [Constraint(order = [process1.name(), process2.name()])]
+
+The `order` function accepts Process name strings `('foo', 'bar')` or the processes
+themselves, e.g. `foo=Process(name='foo', ...)`, `bar=Process(name='bar', ...)`,
+`constraints=order(foo, bar)`.
+
+#### resources
+
+Takes a `Resource` object, which specifies the amounts of CPU, memory, and disk space resources
+to allocate to the Task.
+
+#### max_failures
+
+`max_failures` is the number of failed processes needed for the `Task` to be
+marked as failed.
+
+For example, assume a Task has two Processes and a `max_failures` value of `2`:
+
+        template = Process(max_failures=10)
+        task = Task(
+          name = "fail",
+          processes = [
+             template(name = "failing", cmdline = "exit 1"),
+             template(name = "succeeding", cmdline = "exit 0")
+          ],
+          max_failures=2)
+
+The `failing` Process could fail 10 times before being marked as permanently
+failed, and the `succeeding` Process could succeed on the first run. However,
+the task would succeed despite only allowing for two failed processes. To be more
+specific, there would be 10 failed process runs yet 1 failed process. Both processes
+would have to fail for the Task to fail.
+
+#### max_concurrency
+
+For Tasks with a number of expensive but otherwise independent
+processes, you may want to limit the amount of concurrency
+the Thermos scheduler provides rather than artificially constraining
+it via `order` constraints. For example, a test framework may
+generate a task with 100 test run processes, but wants to run it on
+a machine with only 4 cores. You can limit the amount of parallelism to
+4 by setting `max_concurrency=4` in your task configuration.
+
+For example, the following task spawns 180 Processes ("mappers")
+to compute individual elements of a 180 degree sine table, all dependent
+upon one final Process ("reducer") to tabulate the results:
+
+    def make_mapper(id):
+      return Process(
+        name = "mapper%03d" % id,
+        cmdline = "echo 'scale=50;s(%d*4*a(1)/180)' | bc -l > temp.sine_table.%03d" % (id, id))
+
+    def make_reducer():
+      return Process(name = "reducer",
+                     cmdline = "cat temp.* | nl > sine_table.txt && rm -f temp.*")
+
+    processes = map(make_mapper, range(180))
+
+    task = Task(
+      name = "mapreduce",
+      processes = processes + [make_reducer()],
+      constraints = [Constraint(order = [mapper.name(), 'reducer']) for mapper
+                     in processes],
+      max_concurrency = 8)
+
+#### finalization_wait
+
+Process execution is organized into three active stages: `ACTIVE`,
+`CLEANING`, and `FINALIZING`. The `ACTIVE` stage is when ordinary processes run.
+This stage lasts as long as Processes are running and the Task is healthy.
+The moment either all Processes have finished successfully or the Task has reached a
+maximum Process failure limit, it goes into the `CLEANING` stage and sends
+SIGTERMs to all currently running Processes and their process trees.
+Once all Processes have terminated, the Task goes into the `FINALIZING` stage
+and invokes the schedule of all Processes with the `final` attribute set to True.
+
+This whole process from the end of `ACTIVE` stage to the end of `FINALIZING`
+must happen within `finalization_wait` seconds. If it does not
+finish during that time, all remaining Processes are sent SIGKILLs
+(or if they depend upon uncompleted Processes, are
+never invoked.)
+
+When running on Aurora, the `finalization_wait` is capped at 60 seconds.
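+
+For instance, a sketch of a task that gives its finalizing processes up to 45
+seconds to run (the process definitions are placeholders):
+
+    cleanup = Process(name = 'cleanup', cmdline = './cleanup.sh', final = True)
+
+    app_task = Task(
+      name = 'app',
+      processes = [Process(name = 'app', cmdline = 'java -jar app.jar'), cleanup],
+      resources = Resources(cpu = 1.0, ram = 1*GB, disk = 1*GB),
+      finalization_wait = 45)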
+
+### Constraint Object
+
+Current constraint objects only support a single ordering constraint, `order`,
+which specifies its processes run sequentially in the order given. By
+default, all processes run in parallel when bound to a `Task` without
+ordering constraints.
+
+   param | type           | description
+   ----- | :----:         | -----------
+   order | List of String | List of processes by name (String) that should be run serially.
+
+### Resource Object
+
+Specifies the amount of CPU, RAM, and disk resources the task needs. See the
+[Resource Isolation document](../../features/resource-isolation/) for suggested values and to understand how
+resources are allocated.
+
+  param      | type    | description
+  -----      | :----:  | -----------
+  ```cpu```  | Float   | Fractional number of cores required by the task.
+  ```ram```  | Integer | Bytes of RAM required by the task.
+  ```disk``` | Integer | Bytes of disk required by the task.
+  ```gpu```  | Integer | Number of GPU cores required by the task.
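+
+For example, a sketch of a modest resource footprint (values are illustrative only):
+
+    resources = Resources(cpu = 2.0, ram = 2*GB, disk = 4*GB)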
+
+
+Job Schema
+==========
+
+### Job Objects
+
+*Note: Specifying a ```Container``` object as the value of the ```container``` property is
+  deprecated in favor of setting its value directly to the appropriate ```Docker``` or ```Mesos```
+  container type*
+
+*Note: Specifying preemption behavior of tasks through `production` flag is deprecated in favor of
+  electing appropriate task tier via `tier` attribute.*
+
+   name | type | description
+   ------ | :-------: | -------
+  ```task``` | Task | The Task object to bind to this job. Required.
+  ```name``` | String | Job name. (Default: inherited from the task attribute's name)
+  ```role``` | String | Job role account. Required.
+  ```cluster``` | String | Cluster in which this job is scheduled. Required.
+  ```environment``` | String | Job environment, default ```devel```. By default must be one of ```prod```, ```devel```, ```test``` or ```staging<number>``` but it can be changed by the Cluster operator using the scheduler option `allowed_job_environments`.
+  ```contact``` | String | Best email address to reach the owner of the job. For production jobs, this is usually a team mailing list.
+  ```instances```| Integer | Number of instances (sometimes referred to as replicas or shards) of the task to create. (Default: 1)
+  ```cron_schedule``` | String | Cron schedule in cron format. May only be used with non-service jobs. See [Cron Jobs](../../features/cron-jobs/) for more information. Default: None (not a cron job.)
+  ```cron_collision_policy``` | String | Policy to use when a cron job is triggered while a previous run is still active. KILL\_EXISTING: kill the previous run and schedule the new run. CANCEL\_NEW: let the previous run continue and cancel the new run. (Default: KILL_EXISTING)
+  ```update_config``` | ```UpdateConfig``` object | Parameters for controlling the rate and policy of rolling updates.
+  ```constraints``` | dict | Scheduling constraints for the tasks. See the section on the [constraint specification language](#specifying-scheduling-constraints)
+  ```service``` | Boolean | If True, restart tasks regardless of success or failure. (Default: False)
+  ```max_task_failures``` | Integer | Maximum number of failures after which the task is considered to have failed (Default: 1) Set to -1 to allow for infinite failures
+  ```priority``` | Integer | Preemption priority to give the task (Default 0). Tasks with higher priorities may preempt tasks at lower priorities.
+  ```production``` | Boolean |  (Deprecated) Whether or not this is a production task that may [preempt](../../features/multitenancy/#preemption) other tasks (Default: False). Production job role must have the appropriate [quota](../../features/multitenancy/#preemption).
+  ```health_check_config``` | ```HealthCheckConfig``` object | Parameters for controlling a task's health checks. HTTP health check is only used if a health port was assigned with a command line wildcard.
+  ```container``` | Choice of ```Container```, ```Docker``` or ```Mesos``` object | An optional container to run all processes inside of.
+  ```lifecycle``` | ```LifecycleConfig``` object | An optional task lifecycle configuration that dictates commands to be executed on startup/teardown.  HTTP lifecycle is enabled by default if the "health" port is requested.  See [LifecycleConfig Objects](#lifecycleconfig-objects) for more information.
+  ```tier``` | String | Task tier type. The default scheduler tier configuration allows for 3 tiers: `revocable`, `preemptible`, and `preferred`. If a tier is not elected, Aurora assigns the task to a tier based on its choice of `production` (that is `preferred` for production and `preemptible` for non-production jobs). See the section on [Configuration Tiers](../../features/multitenancy/#configuration-tiers) for more information.
+  ```announce``` | ```Announcer``` object | Optionally enable Zookeeper ServerSet announcements. See [Announcer Objects](#announcer-objects) for more information.
+  ```enable_hooks``` | Boolean | Whether to enable [Client Hooks](../client-hooks/) for this job. (Default: False)
+  ```partition_policy``` | ```PartitionPolicy``` object | An optional partition policy that allows job owners to define how to handle partitions for running tasks (in partition-aware Aurora clusters)
+  ```metadata``` | list of ```Metadata``` objects | list of ```Metadata``` objects for user's customized metadata information.
+  ```executor_config``` | ```ExecutorConfig``` object | Allows choosing an alternative executor defined in `custom_executor_config` to be used instead of Thermos. Tasks will be launched with Thermos as the executor by default. See [Custom Executors](../../features/custom-executors/) for more info.
+  ```sla_policy``` |  Choice of ```CountSlaPolicy```, ```PercentageSlaPolicy``` or ```CoordinatorSlaPolicy``` object | An optional SLA policy that allows job owners to describe the SLA requirements for the job. See [SlaPolicy Objects](#slapolicy-objects) for more information.
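+
+A minimal sketch tying the required attributes together (names and values are
+illustrative only, and `hello_task` is assumed to be a Task defined earlier in
+the same file):
+
+    hello_job = Job(
+      task = hello_task,
+      role = 'www-data',
+      cluster = 'cluster1',
+      environment = 'devel',
+      contact = 'myteam@example.com',
+      instances = 3)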
+
+
+### UpdateConfig Objects
+
+Parameters for controlling the rate and policy of rolling updates.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+| ```watch_secs```             | Integer  | Minimum number of seconds a shard must remain in ```RUNNING``` state before considered a success (Default: 45)
+| ```max_per_shard_failures``` | Integer  | Maximum number of restarts per shard during update. Increments total failure count when this limit is exceeded. (Default: 0)
+| ```max_total_failures```     | Integer  | Maximum number of shard failures to be tolerated in total during an update. Cannot be greater than or equal to the total number of tasks in a job. (Default: 0)
+| ```rollback_on_failure```    | boolean  | When False, prevents auto rollback of a failed update (Default: True)
+| ```wait_for_batch_completion```| boolean | When True, all threads from a given batch will be blocked from picking up new instances until the entire batch is updated. This essentially simulates the legacy sequential updater algorithm. (Default: False)
+| ```pulse_interval_secs```    | Integer  |  Indicates a [coordinated update](../../features/job-updates/#coordinated-job-updates). If no pulses are received within the provided interval the update will be blocked. Beta-updater only. Will fail on submission when used with client updater. (Default: None)
+| ```sla_aware```              | boolean  | When True, updates will only update an instance if it does not break the task's specified [SLA Requirements](../../features/sla-requirements/). (Default: None)
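+
+For example, a sketch of a fairly conservative rolling-update policy (the values
+are illustrative only):
+
+    update_config = UpdateConfig(
+      batch_size = 2,
+      watch_secs = 60,
+      max_per_shard_failures = 1,
+      max_total_failures = 0)
+
+The resulting object is passed as the `update_config` attribute of a `Job`.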
+
+#### Using the `sla_aware` option
+
+There are some nuances around the `sla_aware` option that users should be aware of:
+
+- SLA-aware updates work in tandem with maintenance. Draining a host that has an instance of the
+job being updated affects the SLA and thus will be taken into account when the update determines
+whether or not it is safe to update another instance.
+- SLA-aware updates will use the [SLAPolicy](../../features/sla-requirements/#custom-sla) of the
+*newest* configuration when determining whether or not it is safe to update an instance. For
+example, if the current configuration specifies a
+[PercentageSlaPolicy](../../features/sla-requirements/#percentageslapolicy-objects) that allows for
+5% of instances to be down and the updated configuration increases this value to 10%, the SLA
+calculation will be done using the 10% policy. Be mindful of this when doing an update that
+modifies the `SLAPolicy` since it may be possible to put the old configuration in a bad state
+that the new configuration would not be affected by. Additionally, if the update is rolled back,
+then the rollback will use the old `SLAPolicy` (or none if there was not one previously).
+- If using the [CoordinatorSlaPolicy](../../features/sla-requirements/#coordinatorslapolicy-objects),
+it is important to pay attention to the `batch_size` of the update. If you have a complex SLA
+requirement, then you may be limiting the throughput of your updates with an insufficient
+`batch_size`. For example, imagine you have a job with 9 instances that represent three
+replicated caches, and you can only update one instance per replica set: `[0 1 2]
+[3 4 5] [6 7 8]` (the number indicates the instance ID and the brackets represent replica
+sets). If your `batch_size` is 3, then you will slowly update one replica set at a time. If your
+`batch_size` is 9, then you can update all replica sets in parallel, speeding up the update.
+- If an instance fails an SLA check for an update, then it will be rechecked starting at a delay
+from `sla_aware_kill_retry_min_delay` and exponentially increasing up to
+`sla_aware_kill_retry_max_delay`. These are cluster-operator set values.
+
+### HealthCheckConfig Objects
+
+Parameters for controlling a task's health checks via HTTP or a shell command.
+
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```health_checker```           | HealthCheckerConfig | Configure what kind of health check to use.
+| ```initial_interval_secs```    | Integer   | Initial grace period (during which health-check failures are ignored) while performing health checks. (Default: 15)
+| ```interval_secs```            | Integer   | Interval on which to check the task's health. (Default: 10)
+| ```max_consecutive_failures``` | Integer   | Maximum number of consecutive failures that will be tolerated before considering a task unhealthy (Default: 0)
+| ```min_consecutive_successes``` | Integer   | Minimum number of consecutive successful health checks required before considering a task healthy (Default: 1)
+| ```timeout_secs```             | Integer   | Health check timeout. (Default: 1)
+
+### HealthCheckerConfig Objects
+| param                          | type                | description
+| -------                        | :-------:           | --------
+| ```http```                     | HttpHealthChecker  | Configure health check to use HTTP. (Default)
+| ```shell```                    | ShellHealthChecker | Configure health check via a shell command.
+
+### HttpHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```endpoint```                 | String    | HTTP endpoint to check (Default: /health)
+| ```expected_response```        | String    | If not empty, fail the HTTP health check if the response differs. Case insensitive. (Default: ok)
+| ```expected_response_code```   | Integer   | If not zero, fail the HTTP health check if the response code differs. (Default: 0)
+
+### ShellHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```shell_command```            | String    | An alternative to HTTP health checking. Specifies a shell command that will be executed. Any non-zero exit status will be interpreted as a health check failure.
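+
+For example, a hedged sketch of the two health-checker styles; the endpoint and shell command below are placeholders, not defaults:
+
+```
+# HTTP health checking against a custom endpoint:
+http_health_check = HealthCheckConfig(
+  health_checker = HealthCheckerConfig(
+    http = HttpHealthChecker(endpoint = '/status', expected_response = 'ok')
+  ),
+  initial_interval_secs = 30,
+  interval_secs = 10,
+  max_consecutive_failures = 3
+)
+
+# Alternatively, health checking via a shell command (placeholder command):
+shell_health_check = HealthCheckConfig(
+  health_checker = HealthCheckerConfig(
+    shell = ShellHealthChecker(shell_command = 'test -f /tmp/healthy')
+  )
+)
+
+# Either object is attached to a job via its `health_check_config` field.
+```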
+
+### PartitionPolicy Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```reschedule```               | Boolean   | Whether or not to reschedule when running tasks become partitioned (Default: True)
+| ```delay_secs```               | Integer   | How long to delay transitioning to LOST when running tasks are partitioned. (Default: 0)
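+
+A minimal sketch, assuming partition awareness is enabled in the cluster: wait ten minutes before transitioning a partitioned instance to `LOST`.
+
+```
+partition_policy = PartitionPolicy(
+  reschedule = True,   # still reschedule once the instance is declared LOST
+  delay_secs = 600     # wait out partitions for up to 10 minutes first
+)
+
+# Attached to a job via its `partition_policy` field.
+```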
+
+### Metadata Objects
+
+Describes a piece of user metadata in a key-value pair
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```key```        | String          | Indicate which metadata the user provides
+  ```value```      | String          | Provide the metadata content for corresponding key
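+
+For instance, a short sketch attaching free-form metadata to a job (the keys and values are placeholders):
+
+```
+metadata = [
+  Metadata(key = 'team', value = 'infrastructure'),
+  Metadata(key = 'ticket', value = 'OPS-1234')
+]
+
+# Attached to a job via its `metadata` field.
+```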
+
+### ExecutorConfig Objects
+
+Describes an Executor name and data to pass to the Mesos Task
+
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```name```               | String   | Name of the executor to use for this task. Must match the name of an executor in `custom_executor_config` or Thermos (`AuroraExecutor`). (Default: AuroraExecutor)
+| ```data```               | String   | Data blob to pass on to the executor. (Default: "")
+
+### Announcer Objects
+
+If the `announce` field in the Job configuration is set, each task will be
+registered in the ServerSet `/aurora/role/environment/jobname` in the
+zookeeper ensemble configured by the executor (which can optionally be overridden by specifying the
+`zk_path` parameter).  If no Announcer object is specified,
+no announcement will take place.  For more information about ServerSets, see the [Service Discovery](../../features/service-discovery/)
+documentation.
+
+By default, the hostname in the registered endpoints will be the `--hostname` parameter
+that is passed to the mesos agent. To override the hostname value, the executor can be started
+with `--announcer-hostname=<overridden_value>`. If you decide to use `--announcer-hostname` and if
+the overridden value needs to change for every executor, then the executor has to be started inside a wrapper, see [Executor Wrapper](../../operations/configuration/#thermos-executor-wrapper).
+
+For example, if you want the hostname in the endpoint to be an IP address instead of the hostname,
+the `--hostname` parameter to the mesos agent can be set to the machine IP or the executor can
+be started with `--announcer-hostname=<host_ip>` while wrapping the executor inside a script.
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```primary_port```             | String    | Which named port to register as the primary endpoint in the ServerSet (Default: `http`)
+| ```portmap```                  | dict      | A mapping of additional endpoints to be announced in the ServerSet (Default: `{ 'aurora': '{{primary_port}}' }`)
+| ```zk_path```                  | String    | Zookeeper serverset path override (executor must be started with the `--announcer-allow-custom-serverset-path` parameter)
+
+#### Port aliasing with the Announcer `portmap`
+
+The primary endpoint registered in the ServerSet is the one allocated to the port
+specified by the `primary_port` in the `Announcer` object, by default
+the `http` port.  This port can be referenced from anywhere within a configuration
+as `{{thermos.ports[http]}}`.
+
+Without the port map, each named port would be allocated a unique port number.
+The `portmap` allows two different named ports to be aliased together.  The default
+`portmap` aliases the `aurora` port (i.e. `{{thermos.ports[aurora]}}`) to
+the `http` port.  Even though the two ports can be referenced independently,
+only one port is allocated by Mesos.  Any port referenced in a `Process` object
+but not in the portmap will be allocated dynamically by Mesos and announced as well.
+
+It is possible to use the portmap to alias names to static port numbers, e.g.
+`{'http': 80, 'https': 443, 'aurora': 'http'}`.  In this case, referencing
+`{{thermos.ports[aurora]}}` would look up `{{thermos.ports[http]}}` then
+find a static port 80.  No port would be requested of or allocated by Mesos.
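+
+The static aliasing above could be expressed as the following sketch (whether static ports are appropriate depends on the caution below):
+
+```
+announce = Announcer(
+  primary_port = 'http',
+  portmap = {'http': 80, 'https': 443, 'aurora': 'http'}
+)
+
+# Attached to a job via its `announce` field.
+```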
+
+Static ports should be used cautiously as Aurora does nothing to prevent two
+tasks with the same static port allocations from being co-scheduled.
+External constraints such as agent attributes should be used to enforce such
+guarantees should they be needed.
+
+
+### Container Objects
+
+Describes the container the job's processes will run inside. If not using Docker or the Mesos
+unified-container, the container can be omitted from your job config.
+
+  param          | type           | description
+  -----          | :----:         | -----------
+  ```mesos```    | Mesos          | A native Mesos container to use.
+  ```docker```   | Docker         | A Docker container to use (via Docker engine)
+
+### Mesos Object
+
+  param            | type                           | description
+  -----            | :----:                         | -----------
+  ```image```      | Choice(AppcImage, DockerImage) | An optional filesystem image to use within this container.
+  ```volumes```    | List(Volume)                   | An optional list of volume mounts for this container.
+
+### Volume Object
+
+  param                  | type     | description
+  -----                  | :----:   | -----------
+  ```container_path```   | String   | Mount point inside the container.
+  ```host_path```        | String   | Path on the host to mount.
+  ```mode```             | Enum     | Mode of the mount, can be 'RW' or 'RO'.
+
+### AppcImage
+
+Describes an AppC filesystem image.
+
+  param          | type   | description
+  -----          | :----: | -----------
+  ```name```     | String | The name of the appc image.
+  ```image_id``` | String | The [image id](https://github.com/appc/spec/blob/master/spec/aci.md#image-id) of the appc image.
+
+### DockerImage
+
+Describes a Docker filesystem image.
+
+  param      | type   | description
+  -----      | :----: | -----------
+  ```name``` | String | The name of the docker image.
+  ```tag```  | String | The tag that identifies the docker image.
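+
+Putting these together, a sketch of a container using the Mesos unified containerizer with a Docker filesystem image and a read-only volume; the image name, tag and paths are placeholders:
+
+```
+container = Container(
+  mesos = Mesos(
+    image = DockerImage(name = 'library/python', tag = '3.9-slim'),
+    volumes = [
+      # Volumes require the scheduler option -allow_container_volumes.
+      Volume(container_path = 'etc/myapp', host_path = '/etc/myapp', mode = 'RO')
+    ]
+  )
+)
+
+# Attached to a job via its `container` field.
+```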
+
+
+### Docker Object
+
+*Note: In order to correctly execute processes inside a job, the Docker container must have Python 2.7 installed.*
+*Note: For a private docker registry, Mesos mandates that the docker credential file be named `.dockercfg`, even though docker may create a credential file with a different name on various platforms. Also, the `.dockercfg` file needs to be copied into the sandbox using the `-thermos_executor_resources` flag specified when starting Aurora.*
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```image```      | String          | The name of the docker image to execute.  If the image does not exist locally it will be pulled with ```docker pull```.
+  ```parameters``` | List(Parameter) | Additional parameters to pass to the Docker engine.
+
+### Docker Parameter Object
+
+Docker CLI parameters. This needs to be enabled by the scheduler `-allow_docker_parameters` option.
+See [Docker Command Line Reference](https://docs.docker.com/reference/commandline/run/) for valid parameters.
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```name```       | String          | The name of the docker parameter. E.g. volume
+  ```value```      | String          | The value of the parameter. E.g. /usr/local/bin:/usr/bin:rw
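+
+For example, a sketch of a Docker-engine container passing an extra parameter (only honored when the scheduler is started with `-allow_docker_parameters`; the image is a placeholder):
+
+```
+container = Container(
+  docker = Docker(
+    image = 'nginx:1.15',   # pulled with `docker pull` if not present locally
+    parameters = [
+      Parameter(name = 'volume', value = '/usr/local/bin:/usr/bin:rw')
+    ]
+  )
+)
+```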
+
+
+### LifecycleConfig Objects
+
+*Note: The only lifecycle configuration supported is the HTTP lifecycle via the HttpLifecycleConfig.*
+
+  param          | type                | description
+  -----          | :----:              | -----------
+  ```http```     | HttpLifecycleConfig | Configure the lifecycle manager to send lifecycle commands to the task via HTTP.
+
+### HttpLifecycleConfig Objects
+
+*Note: The combined `graceful_shutdown_wait_secs` and `shutdown_wait_secs` is implicitly upper bounded by the `--stop_timeout_in_secs` flag exposed by the executor (see options [here](https://github.com/apache/aurora/blob/master/src/main/python/apache/aurora/executor/bin/thermos_executor_main.py), default is 2 minutes). Therefore, if the user specifies values that add up to more than `--stop_timeout_in_secs`, the task will be killed earlier than the user anticipates (see the termination lifecycle [here](https://aurora.apache.org/documentation/latest/reference/task-lifecycle/#forceful-termination-killing-restarting)). Furthermore, `stop_timeout_in_secs` itself is implicitly upper bounded by two scheduler options: `transient_task_state_timeout` and `preemption_slot_hold_time` (see reference [here](http://aurora.apache.org/documentation/latest/reference/scheduler-configuration/)). If the `stop_timeout_in_secs` exceeds either of these scheduler options, tasks could be designated as LOST or tasks utilizing preemption could lose their desired slot respectively. Cluster operators should be aware of these timings should they change the defaults.*
+
+  param                             | type    | description
+  -----                             | :----:  | -----------
+  ```port```                        | String  | The named port to send POST commands. (Default: health)
+  ```graceful_shutdown_endpoint```  | String  | Endpoint to hit to indicate that a task should gracefully shutdown. (Default: /quitquitquit)
+  ```shutdown_endpoint```           | String  | Endpoint to hit to give a task its final warning before being killed. (Default: /abortabortabort)
+  ```graceful_shutdown_wait_secs``` | Integer | The amount of time (in seconds) to wait after hitting the ```graceful_shutdown_endpoint``` before proceeding with the [task termination lifecycle](https://aurora.apache.org/documentation/latest/reference/task-lifecycle/#forceful-termination-killing-restarting). (Default: 5)
+  ```shutdown_wait_secs```          | Integer | The amount of time (in seconds) to wait after hitting the ```shutdown_endpoint``` before proceeding with the [task termination lifecycle](https://aurora.apache.org/documentation/latest/reference/task-lifecycle/#forceful-termination-killing-restarting). (Default: 5)
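+
+A sketch of a customized HTTP lifecycle; the wait values are placeholders and must stay within the executor's `--stop_timeout_in_secs`, as noted above:
+
+```
+lifecycle = LifecycleConfig(
+  http = HttpLifecycleConfig(
+    graceful_shutdown_endpoint = '/quitquitquit',
+    shutdown_endpoint = '/abortabortabort',
+    graceful_shutdown_wait_secs = 10,
+    shutdown_wait_secs = 10
+  )
+)
+
+# Attached to a job via its `lifecycle` field.
+```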
+
+#### graceful\_shutdown\_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint to request that the task gracefully shut itself down.  This is a
+courtesy call before the `shutdown_endpoint` is invoked
+`graceful_shutdown_wait_secs` seconds later.
+
+#### shutdown_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint as a final warning before the task is shut down.  If the task
+does not shut down on its own after `shutdown_wait_secs` seconds, it will be
+forcefully killed.
+
+
+### SlaPolicy Objects
+
+Configuration for specifying custom [SLA requirements](../../features/sla-requirements/) for a job. There are three supported SLA policies,
+namely [`CountSlaPolicy`](#countslapolicy-objects), [`PercentageSlaPolicy`](#percentageslapolicy-objects) and [`CoordinatorSlaPolicy`](#coordinatorslapolicy-objects).
+
+
+### CountSlaPolicy Objects
+
+  param                             | type    | description
+  -----                             | :----:  | -----------
  ```count```                       | Integer | The number of active instances required every `duration_secs`.
+  ```duration_secs```               | Integer | Minimum time duration a task needs to be `RUNNING` to be treated as active.
+
+### PercentageSlaPolicy Objects
+
+  param                             | type    | description
+  -----                             | :----:  | -----------
+  ```percentage```                  | Float   | The percentage of active instances required every `duration_secs`.
+  ```duration_secs```               | Integer | Minimum time duration a task needs to be `RUNNING` to be treated as active.
+
+### CoordinatorSlaPolicy Objects
+
+  param                             | type    | description
+  -----                             | :----:  | -----------
+  ```coordinator_url```             | String  | The URL to the [Coordinator](../../features/sla-requirements/#coordinator) service to be contacted before performing SLA-affecting actions (job updates, host drains, etc.).
+  ```status_key```                  | String  | The field in the Coordinator response that indicates the SLA status for working on the task. (Default: `drain`)
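+
+For example, a sketch of a percentage-based policy requiring 95% of instances to have been up for at least 30 minutes:
+
+```
+sla_policy = PercentageSlaPolicy(
+  percentage = 95.0,
+  duration_secs = 1800
+)
+
+# Attached to a job via its `sla_policy` field, described earlier in this document.
+```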
+
+
+Specifying Scheduling Constraints
+=================================
+
+In the `Job` object there is a map `constraints` from String to String
+allowing the user to tailor the schedulability of tasks within the job.
+
+The constraint map's key is the attribute name on which we
+constrain Tasks within our Job. The value is how we constrain them.
+There are two types of constraints: *limit constraints* and *value
+constraints*.
+
+| constraint    | description
+| ------------- | --------------
+| Limit         | A string that specifies a limit for a constraint. Starts with <code>'limit:</code> followed by an Integer and closing single quote, such as ```'limit:1'```.
+| Value         | A string that specifies a value for a constraint. To include a list of values, separate the values using commas. To negate the values of a constraint, start with a ```!```.
+
+Further details can be found in the [Scheduling Constraints](../../features/constraints/) feature
+description.
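+
+A brief sketch combining both kinds; the attribute names `rack` and `zone` are placeholders for whatever attributes the cluster operator has configured on agents:
+
+```
+constraints = {
+  'rack': 'limit:1',        # limit constraint: at most one instance per rack
+  'zone': '!us-east-1c'     # value constraint: any zone except us-east-1c
+}
+
+# Passed to a job via its `constraints` field, e.g. Job(..., constraints = constraints).
+```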
+
+
+Template Namespaces
+===================
+
+Currently, a few Pystachio namespaces have special semantics. Using them
+in your configuration allows you to tailor application behavior
+through environment introspection or to interact in special ways with the
+Aurora client or Aurora-provided services.
+
+### mesos Namespace
+
+The `mesos` namespace contains variables which relate to the `mesos` agent
+which launched the task. The `instance` variable can be used
+to distinguish between Task replicas.
+
+| variable name     | type       | description
+| --------------- | :--------: | -------------
+| ```instance```    | Integer    | The instance number of the created task. A job with 5 replicas has instance numbers 0, 1, 2, 3, and 4.
+| ```hostname``` | String | The instance hostname that the task was launched on.
+
+Please note, there is no uniqueness guarantee for `instance` in the presence of
+network partitions. If that is required, it should be baked in at the application
+level using a distributed coordination service such as Zookeeper.
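+
+As a sketch, these variables can be interpolated directly into a `Process` command line:
+
+```
+log_instance = Process(
+  name = 'log_instance',
+  cmdline = 'echo "instance {{mesos.instance}} on {{mesos.hostname}}"'
+)
+```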
+
+### thermos Namespace
+
+The `thermos` namespace contains variables that work directly on the
+Thermos platform in addition to Aurora. This namespace is fully
+compatible with Tasks invoked via the `thermos` CLI.
+
+| variable      | type                     | description                        |
+| :----------:  | ---------                | ------------                       |
+| ```ports```   | map of string to Integer | A map of names to port numbers     |
+| ```task_id``` | string                   | The task ID assigned to this task. |
+
+The `thermos.ports` namespace is automatically populated by Aurora when
+invoking tasks on Mesos. When running the `thermos` command directly,
+these ports must be explicitly mapped with the `-P` option.
+
+For example, if `{{thermos.ports[http]}}` is specified in a `Process`
+configuration, it is automatically extracted and auto-populated by
+Aurora, but must be specified with, for example, `thermos -P http:12345`
+to map `http` to port 12345 when running via the CLI.
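+
+A sketch of a `Process` using the named port; the server binary is a placeholder:
+
+```
+server = Process(
+  name = 'server',
+  cmdline = './my_server --port {{thermos.ports[http]}}'
+)
+```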
diff --git a/source/documentation/0.21.0/reference/observer-configuration.md b/source/documentation/0.21.0/reference/observer-configuration.md
new file mode 100644
index 0000000..dcaca23
--- /dev/null
+++ b/source/documentation/0.21.0/reference/observer-configuration.md
@@ -0,0 +1,103 @@
+# Observer Configuration Reference
+
+The Aurora/Thermos observer can take a variety of configuration options through command-line arguments.
+A list of the available options can be seen by running `thermos_observer --long-help`.
+
+Please refer to the [Operator Configuration Guide](../../operations/configuration/) for details on how
+to properly set the most important options.
+
+```
+$ thermos_observer.pex --long-help
+Options:
+  -h, --help, --short-help
+                        show this help message and exit.
+  --long-help           show options from all registered modules, not just the
+                        __main__ module.
+  --mesos-root=MESOS_ROOT
+                        The mesos root directory to search for Thermos
+                        executor sandboxes [default: /var/lib/mesos]
+  --ip=IP               The IP address the observer will bind to. [default:
+                        0.0.0.0]
+  --port=PORT           The port on which the observer should listen.
+                        [default: 1338]
+  --polling_interval_secs=POLLING_INTERVAL_SECS
+                        The number of seconds between observer refresh
+                        attempts. [default: 5]
+  --task_process_collection_interval_secs=TASK_PROCESS_COLLECTION_INTERVAL_SECS
+                        The number of seconds between per task process
+                        resource collections. [default: 20]
+  --task_disk_collection_interval_secs=TASK_DISK_COLLECTION_INTERVAL_SECS
+                        The number of seconds between per task disk resource
+                        collections. [default: 60]
+  --enable_mesos_disk_collector
+                        Delegate per task disk usage collection to agent.
+                        Should be enabled in conjunction to disk isolation in
+                        Mesos-agent. This is not compatible with an
+                        authenticated agent API. [default: False]
+  --agent_api_url=AGENT_API_URL
+                        Mesos Agent API url. [default:
+                        http://localhost:5051/containers]
+  --executor_id_json_path=EXECUTOR_ID_JSON_PATH
+                        `jmespath` to executor_id key in agent response json
+                        object. [default: executor_id]
+  --disk_usage_json_path=DISK_USAGE_JSON_PATH
+                        `jmespath` to disk usage bytes value in agent response
+                        json object. [default: statistics.disk_used_bytes]
+
+  From module twitter.common.app:
+    --app_daemonize     Daemonize this application. [default: False]
+    --app_profile_output=FILENAME
+                        Dump the profiling output to a binary profiling
+                        format. [default: None]
+    --app_daemon_stderr=TWITTER_COMMON_APP_DAEMON_STDERR
+                        Direct this app's stderr to this file if daemonized.
+                        [default: /dev/null]
+    --app_debug         Print extra debugging information during application
+                        initialization. [default: False]
+    --app_rc_filename   Print the filename for the rc file and quit. [default:
+                        False]
+    --app_daemon_stdout=TWITTER_COMMON_APP_DAEMON_STDOUT
+                        Direct this app's stdout to this file if daemonized.
+                        [default: /dev/null]
+    --app_profiling     Run profiler on the code while it runs.  Note this can
+                        cause slowdowns. [default: False]
+    --app_ignore_rc_file
+                        Ignore default arguments from the rc file. [default:
+                        False]
+    --app_pidfile=TWITTER_COMMON_APP_PIDFILE
+                        The pidfile to use if --app_daemonize is specified.
+                        [default: None]
+
+  From module twitter.common.log.options:
+    --log_to_stdout=[scheme:]LEVEL
+                        OBSOLETE - legacy flag, use --log_to_stderr instead.
+                        [default: ERROR]
+    --log_to_stderr=[scheme:]LEVEL
+                        The level at which logging to stderr [default: ERROR].
+                        Takes either LEVEL or scheme:LEVEL, where LEVEL is one
+                        of ['INFO', 'NONE', 'WARN', 'ERROR', 'DEBUG', 'FATAL']
+                        and scheme is one of ['google', 'plain'].
+    --log_to_disk=[scheme:]LEVEL
+                        The level at which logging to disk [default: INFO].
+                        Takes either LEVEL or scheme:LEVEL, where LEVEL is one
+                        of ['INFO', 'NONE', 'WARN', 'ERROR', 'DEBUG', 'FATAL']
+                        and scheme is one of ['google', 'plain'].
+    --log_dir=DIR       The directory into which log files will be generated
+                        [default: /var/tmp].
+    --log_simple        Write a single log file rather than one log file per
+                        log level [default: False].
+    --log_to_scribe=[scheme:]LEVEL
+                        The level at which logging to scribe [default: NONE].
+                        Takes either LEVEL or scheme:LEVEL, where LEVEL is one
+                        of ['INFO', 'NONE', 'WARN', 'ERROR', 'DEBUG', 'FATAL']
+                        and scheme is one of ['google', 'plain'].
+    --scribe_category=CATEGORY
+                        The category used when logging to the scribe daemon.
+                        [default: python_default].
+    --scribe_buffer     Buffer messages when scribe is unavailable rather than
+                        dropping them. [default: False].
+    --scribe_host=HOST  The host running the scribe daemon. [default:
+                        localhost].
+    --scribe_port=PORT  The port used to connect to the scribe daemon.
+                        [default: 1463].
+```
diff --git a/source/documentation/0.21.0/reference/scheduler-configuration.md b/source/documentation/0.21.0/reference/scheduler-configuration.md
new file mode 100644
index 0000000..e0ce476
--- /dev/null
+++ b/source/documentation/0.21.0/reference/scheduler-configuration.md
@@ -0,0 +1,272 @@
+# Scheduler Configuration Reference
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+A list of the available options can be seen by running `aurora-scheduler -help`.
+
+Please refer to the [Operator Configuration Guide](../../operations/configuration/) for details on how
+to properly set the most important options.
+
+```
+$ aurora-scheduler -help
+-------------------------------------------------------------------------
+-h or -help to print this help message
+
+Required flags:
+-backup_dir [not null]
+	Directory to store backups under. Will be created if it does not exist.
+-cluster_name [not null]
+	Name to identify the cluster being served.
+-framework_authentication_file
+	Properties file which contains framework credentials to authenticate with Mesosmaster. Must contain the properties 'aurora_authentication_principal' and 'aurora_authentication_secret'.
+-ip
+	The ip address to listen. If not set, the scheduler will listen on all interfaces.
+-mesos_master_address [not null]
+	Address for the mesos master, can be a socket address or zookeeper path.
+-mesos_role
+	The Mesos role this framework will register as. The default is to left this empty, and the framework will register without any role and only receive unreserved resources in offer.
+-serverset_path [not null, must be non-empty]
+	ZooKeeper ServerSet path to register at.
+-shiro_after_auth_filter
+	Fully qualified class name of the servlet filter to be applied after the shiro auth filters are applied.
+-thermos_executor_path
+	Path to the thermos executor entry point.
+-tier_config [file must be readable]
+	Configuration file defining supported task tiers, task traits and behaviors.
+-webhook_config [file must exist, file must be readable]
+	Path to webhook configuration file.
+-zk_endpoints [must have at least 1 item]
+	Endpoint specification for the ZooKeeper servers.
+
+Optional flags:
+-allow_container_volumes (default false)
+	Allow passing in volumes in the job. Enabling this could pose a privilege escalation threat.
+-allow_docker_parameters (default false)
+	Allow to pass docker container parameters in the job.
+-allow_gpu_resource (default false)
+	Allow jobs to request Mesos GPU resource.
+-allowed_container_types (default [MESOS])
+	Container types that are allowed to be used by jobs.
+-allowed_job_environments (default ^(prod|devel|test|staging\d*)$)
+	Regular expression describing the environments that are allowed to be used by jobs.
+-async_slot_stat_update_interval (default (1, mins))
+	Interval on which to try to update open slot stats.
+-async_task_stat_update_interval (default (1, hrs))
+	Interval on which to try to update resource consumption stats.
+-async_worker_threads (default 8)
+	The number of worker threads to process async task operations with.
+-backup_interval (default (1, hrs))
+	Minimum interval on which to write a storage backup.
+-cron_scheduler_num_threads (default 10)
+	Number of threads to use for the cron scheduler thread pool.
+-cron_scheduling_max_batch_size (default 10) [must be > 0]
+	The maximum number of triggered cron jobs that can be processed in a batch.
+-cron_start_initial_backoff (default (5, secs))
+	Initial backoff delay while waiting for a previous cron run to be killed.
+-cron_start_max_backoff (default (1, mins))
+	Max backoff delay while waiting for a previous cron run to be killed.
+-cron_timezone (default GMT)
+	TimeZone to use for cron predictions.
+-custom_executor_config [file must exist, file must be readable]
+	Path to custom executor settings configuration file.
+-default_docker_parameters (default {})
+	Default docker parameters for any job that does not explicitly declare parameters.
+-dlog_max_entry_size (default (512, KB))
+	Specifies the maximum entry size to append to the log. Larger entries will be split across entry Frames.
+-dlog_shutdown_grace_period (default (2, secs))
+	Specifies the maximum time to wait for scheduled checkpoint and snapshot actions to complete before forcibly shutting down.
+-dlog_snapshot_interval (default (1, hrs))
+	Specifies the frequency at which snapshots of local storage are taken and written to the log.
+-enable_cors_for
+	List of domains for which CORS support should be enabled.
+-enable_mesos_fetcher (default false)
+	Allow jobs to pass URIs to the Mesos Fetcher. Note that enabling this feature could pose a privilege escalation threat.
+-enable_preemptor (default true)
+	Enable the preemptor and preemption
+-enable_revocable_cpus (default true)
+	Treat CPUs as a revocable resource.
+-enable_revocable_ram (default false)
+	Treat RAM as a revocable resource.
+-executor_user (default root)
+	User to start the executor. Defaults to "root". Set this to an unprivileged user if the mesos master was started with "--no-root_submissions". If set to anything other than "root", the executor will ignore the "role" setting for jobs since it can't use setuid() anymore. This means that all your jobs will run under the specified user and the user has to exist on the Mesos agents.
+-first_schedule_delay (default (1, ms))
+	Initial amount of time to wait before first attempting to schedule a PENDING task.
+-flapping_task_threshold (default (5, mins))
+	A task that repeatedly runs for less than this time is considered to be flapping.
+-framework_announce_principal (default false)
+	When 'framework_authentication_file' flag is set, the FrameworkInfo registered with the mesos master will also contain the principal. This is necessary if you intend to use mesos authorization via mesos ACLs. The default will change in a future release. Changing this value is backwards incompatible. For details, see MESOS-703.
+-framework_failover_timeout (default (21, days))
+	Time after which a framework is considered deleted.  SHOULD BE VERY HIGH.
+-framework_name (default Aurora)
+	Name used to register the Aurora framework with Mesos.
+-global_container_mounts (default [])
+	A comma separated list of mount points (in host:container form) to mount into all (non-mesos) containers.
+-history_max_per_job_threshold (default 100)
+	Maximum number of terminated tasks to retain in a job history.
+-history_min_retention_threshold (default (1, hrs))
+	Minimum guaranteed time for task history retention before any pruning is attempted.
+-history_prune_threshold (default (2, days))
+	Time after which the scheduler will prune terminated task history.
+-host_maintenance_polling_interval (default (1, minute))
+	Interval between polling for pending host maintenance requests.
+-hostname
+	The hostname to advertise in ZooKeeper instead of the locally-resolved hostname.
+-http_authentication_mechanism (default NONE)
+	HTTP Authentication mechanism to use.
+-http_port (default 0)
+	The port to start an HTTP server on.  Default value will choose a random port.
+-initial_flapping_task_delay (default (30, secs))
+	Initial amount of time to wait before attempting to schedule a flapping task.
+-initial_schedule_penalty (default (1, secs))
+	Initial amount of time to wait before attempting to schedule a task that has failed to schedule.
+-initial_task_kill_retry_interval (default (5, secs))
+	When killing a task, retry after this delay if mesos has not responded, backing off up to transient_task_state_timeout
+-job_update_history_per_job_threshold (default 10)
+	Maximum number of completed job updates to retain in a job update history.
+-job_update_history_pruning_interval (default (15, mins))
+	Job update history pruning interval.
+-job_update_history_pruning_threshold (default (30, days))
+	Time after which the scheduler will prune completed job update history.
+-kerberos_debug (default false)
+	Produce additional Kerberos debugging output.
+-kerberos_server_keytab
+	Path to the server keytab.
+-kerberos_server_principal
+	Kerberos server principal to use, usually of the form HTTP/aurora.example.com@EXAMPLE.COM
+-max_flapping_task_delay (default (5, mins))
+	Maximum delay between attempts to schedule a flapping task.
+-max_leading_duration (default (1, days))
+	After leading for this duration, the scheduler should commit suicide.
+-max_parallel_coordinated_maintenance (default 10)
+	Maximum number of coordinators that can be contacted in parallel.
+-max_registration_delay (default (1, mins))
+	Max allowable delay to allow the driver to register before aborting
+-max_reschedule_task_delay_on_startup (default (30, secs))
+	Upper bound of random delay for pending task rescheduling on scheduler startup.
+-max_saved_backups (default 48)
+	Maximum number of backups to retain before deleting the oldest backups.
+-max_schedule_attempts_per_sec (default 40.0)
+	Maximum number of scheduling attempts to make per second.
+-max_schedule_penalty (default (1, mins))
+	Maximum delay between attempts to schedule a PENDING tasks.
+-max_sla_duration_secs (default (2, hrs))
+	Maximum duration window for which SLA requirements are to be satisfied. This does not apply to jobs that have a CoordinatorSlaPolicy.
+-max_status_update_batch_size (default 1000) [must be > 0]
+	The maximum number of status updates that can be processed in a batch.
+-max_task_event_batch_size (default 300) [must be > 0]
+	The maximum number of task state change events that can be processed in a batch.
+-max_tasks_per_job (default 4000) [must be > 0]
+	Maximum number of allowed tasks in a single job.
+-max_tasks_per_schedule_attempt (default 5) [must be > 0]
+	The maximum number of tasks to pick in a single scheduling attempt.
+-max_update_instance_failures (default 20000) [must be > 0]
+	Upper limit on the number of failures allowed during a job update. This helps cap potentially unbounded entries into storage.
+-min_offer_hold_time (default (5, mins))
+	Minimum amount of time to hold a resource offer before declining.
+-min_required_instances_for_sla_check (default 20)
+	Minimum number of instances required for a job to be eligible for SLA check. This does not apply to jobs that have a CoordinatorSlaPolicy.
+-native_log_election_retries (default 20)
+	The maximum number of attempts to obtain a new log writer.
+-native_log_election_timeout (default (15, secs))
+	The timeout for a single attempt to obtain a new log writer.
+-native_log_file_path
+	Path to a file to store the native log data in.  If the parent directory doesnot exist it will be created.
+-native_log_quorum_size (default 1)
+	The size of the quorum required for all log mutations.
+-native_log_read_timeout (default (5, secs))
+	The timeout for doing log reads.
+-native_log_write_timeout (default (3, secs))
+	The timeout for doing log appends and truncations.
+-native_log_zk_group_path
+	A zookeeper node for use by the native log to track the master coordinator.
+-offer_filter_duration (default (5, secs))
+	Duration after which we expect Mesos to re-offer unused resources. A short duration improves scheduling performance in smaller clusters, but might lead to resource starvation for other frameworks if you run many frameworks in your cluster.
+-offer_hold_jitter_window (default (1, mins))
+	Maximum amount of random jitter to add to the offer hold time window.
+-offer_reservation_duration (default (3, mins))
+	Time to reserve a agent's offers while trying to satisfy a task preempting another.
+-offer_set_module (default [class org.apache.aurora.scheduler.offers.OfferSetModule])
+  Guice module for replacing offer holding and scheduling logic.
+-partition_aware (default false)
+  Whether or not to integrate with the partition-aware Mesos capabilities.
+-populate_discovery_info (default false)
+	If true, Aurora populates DiscoveryInfo field of Mesos TaskInfo.
+-preemption_delay (default (3, mins))
+	Time interval after which a pending task becomes eligible to preempt other tasks
+-preemption_slot_finder_modules (default [class org.apache.aurora.scheduler.preemptor.PendingTaskProcessorModule, class org.apache.aurora.scheduler.preemptor.PreemptionVictimFilterModule])
+  Guice modules for replacing preemption logic.
+-preemption_slot_hold_time (default (5, mins))
+	Time to hold a preemption slot found before it is discarded.
+-preemption_slot_search_interval (default (1, mins))
+	Time interval between pending task preemption slot searches.
+-receive_revocable_resources (default false)
+	Allows receiving revocable resource offers from Mesos.
+-reconciliation_explicit_batch_interval (default (5, secs))
+	Interval between explicit batch reconciliation requests.
+-reconciliation_explicit_batch_size (default 1000) [must be > 0]
+	Number of tasks in a single batch request sent to Mesos for explicit reconciliation.
+-reconciliation_explicit_interval (default (60, mins))
+	Interval on which scheduler will ask Mesos for status updates of all non-terminal tasks known to scheduler.
+-reconciliation_implicit_interval (default (60, mins))
+	Interval on which scheduler will ask Mesos for status updates of all non-terminal tasks known to Mesos.
+-reconciliation_initial_delay (default (1, mins))
+	Initial amount of time to delay task reconciliation after scheduler start up.
+-reconciliation_schedule_spread (default (30, mins))
+	Difference between explicit and implicit reconciliation intervals intended to create a non-overlapping task reconciliation schedule.
+-require_docker_use_executor (default true)
+	If false, Docker tasks may run without an executor (EXPERIMENTAL)
+-scheduling_max_batch_size (default 3) [must be > 0]
+	The maximum number of scheduling attempts that can be processed in a batch.
+-serverset_endpoint_name (default http)
+	Name of the scheduler endpoint published in ZooKeeper.
+-shiro_ini_path
+	Path to shiro.ini for authentication and authorization configuration.
+-shiro_realm_modules (default [class org.apache.aurora.scheduler.http.api.security.IniShiroRealmModule])
+	Guice modules for configuring Shiro Realms.
+-sla_aware_action_max_batch_size (default 300) [must be > 0]
+	The maximum number of sla aware update actions that can be processed in a batch.
+-sla_aware_kill_retry_min_delay (default (1, min)) [must be > 0]
+	The minimum amount of time to wait before retrying an SLA-aware kill (using a truncated binary backoff).
+-sla_aware_kill_retry_max_delay (default (5, min)) [must be > 0]
+	The maximum amount of time to wait before retrying an SLA-aware kill (using a truncated binary backoff).
+-sla_coordinator_timeout (default (1, min)) [must be > 0]
+	Timeout interval for communicating with Coordinator.
+-sla_non_prod_metrics (default [])
+	Metric categories collected for non production tasks.
+-sla_prod_metrics (default [JOB_UPTIMES, PLATFORM_UPTIME, MEDIANS])
+	Metric categories collected for production tasks.
+-sla_stat_refresh_interval (default (1, mins))
+	The SLA stat refresh interval.
+-stat_retention_period (default (1, hrs))
+	Time for a stat to be retained in memory before expiring.
+-stat_sampling_interval (default (1, secs))
+	Statistic value sampling interval.
+-task_assigner_modules (default [class org.apache.aurora.scheduler.scheduling.TaskAssignerImplModule])
+  Guice modules for replacing task assignment logic.
+-thermos_executor_cpu (default 0.25)
+	The number of CPU cores to allocate for each instance of the executor.
+-thermos_executor_flags
+	Extra arguments to be passed to the thermos executor
+-thermos_executor_ram (default (128, MB))
+	The amount of RAM to allocate for each instance of the executor.
+-thermos_executor_resources (default [])
+	A comma separated list of additional resources to copy into the sandbox.Note: if thermos_executor_path is not the thermos_executor.pex file itself, this must include it.
+-thermos_home_in_sandbox (default false)
+	If true, changes HOME to the sandbox before running the executor. This primarily has the effect of causing the executor and runner to extract themselves into the sandbox.
+-thrift_method_interceptor_modules (default [])
+	Additional Guice modules for intercepting Thrift method calls.
+-transient_task_state_timeout (default (5, mins))
+	The amount of time after which to treat a task stuck in a transient state as LOST.
+-viz_job_url_prefix (default )
+	URL prefix for job container stats.
+-zk_chroot_path
+	chroot path to use for the ZooKeeper connections
+-zk_digest_credentials
+	user:password to use when authenticating with ZooKeeper.
+-zk_in_proc (default false)
+	Launches an embedded zookeeper server for local testing causing -zk_endpoints to be ignored if specified.
+-zk_session_timeout (default (4, secs))
+	The ZooKeeper session timeout.
+-zk_use_curator (default true)
+	DEPRECATED: Uses Apache Curator as the zookeeper client; otherwise a copy of Twitter commons/zookeeper (the legacy library) is used.
+-------------------------------------------------------------------------
+```
diff --git a/source/documentation/0.21.0/reference/scheduler-endpoints.md b/source/documentation/0.21.0/reference/scheduler-endpoints.md
new file mode 100644
index 0000000..ddae76b
--- /dev/null
+++ b/source/documentation/0.21.0/reference/scheduler-endpoints.md
@@ -0,0 +1,19 @@
+# HTTP endpoints
+
+There are a number of HTTP endpoints that the Aurora scheduler exposes. These allow various
+operational tasks to be performed on the scheduler. Below is an (incomplete) list of such endpoints
+and a brief explanation of what they do.
+
+## Leader health
+The `/leaderhealth` endpoint enables performing health checks on the scheduler instances in order
+to forward requests to the leading scheduler. This is typically used by a load balancer such as
+HAProxy or AWS ELB.
+
+When an HTTP GET request is issued on this endpoint, it responds as follows:
+
+- If the instance that received the GET request is the leading scheduler, an HTTP status code of
+  `200 OK` is returned.
+- If the instance that received the GET request is not the leading scheduler but a leader does
+  exist, an HTTP status code of `503 SERVICE_UNAVAILABLE` is returned.
+- If no leader currently exists or the leader is unknown, an HTTP status code of `502 BAD_GATEWAY`
+  is returned.
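+
+As an illustration only (not an official client), the following Python sketch treats a scheduler instance as the leader exactly when `/leaderhealth` returns `200`; the host and port are placeholders since the scheduler's HTTP port is operator-configured:
+
+```
+import urllib.error
+import urllib.request
+
+def is_leading_scheduler(host, port):
+    """Return True only when this scheduler instance answers 200 on /leaderhealth."""
+    url = 'http://%s:%d/leaderhealth' % (host, port)
+    try:
+        with urllib.request.urlopen(url, timeout=2) as resp:
+            return resp.status == 200
+    except urllib.error.HTTPError:
+        # 503: not the leader; 502: no leader known.
+        return False
+    except OSError:
+        # Connection-level failure: treat as unhealthy.
+        return False
+```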
diff --git a/source/documentation/0.21.0/reference/task-lifecycle.md b/source/documentation/0.21.0/reference/task-lifecycle.md
new file mode 100644
index 0000000..b4642b3
--- /dev/null
+++ b/source/documentation/0.21.0/reference/task-lifecycle.md
@@ -0,0 +1,175 @@
+# Task Lifecycle
+
+When Aurora reads a configuration file and finds a `Job` definition, it:
+
+1.  Evaluates the `Job` definition.
+2.  Splits the `Job` into its constituent `Task`s.
+3.  Sends those `Task`s to the scheduler.
+4.  The scheduler puts the `Task`s into `PENDING` state, starting each
+    `Task`'s life cycle.
+
+
+![Life of a task](../images/lifeofatask.png)
+
+Please note, a couple of task states described below are missing from
+this state diagram.
+
+
+## PENDING to RUNNING states
+
+When a `Task` is in the `PENDING` state, the scheduler constantly
+searches for machines satisfying that `Task`'s resource request
+requirements (RAM, disk space, CPU time) while maintaining configuration
+constraints such as "a `Task` must run on machines dedicated to a
+particular role" or attribute limit constraints such as "at most 2
+`Task`s from the same `Job` may run on each rack". When the scheduler
+finds a suitable match, it assigns the `Task` to a machine and puts the
+`Task` into the `ASSIGNED` state.
+
+From the `ASSIGNED` state, the scheduler sends an RPC to the agent
+machine containing `Task` configuration, which the agent uses to spawn
+an executor responsible for the `Task`'s lifecycle. When the scheduler
+receives an acknowledgment that the machine has accepted the `Task`,
+the `Task` goes into `STARTING` state.
+
+`STARTING` state initializes a `Task` sandbox. When the sandbox is fully
+initialized, Thermos begins to invoke `Process`es. The agent
+machine sends an update to the scheduler that the `Task` is
+in `RUNNING` state only after the task satisfies its liveness requirements.
+See [Health Checking](../features/services#health-checking) for more details
+on how to configure health checks.
+
+
+
+## RUNNING to terminal states
+
+There are various ways that an active `Task` can transition into a terminal
+state. By definition, it can never leave this state. However, depending on
+the nature of the termination and the originating `Job` definition
+(e.g. `service`, `max_task_failures`), a replacement `Task` might be
+scheduled.
+
+### Natural Termination: FINISHED, FAILED
+
+A `RUNNING` `Task` can terminate without direct user interaction. For
+example, it may be a finite computation that finishes, even something as
+simple as `echo hello world.`, or it could be an exceptional condition in
+a long-lived service. If the `Task` is successful (its underlying
+processes have succeeded with exit status `0` or finished without
+reaching failure limits) it moves into `FINISHED` state. If it finished
+after reaching a set of failure limits, it goes into `FAILED` state.
+
+A terminated `Task` which is subject to rescheduling will be temporarily
+`THROTTLED` if it is considered to be flapping. A task is flapping if its
+previous invocation was terminated after less than 5 minutes (scheduler
+default). The time penalty a task has to remain in the `THROTTLED` state,
+before it is eligible for rescheduling, increases with each consecutive
+failure.
+
+### Forceful Termination: KILLING, RESTARTING
+
+You can terminate a `Task` by issuing an `aurora job kill` command, which
+moves it into `KILLING` state. The scheduler then sends the agent a
+request to terminate the `Task`. If the scheduler receives a successful
+response, it moves the Task into `KILLED` state and never restarts it.
+
+If a `Task` is forced into the `RESTARTING` state via the `aurora job restart`
+command, the scheduler kills the underlying task but in parallel schedules
+an identical replacement for it.
+
+In any case, the responsible executor on the agent follows an escalation
+sequence when killing a running task:
+
+  1. If a `HttpLifecycleConfig` is not present, skip to (4).
+  2. Send a POST to the `graceful_shutdown_endpoint` and wait
+  `graceful_shutdown_wait_secs` seconds.
+  3. Send a POST to the `shutdown_endpoint` and wait
+  `shutdown_wait_secs` seconds.
+  4. Send SIGTERM (`kill`) and wait at most `finalization_wait` seconds.
+  5. Send SIGKILL (`kill -9`).
+
+If the executor notices that all `Process`es in a `Task` have aborted
+during this sequence, it will not proceed with subsequent steps.
+Note that graceful shutdown is best-effort, and due to the many
+inevitable realities of distributed systems, it may not be performed.
+
+### Unexpected Termination: LOST
+
+If a `Task` stays in a transient task state for too long (such as `ASSIGNED`
+or `STARTING`), the scheduler forces it into `LOST` state, creating a new
+`Task` in its place that's sent into `PENDING` state.
+
+In addition, if the Mesos core tells the scheduler that an agent has
+become unhealthy (or outright disappeared), the `Task`s assigned to that
+agent go into `LOST` state and new `Task`s are created in their place.
+From `PENDING` state, there is no guarantee a `Task` will be reassigned
+to the same machine unless job constraints explicitly force it there.
+
+
+## RUNNING to PARTITIONED states
+If Aurora is configured to enable partition awareness, a task which is in a
+running state can transition to `PARTITIONED`. This happens when the state
+of the task in Mesos becomes unknown. By default Aurora errs on the side of
+availability, so all tasks that transition to `PARTITIONED` are immediately
+transitioned to `LOST`.
+
+This policy is not ideal for all types of workloads you may wish to run in
+your Aurora cluster, e.g. for jobs where task failures are very expensive.
+So job owners may set their own `PartitionPolicy` where they can control
+how long to remain in `PARTITIONED` before transitioning to `LOST`. Or they
+can disable any automatic attempts to `reschedule` when in `PARTITIONED`,
+effectively waiting out the partition for as long as possible.
+
+
+## PARTITIONED and transient states
+The `PartitionPolicy` provided by users only applies to tasks which are
+currently running. When tasks are moving in and out of transient states,
+e.g. tasks being updated, restarted, preempted, etc., `PARTITIONED` tasks
+are moved immediately to `LOST`. This prevents situations where system
+or user-initiated actions are blocked indefinitely waiting for partitions
+to resolve (that may never be resolved).
+
+
+### Giving Priority to Production Tasks: PREEMPTING
+
+Sometimes a Task needs to be interrupted, such as when a non-production
+Task's resources are needed by a higher priority production Task. This
+type of interruption is called a *pre-emption*. When this happens in
+Aurora, the non-production Task is killed and moved into
+the `PREEMPTING` state when both of the following are true:
+
+- The task being killed is a non-production task.
+- The other task is a `PENDING` production task that hasn't been
+  scheduled due to a lack of resources.
+
+The scheduler UI shows that the non-production task was preempted in favor of
+the production task. At some point, tasks in `PREEMPTING` move to `KILLED`.
+
+Note that non-production tasks consuming many resources are likely to be
+preempted in favor of production tasks.
+
+### Making Room for Maintenance: DRAINING
+
+Cluster operators can put an agent into maintenance mode. This will transition
+all `Task`s running on this agent into `DRAINING` and eventually to `KILLED`.
+Drained `Task`s will be restarted on other agents for which no maintenance
+has been announced yet.
+
+
+
+## State Reconciliation
+
+Due to the many inevitable realities of distributed systems, there might
+be a mismatch of perceived and actual cluster state (e.g. a machine returns
+from a `netsplit` but the scheduler has already marked all its `Task`s as
+`LOST` and rescheduled them).
+
+Aurora regularly runs a state reconciliation process in order to detect
+and correct such issues (e.g. by killing the errant `RUNNING` tasks).
+By default, the proper detection of all failure scenarios and inconsistencies
+may take up to an hour.
+
+To emphasize this point: there is no uniqueness guarantee for a single
+instance of a job in the presence of network partitions. If the `Task`
+requires that, it should be baked in at the application level using a
+distributed coordination service such as Zookeeper.
diff --git a/source/documentation/0.22.0/additional-resources/presentations.md b/source/documentation/0.22.0/additional-resources/presentations.md
new file mode 100644
index 0000000..03028bd
--- /dev/null
+++ b/source/documentation/0.22.0/additional-resources/presentations.md
@@ -0,0 +1,80 @@
+# Apache Aurora Presentations
+Video and slides from presentations and panel discussions about Apache Aurora.
+
+_(Listed in date descending order)_
+
+<table>
+
+	<tr>
+		<td><img src="/documentation/0.22.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png" alt="Mesos and Aurora on a Small Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=q5iIqhaCJ_o">Mesos &amp; Aurora on a Small Scale (Video)</a></strong>
+		<p>Presented by Florian Pfeiffer</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.22.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png" alt="SLA Aware Maintenance for Operators Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=tZ0-SISvCis">SLA Aware Maintenance for Operators (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.22.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png" alt="Shipping Code with Aurora Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=y1hi7K1lPkk">Shipping Code with Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.22.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png" alt="Twitter Production Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=nNrh-gdu9m4">Twitter’s Production Scale: Mesos and Aurora Operations (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.22.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png" alt="From Monolith to Microservices with Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=yXkOgnyK4Hw">From Monolith to Microservices w/ Aurora (Video)</a></strong>
+		<p>Presented by Thanos Baskous, Tony Dong, Dobromir Montauk</p>
+		<p>April 30, 2015 at <a href="http://www.meetup.com/Bay-Area-Apache-Aurora-Users-Group/events/221219480/">Bay Area Apache Aurora Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.22.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png" alt="Aurora + Mesos in Practice at Twitter Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=1XYJGX_qZVU">Aurora + Mesos in Practice at Twitter (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 07, 2015 at <a href="http://www.bigeng.io/aurora-mesos-in-practice-at-twitter">Bigcommerce TechTalk</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.22.0/images/presentations/02_28_2015_apache_aurora_thumb.png" alt="Apache Auroraの始めかた Slideshow Thumbnail" /></td>
+		<td><strong><a href="http://www.slideshare.net/zembutsu/apache-aurora-introduction-and-tutorial-osc15tk">Apache Auroraの始めかた (Slides)</a></strong>
+		<p>Presented by Masahito Zembutsu</p>
+		<p>February 28, 2015 at <a href="http://www.ospn.jp/osc2015-spring/">Open Source Conference 2015 Tokyo Spring</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.22.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png" alt="Apache Aurora Adopters Panel Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=2Jsj0zFdRlg">Apache Aurora Adopters Panel (Video)</a></strong>
+		<p>Panelists Ben Staffin, Josh Adams, Bill Farner, Berk Demir</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.22.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png" alt="Operating Apache Aurora and Mesos at Twitter Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=E4lxX6epM_U">Operating Apache Aurora and Mesos at Twitter (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.22.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png" alt="Apache Aurora and Mesos at TellApart" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=ZZXtXLvTXAE">Apache Aurora and Mesos at TellApart (Video)</a></strong>
+		<p>Presented by Steve Niemitz</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.22.0/images/presentations/08_21_2014_past_present_future_thumb.png" alt="Past, Present, and Future of the Aurora Scheduler Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=Dsc5CPhKs4o">Past, Present, and Future of the Aurora Scheduler (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 21, 2014 at <a href="http://events.linuxfoundation.org/events/archive/2014/mesoscon">#MesosCon 2014</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.22.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png" alt="Introduction to Apache Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=asd_h6VzaJc">Introduction to Apache Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 25, 2014 at <a href="https://www.eventbrite.com/e/aurora-and-mesosframeworksmeetup-tickets-10850994617">Aurora and Mesos Frameworks Meetup</a></p></td>
+	</tr>
+</table>
diff --git a/source/documentation/0.22.0/additional-resources/tools.md b/source/documentation/0.22.0/additional-resources/tools.md
new file mode 100644
index 0000000..262d16d
--- /dev/null
+++ b/source/documentation/0.22.0/additional-resources/tools.md
@@ -0,0 +1,24 @@
+# Tools
+
+Various tools integrate with Aurora. Is there a tool missing? Let us know, or submit a patch to add it!
+
+* Load-balancing technology used to direct traffic to services running on Aurora:
+  - [synapse](https://github.com/airbnb/synapse) based on HAProxy
+  - [aurproxy](https://github.com/tellapart/aurproxy) based on nginx
+  - [jobhopper](https://github.com/benley/aurora-jobhopper) performs HTTP redirects for easy developer and administrator access
+
+* RPC libraries that integrate with Aurora's [service discovery mechanism](../../features/service-discovery/):
+  - [linkerd](https://linkerd.io/) RPC proxy
+  - [finagle](https://twitter.github.io/finagle) (Scala)
+  - [scales](https://github.com/steveniemitz/scales) (Python)
+
+* Monitoring:
+  - [collectd-aurora](https://github.com/zircote/collectd-aurora) for cluster monitoring using collectd
+  - [Prometheus Aurora exporter](https://github.com/tommyulfsparre/aurora_exporter) for cluster monitoring using Prometheus
+  - [Prometheus service discovery integration](http://prometheus.io/docs/operating/configuration/#zookeeper-serverset-sd-configurations-serverset_sd_config) for discovering and monitoring services running on Aurora
+
+* Packaging and deployment:
+  - [aurora-packaging](https://github.com/apache/aurora-packaging), the source of the official Aurora packages
+
+* Thrift Clients:
+  - [gorealis](https://github.com/paypal/gorealis) for communicating with the scheduler using Go
diff --git a/source/documentation/0.22.0/contributing.md b/source/documentation/0.22.0/contributing.md
new file mode 100644
index 0000000..76d4cef
--- /dev/null
+++ b/source/documentation/0.22.0/contributing.md
@@ -0,0 +1,93 @@
+## Get the Source Code
+
+First things first, you'll need the source! The Aurora source is available from Apache git:
+
+    git clone https://gitbox.apache.org/repos/asf/aurora
+
+Read the Style Guides
+---------------------
+Aurora's codebase is primarily Java and Python and conforms to the Twitter Commons styleguides for
+both languages.
+
+- [Java Style Guide](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/styleguide.md)
+- [Python Style Guide](https://github.com/twitter/commons/blob/master/src/python/twitter/common/styleguide.md)
+
+## Find Something to Do
+
+There are issues in [Jira](https://issues.apache.org/jira/browse/AURORA) with the
+["newbie" label](https://issues.apache.org/jira/issues/?jql=project%20%3D%20AURORA%20AND%20labels%20%3D%20newbie%20and%20resolution%3Dunresolved)
+that are good starting places for new Aurora contributors; pick one of these and dive in! To assign
+a task to yourself, first ask for your JIRA id to be whitelisted by either asking in IRC/Slack or by
+emailing dev@aurora.apache.org. Once your JIRA account has been whitelisted, you can assign tickets
+to yourself. The next step is to prepare your patch and finally post it for review.
+
+## Getting your ReviewBoard Account
+
+Go to https://reviews.apache.org and create an account.
+
+## Setting up your ReviewBoard Environment
+
+Run `./rbt status`. The first time this runs it will bootstrap and you will be asked to log in.
+Subsequent runs will cache your login credentials.
+
+## Submitting a Patch for Review
+
+Post a review with `rbt`, fill out the fields in your browser and hit Publish.
+
+    ./rbt post -o
+
+If you're unsure about who to add as a reviewer, you can default to adding Stephan Erb (StephanErb) and
+Renan DelValle (rdelvalle). They will take care of finding an appropriate reviewer for the patch.
+
+Once you've done this, you probably want to mark the associated Jira issue as Reviewable.
+
+## Updating an Existing Review
+
+Incorporate review feedback, make some more commits, update your existing review, fill out the
+fields in your browser and hit Publish.
+
+    ./rbt post -o -r <RB_ID>
+
+## Getting Your Review Merged
+
+If you're not an Aurora committer, one of the committers will merge your change in as described
+below. Generally, the last reviewer to give the review a 'Ship It!' will be responsible.
+
+### Merging Your Own Review (Committers)
+
+Once you have shipits from the right committers, merge your changes in a single commit and mark
+the review as submitted. The typical workflow is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>  # Verify the automatically-generated commit message looks sane,
+                            # editing if necessary.
+    git show master         # Verify everything looks sane
+    git push origin master
+    ./rbt close <RB_ID>
+
+Note that even if you're developing using feature branches you will not use `git merge` - each
+commit will be an atomic change accompanied by a ReviewBoard entry.
+
+### Merging Someone Else's Review
+
+Sometimes you'll need to merge someone else's RB. The typical workflow for this is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>
+    git show master  # Verify everything looks sane, author is correct
+    git push origin master
+
+Note for committers: while we generally use the commit message generated by `./rbt patch`, some
+changes are often required:
+
+1. Ensure that the commit message does not exceed 100 characters per line.
+2. Remove the "Testing Done" section. It's generally redundant (can be seen by checking the linked
+  review) or entirely irrelevant to the commit itself.
+
+## Cleaning Up
+
+Your patch has landed, congratulations! The last thing you'll want to do before moving on to your
+next fix is to clean up your Jira issue and ReviewBoard review: the former should be marked as
+"Resolved" and the latter as "Submitted".
diff --git a/source/documentation/0.22.0/development/client.md b/source/documentation/0.22.0/development/client.md
new file mode 100644
index 0000000..c6bec5b
--- /dev/null
+++ b/source/documentation/0.22.0/development/client.md
@@ -0,0 +1,148 @@
+Developing the Aurora Client
+============================
+
+The client is written in Python, and uses the
+[Pants](http://pantsbuild.github.io/python-readme.html) build tool.
+
+
+Building and Testing
+--------------------
+
+Building and testing the client code are both done using Pants. The relevant targets to know about
+are:
+
+   * Build a client executable: `./pants binary src/main/python/apache/aurora/client:aurora`
+   * Test client code: `./pants test src/test/python/apache/aurora/client/cli:cli`
+
+If you want to build a source distribution of the client, you need to run `./build-support/release/make-python-sdists`.
+
+
+Creating Custom Builds
+----------------------
+
+There are situations where you may want to plug in custom logic to the Client that may not be
+applicable to the open source codebase. Rather than create a whole CLI from scratch, you can
+easily create your own custom, drop-in replacement aurora.pex using the pants build tool.
+
+First, create an AuroraCommandLine implementation as an entry-point for registering customizations:
+
+    from apache.aurora.client.cli.client import AuroraCommandLine
+
+    class CustomAuroraCommandLine(AuroraCommandLine):
+    """Custom AuroraCommandLine for your needs"""
+
+    @property
+    def name(self):
+      return "your-company-aurora"
+
+    @classmethod
+    def get_description(cls):
+      return 'Your Company internal Aurora client command line'
+
+    def __init__(self):
+      super(CustomAuroraCommandLine, self).__init__()
+      # Add custom plugins..
+      self.register_plugin(YourCustomPlugin())
+
+    def register_nouns(self):
+      super(CustomAuroraCommandLine, self).register_nouns()
+      # You can even add new commands / sub-commands!
+      self.register_noun(YourStartUpdateProxy())
+      self.register_noun(YourDeployWorkflowCommand())
+
+Secondly, create a main entry point:
+
+    import sys
+
+    def proxy_main():
+      client = CustomAuroraCommandLine()
+      if len(sys.argv) == 1:
+        sys.argv.append("-h")
+      sys.exit(client.execute(sys.argv[1:]))
+
+Finally, you can wire everything up with a pants BUILD file in your project directory:
+
+    python_binary(
+      name='aurora',
+      entry_point='your_company.aurora.client:proxy_main',
+      dependencies=[
+        ':client_lib'
+      ]
+    )
+
+    python_library(
+      name='client_lib',
+      sources = [
+        'client.py',
+        'custom_plugin.py',
+        'custom_command.py',
+      ],
+      dependencies = [
+        # The Apache Aurora client
+        # Any other dependencies for your custom code
+      ],
+    )
+
+Using the same commands to build the client as above (but obviously pointing to this BUILD file
+instead), you will have a drop-in replacement aurora.pex file with your customizations.
+
+Running/Debugging
+------------------
+
+For manually testing client changes against a cluster, we use [Vagrant](https://www.vagrantup.com/).
+To start a virtual cluster, you need to install Vagrant, and then run `vagrant up` from the root of
+the aurora workspace. This will create a vagrant host named "devcluster", with a Mesos master, a set
+of Mesos agents, and an Aurora scheduler.
+
+If you have a change you would like to test in your local cluster, you'll rebuild the client:
+
+    vagrant ssh -c 'aurorabuild client'
+
+Once this completes, the `aurora` command will reflect your changes.
+
+
+Running/Debugging in PyCharm
+-----------------------------
+
+It's possible to use PyCharm to run and debug both the client and client tests in an IDE. In order
+to do this, first run:
+
+    build-support/python/make-pycharm-virtualenv
+
+This script will configure a virtualenv with all of our Python requirements. Once the script
+completes it will emit instructions for configuring PyCharm:
+
+    Your PyCharm environment is now set up.  You can open the project root
+    directory with PyCharm.
+
+    Once the project is loaded:
+      - open project settings
+      - click 'Project Interpreter'
+      - click the cog in the upper-right corner
+      - click 'Add Local'
+      - select 'build-support/python/pycharm.venv/bin/python'
+      - click 'OK'
+
+### Running/Debugging Tests
+After following these instructions, you should now be able to run/debug tests directly from the IDE
+by right-clicking on a test (or test class) and choosing to run or debug:
+
+[![Debug Client Test](../images/debug-client-test.png)](../images/debug-client-test.png)
+
+If you've set a breakpoint, you can see the run will now stop and let you debug:
+
+[![Debugging Client Test](../images/debugging-client-test.png)](../images/debugging-client-test.png)
+
+### Running/Debugging the Client
+Actually running and debugging the client is unfortunately a bit more complex. You'll need to create
+a Run configuration:
+
+* Go to Run → Edit Configurations
+* Click the + icon to add a new configuration.
+* Choose python and name the configuration 'client'.
+* Set the script path to `/your/path/to/aurora/src/main/python/apache/aurora/client/cli/client.py`
+* Set the script parameters to the command you want to run (e.g. `job status <job key>`)
+* Expand the Environment section and click the ellipsis to add a new environment variable
+* Click the + at the bottom to add a new variable named AURORA_CONFIG_ROOT whose value is the
+  path where your cluster configuration can be found. For example, to talk to the scheduler
+  running in the vagrant image, it would be set to `/your/path/to/aurora/examples/vagrant` (this
+  is the directory where our example clusters.json is found).
+* You should now be able to run and debug this configuration!
diff --git a/source/documentation/0.22.0/development/committers-guide.md b/source/documentation/0.22.0/development/committers-guide.md
new file mode 100644
index 0000000..aadf254
--- /dev/null
+++ b/source/documentation/0.22.0/development/committers-guide.md
@@ -0,0 +1,105 @@
+Committer's Guide
+=================
+
+Information for official Apache Aurora committers.
+
+Setting up your email account
+-----------------------------
+Once your Apache ID has been set up, you can configure your account, add ssh keys, and set up an
+email forwarding address at
+
+    http://id.apache.org
+
+Additional instructions for setting up your new committer email can be found at
+
+    http://www.apache.org/dev/user-email.html
+
+The recommended setup is to configure all services (mailing lists, JIRA, ReviewBoard) to send
+emails to your @apache.org email address.
+
+
+Creating a gpg key for releases
+-------------------------------
+In order to create a release candidate you will need a gpg key published to an external key server
+and that key will need to be added to our KEYS file as well.
+
+1. Create a key:
+
+               gpg --gen-key
+
+2. Add your gpg key to the Apache Aurora KEYS file:
+
+               git clone https://gitbox.apache.org/repos/asf/aurora
+               (gpg --list-sigs <KEY ID> && gpg --armor --export <KEY ID>) >> KEYS
+               git add KEYS && git commit -m "Adding gpg key for <APACHE ID>"
+               ./rbt post -o -g
+
+3. Publish the key to an external key server:
+
+               gpg --keyserver pgp.mit.edu --send-keys <KEY ID>
+
+4. Apply the changes to the KEYS file at the Apache Aurora svn dist locations listed below:
+
+               https://dist.apache.org/repos/dist/dev/aurora/KEYS
+               https://dist.apache.org/repos/dist/release/aurora/KEYS
+
+5. Add your key to git config for use with the release scripts:
+
+               git config --global user.signingkey <KEY ID>
+
+
+Creating a release
+------------------
+The following will guide you through the steps to create a release candidate, vote, and finally an
+official Apache Aurora release. Before starting, your gpg key should be in the KEYS file and you
+must have access to commit to the dist.a.o repositories.
+
+1. Ensure that all issues resolved for this release candidate are tagged with the correct Fix
+Version in JIRA; the changelog script will use this to generate the CHANGELOG in step #3.
+To assign the fix version:
+
+    * Look up the [previous release date](https://issues.apache.org/jira/browse/aurora/?selectedTab=com.atlassian.jira.jira-projects-plugin:versions-panel).
+    * Query all issues resolved after that release date: `project = AURORA AND status in (resolved, Closed) and fixVersion is empty and resolutiondate >= "YYYY/MM/DD"`
+    * In the upper right corner of the query result, select Tools > Bulk Edit.
+    * Select all issues > edit issue > set 'Change Fix Version/s' to the release version.
+    * Make sure to uncheck 'Send mail for this update' at the bottom.
+
+2. Prepare RELEASE-NOTES.md for the release. This just boils down to removing the "(Not yet
+released)" suffix from the impending release.
+
+3. Create a release candidate. This will automatically update the CHANGELOG and commit it, create a
+branch and update the current version within the trunk. To create a minor version update and publish
+it, run
+
+               ./build-support/release/release-candidate -l m -p
+
+4. Update, if necessary, the draft email created from the `release-candidate` script in step #3 and
+send the [VOTE] email to the dev@ mailing list. You can verify the release signature and checksums
+by running
+
+               ./build-support/release/verify-release-candidate
+
+5. Wait for the vote to complete. If the vote fails, close the vote by replying to the initial [VOTE]
+email sent in step #4 by editing the subject to [RESULT][VOTE] ... and noting the failure reason
+(example [here](http://markmail.org/message/d4d6xtvj7vgwi76f)). You'll also need to manually revert
+the commits generated by the release candidate script that incremented the snapshot version and
+updated the changelog. Once that is done, address any issues, go back to step #1, and run
+again; this time use the -r flag to increment the release candidate version. This will
+automatically clean up the release candidate rc0 branch and source distribution.
+
+               ./build-support/release/release-candidate -l m -r 1 -p
+
+6. Once the vote has successfully passed, create the release
+
+**IMPORTANT: make sure to use the correct release at this final step (e.g.: `-r 1` if rc1 candidate
+has been voted for). Once the release tag is pushed it will be very hard to undo due to remote
+git pre-receive hook explicitly forbidding release tag manipulations.**
+
+               ./build-support/release/release
+
+7. Update the draft email created from the `release` script in step #6 to include the Apache IDs for
+all binding votes and send the [RESULT][VOTE] email to the dev@ mailing list.
+
+8. Update the [Aurora Website](http://aurora.apache.org/) by following the
+[instructions](https://svn.apache.org/repos/asf/aurora/site/README.md) on the ASF Aurora SVN repo.
+Remember to add a blog post under source/blog and regenerate the site before committing.
diff --git a/source/documentation/0.22.0/development/db-migration.md b/source/documentation/0.22.0/development/db-migration.md
new file mode 100644
index 0000000..eb70400
--- /dev/null
+++ b/source/documentation/0.22.0/development/db-migration.md
@@ -0,0 +1,34 @@
+DB Migrations
+=============
+
+Changes to the DB schema should be made in the form of migrations. This ensures that all changes
+are applied correctly after a DB dump from a previous version is restored.
+
+DB migrations are managed through a system built on top of
+[MyBatis Migrations](http://www.mybatis.org/migrations/). The migrations are run automatically when
+a snapshot is restored; no manual interaction is required by cluster operators.
+
+Upgrades
+--------
+When adding or altering tables or changing data, in addition to making the change in
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql), a new
+migration class should be created under the org.apache.aurora.scheduler.storage.db.migration
+package. The class should implement the [MigrationScript](https://github.com/mybatis/migrations/blob/master/src/main/java/org/apache/ibatis/migration/MigrationScript.java)
+interface (see [V001_TestMigration](https://github.com/apache/aurora/blob/rel/0.22.0/src/test/java/org/apache/aurora/scheduler/storage/db/testmigration/V001_TestMigration.java)
+as an example). The upgrade and downgrade scripts are defined in this class. When restoring a
+snapshot the list of migrations on the classpath is compared to the list of applied changes in the
+DB. Any changes that have not yet been applied are executed and their downgrade script is stored
+alongside the changelog entry in the database to facilitate downgrades in the event of a rollback.
+
+Downgrades
+----------
+If, while running migrations, a rollback is detected, i.e. a change exists in the DB changelog that
+does not exist on the classpath, the downgrade script associated with each affected change is
+applied.
+
+Baselines
+---------
+After enough time has passed (at least 1 official release), it should be safe to baseline migrations
+if desired. This can be accomplished by ensuring the changes from migrations have been applied to
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql) and then
+removing the corresponding migration classes and adding a migration to remove the changelog entries.
\ No newline at end of file
diff --git a/source/documentation/0.22.0/development/design-documents.md b/source/documentation/0.22.0/development/design-documents.md
new file mode 100644
index 0000000..ee92a60
--- /dev/null
+++ b/source/documentation/0.22.0/development/design-documents.md
@@ -0,0 +1,23 @@
+Design Documents
+================
+
+Since Aurora's inception as an Apache project, larger feature additions to the
+code base have been discussed in the form of design documents. Design documents
+are living documents until a consensus has been reached to implement a feature
+in the proposed form.
+
+Current and past documents:
+
+* [Command Hooks for the Aurora Client](../design/command-hooks/)
+* [Dynamic Reservations](https://docs.google.com/document/d/19gV8Po6DIHO14tOC7Qouk8RnboY8UCfRTninwn_5-7c/edit)
+* [GPU Resources in Aurora](https://docs.google.com/document/d/1J9SIswRMpVKQpnlvJAMAJtKfPP7ZARFknuyXl-2aZ-M/edit)
+* [Health Checks for Updates](https://docs.google.com/document/d/1KOO0LC046k75TqQqJ4c0FQcVGbxvrn71E10wAjMorVY/edit)
+* [JobUpdateDiff thrift API](https://docs.google.com/document/d/1Fc_YhhV7fc4D9Xv6gJzpfooxbK4YWZcvzw6Bd3qVTL8/edit)
+* [REST API RFC](https://docs.google.com/document/d/11_lAsYIRlD5ETRzF2eSd3oa8LXAHYFD8rSetspYXaf4/edit)
+* [Revocable Mesos offers in Aurora](https://docs.google.com/document/d/1r1WCHgmPJp5wbrqSZLsgtxPNj3sULfHrSFmxp2GyPTo/edit)
+* [Supporting the Mesos Universal Containerizer](https://docs.google.com/document/d/111T09NBF2zjjl7HE95xglsDpRdKoZqhCRM5hHmOfTLA/edit?usp=sharing)
+* [Tier Management In Apache Aurora](https://docs.google.com/document/d/1erszT-HsWf1zCIfhbqHlsotHxWUvDyI2xUwNQQQxLgs/edit?usp=sharing)
+* [Ubiquitous Jobs](https://docs.google.com/document/d/12hr6GnUZU3mc7xsWRzMi3nQILGB-3vyUxvbG-6YmvdE/edit)
+* [Pluggable Scheduling](https://docs.google.com/document/d/1fVHLt9AF-YbOCVCDMQmi5DATVusn-tqY8DldKbjVEm0/edit)
+
+Design documents can be found in the Aurora issue tracker via the query [`project = AURORA AND text ~ "docs.google.com" ORDER BY created`](https://issues.apache.org/jira/browse/AURORA-1528?jql=project%20%3D%20AURORA%20AND%20text%20~%20%22docs.google.com%22%20ORDER%20BY%20created).
diff --git a/source/documentation/0.22.0/development/design/command-hooks.md b/source/documentation/0.22.0/development/design/command-hooks.md
new file mode 100644
index 0000000..3f3f70f
--- /dev/null
+++ b/source/documentation/0.22.0/development/design/command-hooks.md
@@ -0,0 +1,102 @@
+# Command Hooks for the Aurora Client
+
+## Introduction/Motivation
+
+We've got hooks in the client that surround API calls. These are
+pretty awkward, because they don't correlate with user actions. For
+example, suppose we wanted a policy that said users weren't allowed to
+kill all instances of a production job at once.
+
+Right now, all that we could hook would be the "killJob" api call. But
+kill (at least in newer versions of the client) normally runs in
+batches. If a user called killall, what we would see on the API level
+is a series of "killJob" calls, each of which specified a batch of
+instances. We wouldn't be able to distinguish between really killing
+all instances of a job (which is forbidden under this policy), and
+carefully killing in batches (which is permitted). In each case, the
+hook would just see a series of API calls, and couldn't find out what
+the actual command being executed was!
+
+For most policy enforcement, what we really want to be able to do is
+look at and vet the commands that a user is performing, not the API
+calls that the client uses to implement those commands.
+
+So I propose that we add a new kind of hooks, which surround noun/verb
+commands. A hook will register itself to handle a collection of (noun,
+verb) pairs. Whenever any of those noun/verb commands are invoked, the
+hooks methods will be called around the execution of the verb. A
+pre-hook will have the ability to reject a command, preventing the
+verb from being executed.
+
+## Registering Hooks
+
+These hooks will be registered via configuration plugins. A configuration plugin
+can register hooks using an API. Hooks registered this way are, effectively,
+hardwired into the client executable.
+
+The order of execution of hooks is unspecified: they may be called in
+any order. There is no way to guarantee that one hook will execute
+before some other hook.
+
+
+### Global Hooks
+
+Hooks registered by the Python call are called _global_ hooks,
+because they will run for all configurations, whether or not they
+specify any hooks in the configuration file.
+
+In the implementation, hooks are registered in the module
+`apache.aurora.client.cli.command_hooks`, using the class
+`GlobalCommandHookRegistry`. A global hook can be registered by calling
+`GlobalCommandHookRegistry.register_command_hook` in a configuration plugin.
+
+### The API
+
+    class CommandHook(object):
+      @property
+      def name(self):
+        """Returns a name for the hook."""
+
+      def get_nouns(self):
+        """Return the nouns that have verbs that should invoke this hook."""
+
+      def get_verbs(self, noun):
+        """Return the verbs for a particular noun that should invoke his hook."""
+
+      @abstractmethod
+      def pre_command(self, noun, verb, context, commandline):
+        """Execute a hook before invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        Returns: True if the command should be allowed to proceed; False if the command
+        should be rejected.
+        """
+
+      def post_command(self, noun, verb, context, commandline, result):
+        """Execute a hook after invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        * result: the result code returned by the verb.
+        Returns: nothing
+        """
+
+    class GlobalCommandHookRegistry(object):
+      @classmethod
+      def register_command_hook(cls, hook):
+        pass
+
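+For illustration, here is a hypothetical sketch of a hook written against the API proposed above.
+The class name, the rejection rule, and the exact import path of `CommandHook` and
+`GlobalCommandHookRegistry` are assumptions for this example, not part of the proposal itself:
+
+    from apache.aurora.client.cli.command_hooks import CommandHook, GlobalCommandHookRegistry
+
+    class NoProdKillallHook(CommandHook):
+      """Rejects 'job killall' invocations that target production jobs (illustrative only)."""
+
+      @property
+      def name(self):
+        return "no_prod_killall"
+
+      def get_nouns(self):
+        # Only the 'job' noun is of interest to this hook.
+        return ["job"]
+
+      def get_verbs(self, noun):
+        # Within 'job', only hook the 'killall' verb.
+        return ["killall"] if noun == "job" else []
+
+      def pre_command(self, noun, verb, context, commandline):
+        # Allow the command only if no argument references a prod environment.
+        return not any("/prod/" in arg for arg in commandline)
+
+      def post_command(self, noun, verb, context, commandline, result):
+        # Nothing to clean up after the verb runs.
+        pass
+
+    # A configuration plugin would then register the hook, making it a global hook:
+    GlobalCommandHookRegistry.register_command_hook(NoProdKillallHook())
+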
+### Skipping Hooks
+
+To skip a hook, a user uses a command-line option, `--skip-hooks`. The option can either
+specify specific hooks to skip, or "all":
+
+* `aurora --skip-hooks=all job create east/bozo/devel/myjob` will create a job
+  without running any hooks.
+* `aurora --skip-hooks=test,iq create east/bozo/devel/myjob` will create a job,
+  and will skip only the hooks named "test" and "iq".
diff --git a/source/documentation/0.22.0/development/scheduler.md b/source/documentation/0.22.0/development/scheduler.md
new file mode 100644
index 0000000..fc0836a
--- /dev/null
+++ b/source/documentation/0.22.0/development/scheduler.md
@@ -0,0 +1,118 @@
+Developing the Aurora Scheduler
+===============================
+
+The Aurora scheduler is written in Java and built with [Gradle](http://gradle.org).
+
+
+Prerequisite
+============
+
+When using Apache Aurora checked out from the source repository or the binary
+distribution, the Gradle wrapper and JavaScript dependencies are provided.
+However, you need to manually install them when using the source release
+downloads:
+
+1. Install Gradle following the instructions on the [Gradle web site](http://gradle.org)
+2. From the root directory of the Apache Aurora project generate the Gradle
+wrapper by running:
+
+    gradle wrapper
+
+
+Getting Started
+===============
+
+You will need Java 8 installed and on your `PATH` or unzipped somewhere with `JAVA_HOME` set. Then
+
+    ./gradlew tasks
+
+will bootstrap the build system and show available tasks. This can take a while the first time you
+run it but subsequent runs will be much faster due to cached artifacts.
+
+Running the Tests
+-----------------
+Aurora has a comprehensive unit test suite. To run the tests use
+
+    ./gradlew build
+
+Gradle will only re-run tests when their dependencies have changed. To force a re-run of all
+tests use
+
+    ./gradlew clean build
+
+Running the build with code quality checks
+------------------------------------------
+To speed up development iteration, the plain gradle commands will not run static analysis tools.
+However, you should run them before posting a review diff, and **always** run them before pushing a
+commit to origin/master:
+
+    ./gradlew build -Pq
+
+Running integration tests
+-------------------------
+To run the same tests that are run in the Apache Aurora continuous integration
+environment:
+
+    ./build-support/jenkins/build.sh
+
+In addition, there is an end-to-end test that runs a suite of aurora commands
+using a virtual cluster:
+
+    ./src/test/sh/org/apache/aurora/e2e/test_end_to_end.sh
+
+Creating a bundle for deployment
+--------------------------------
+Gradle can create a zip file containing Aurora, all of its dependencies, and a launch script with
+
+    ./gradlew distZip
+
+or a tar file containing the same files with
+
+    ./gradlew distTar
+
+The output file will be written to `dist/distributions/aurora-scheduler.zip` or
+`dist/distributions/aurora-scheduler.tar`.
+
+
+
+Developing Aurora Java code
+===========================
+
+Setting up an IDE
+-----------------
+Gradle can generate project files for your IDE. To generate an IntelliJ IDEA project run
+
+    ./gradlew idea
+
+and import the generated `aurora.ipr` file.
+
+Adding or Upgrading a Dependency
+--------------------------------
+New dependencies can be added from Maven central by adding a `compile` dependency to `build.gradle`.
+For example, to add a dependency on `com.example`'s `example-lib` 1.0 add this block:
+
+    compile 'com.example:example-lib:1.0'
+
+NOTE: Anyone thinking about adding a new dependency should first familiarize themselves with the
+Apache Foundation's third-party licensing
+[policy](http://www.apache.org/legal/resolved.html#category-x).
+
+
+
+Developing the Aurora Build System
+==================================
+
+Bootstrapping Gradle
+--------------------
+The following files were autogenerated by `gradle wrapper` using gradle's
+[Wrapper](http://www.gradle.org/docs/current/dsl/org.gradle.api.tasks.wrapper.Wrapper.html) plugin and
+should not be modified directly:
+
+    ./gradlew
+    ./gradlew.bat
+    ./gradle/wrapper/gradle-wrapper.jar
+    ./gradle/wrapper/gradle-wrapper.properties
+
+To upgrade Gradle unpack the new version somewhere, run `/path/to/new/gradle wrapper` in the
+repository root and commit the changed files.
+
diff --git a/source/documentation/0.22.0/development/thermos.md b/source/documentation/0.22.0/development/thermos.md
new file mode 100644
index 0000000..0efe130
--- /dev/null
+++ b/source/documentation/0.22.0/development/thermos.md
@@ -0,0 +1,126 @@
+The Python components of Aurora are built using [Pants](https://pantsbuild.github.io).
+
+
+Python Build Conventions
+========================
+The Python code is laid out according to the following conventions:
+
+1. 1 `BUILD` per 3rd level directory. For a list of current top-level packages run:
+
+        % find src/main/python -maxdepth 3 -mindepth 3 -type d |\
+        while read dname; do echo $dname |\
+            sed 's@src/main/python/\(.*\)/\(.*\)/\(.*\).*@\1.\2.\3@'; done
+
+2.  Each `BUILD` file exports 1
+    [`python_library`](https://pantsbuild.github.io/build_dictionary.html#bdict_python_library)
+    that provides a
+    [`setup_py`](https://pantsbuild.github.io/build_dictionary.html#setup_py)
+    containing each
+    [`python_binary`](https://pantsbuild.github.io/build_dictionary.html#python_binary)
+    in the `BUILD` file, named the same as the directory it's in so that it can be referenced
+    without a ':' character. The `sources` field in the `python_library` will almost always be
+    `rglobs('*.py')`.
+
+3.  Other BUILD files may only depend on this single public `python_library`
+    target. Any other target is considered a private implementation detail and
+    should be prefixed with an `_`.
+
+4.  `python_binary` targets are always named the same as the exported console script.
+
+5.  `python_binary` targets must have identical `dependencies` to the `python_library` exported
+    by the package and must use `entry_point`.
+
+    This means a PEX file generated by pants will contain exactly the same files that will be
+    available on the `PYTHONPATH` in the case of `pip install` of the corresponding library
+    target. This will help our migration off of Pants in the future.
+
+Annotated example - apache.thermos.runner
+-----------------------------------------
+
+    % find src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner/__init__.py
+    src/main/python/apache/thermos/runner/thermos_runner.py
+    src/main/python/apache/thermos/runner/BUILD
+    % cat src/main/python/apache/thermos/runner/BUILD
+    # License boilerplate omitted
+    import os
+
+
+    # Private target so that a setup_py can exist without a circular dependency. Only targets within
+    # this file should depend on this.
+    python_library(
+      name = '_runner',
+      # The target covers every python file under this directory and subdirectories.
+      sources = rglobs('*.py'),
+      dependencies = [
+        '3rdparty/python:twitter.common.app',
+        '3rdparty/python:twitter.common.log',
+        # Source dependencies are always referenced without a ':'.
+        'src/main/python/apache/thermos/common',
+        'src/main/python/apache/thermos/config',
+        'src/main/python/apache/thermos/core',
+      ],
+    )
+
+    # Binary target for thermos_runner.pex. Nothing should depend on this - it's only used as an
+    # argument to ./pants binary.
+    python_binary(
+      name = 'thermos_runner',
+      # Use entry_point, not source so the files used here are the same ones tests see.
+      entry_point = 'apache.thermos.bin.thermos_runner',
+      dependencies = [
+        # Notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+    )
+
+    # The public library that everyone importing the runner symbols uses.
+    # The test targets and any other dependent source code should depend on this.
+    python_library(
+      name = 'runner',
+      dependencies = [
+        # Again, notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+      # We always provide a setup_py. This will cause any dependee libraries to automatically
+      # reference this library in their requirements.txt rather than copy the source files into their
+      # sdist.
+      provides = setup_py(
+        # Conventionally named and versioned.
+        name = 'apache.thermos.runner',
+        version = open(os.path.join(get_buildroot(), '.auroraversion')).read().strip().upper(),
+      ).with_binaries({
+        # Every binary in this file should also be repeated here.
+        # Always use the dict-form of .with_binaries so that commands with dashes in their names are
+        # supported.
+        # The console script name is always the same as the PEX with .pex stripped.
+        'thermos_runner': ':thermos_runner',
+      }),
+    )
+
+
+
+Thermos Test resources
+======================
+
+The Aurora source repository and distributions contain several
+[binary files](../../src/test/resources/org/apache/thermos/root/checkpoints) to
+qualify the backwards-compatibility of thermos with checkpoint data. Since
+thermos persists state to disk, to be read by the thermos observer, it is important that we have
+tests that prevent regressions affecting the ability to parse previously-written data.
+
+The files included represent persisted checkpoints that exercise different
+features of thermos. The existing files should not be modified unless
+we are accepting backwards incompatibility, such as with a major release.
+
+It is not practical to write source code to generate these files on the fly,
+as source would be vulnerable to drift (e.g. due to refactoring) in ways
+that would undermine the goal of ensuring backwards compatibility.
+
+The most common reason to add a new checkpoint file would be to provide
+coverage for new thermos features that alter the data format. This is
+accomplished by writing and running a
+[job configuration](../../reference/configuration/) that exercises the feature, and
+copying the checkpoint file from the sandbox directory; by default this is
+`/var/run/thermos/checkpoints/<aurora task id>`.
diff --git a/source/documentation/0.22.0/development/thrift.md b/source/documentation/0.22.0/development/thrift.md
new file mode 100644
index 0000000..81b74b9
--- /dev/null
+++ b/source/documentation/0.22.0/development/thrift.md
@@ -0,0 +1,54 @@
+Thrift
+======
+
+Aurora uses [Apache Thrift](https://thrift.apache.org/) for representing structured data in
+client/server RPC protocol as well as for internal data storage. While Thrift is capable of
+correctly handling additions and renames of the existing members, field removals must be done
+carefully to ensure backwards compatibility and provide a predictable deprecation cycle. This
+document describes general guidelines for making Thrift schema changes to the existing fields in
+[api.thrift](https://github.com/apache/aurora/blob/rel/0.22.0/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+It is highly recommended to go through the
+[Thrift: The Missing Guide](http://diwakergupta.github.io/thrift-missing-guide/) first to refresh on
+basic Thrift schema concepts.
+
+Checklist
+---------
+Every existing Thrift schema modification is unique in its requirements and must be analyzed
+carefully to identify its scope and expected consequences. The following checklist may help in that
+analysis:
+* Is this a new field/struct? If yes, go ahead
+* Is this a pure field/struct rename without any type/structure change? If yes, go ahead and rename
+* Anything else, read further to make sure your change is properly planned
+
+Deprecation cycle
+-----------------
+Any time a breaking change (e.g.: field replacement or removal) is required, the following cycle
+must be followed:
+
+### vCurrent
+The change is applied in a way that does not prevent a scheduler/client at this version from
+communicating with a scheduler/client at vCurrent-1.
+* Do not remove or rename the old field
+* Add a new field as an eventual replacement of the old one and implement a dual read/write
+anywhere the old field is used. If a thrift struct is mapped in the DB store, make sure both columns
+are marked as `NOT NULL`.
+* Check [storage.thrift](https://github.com/apache/aurora/blob/rel/0.22.0/api/src/main/thrift/org/apache/aurora/gen/storage.thrift) to see if
+the affected struct is stored in Aurora scheduler storage. If so, it's almost certainly also
+necessary to perform a [DB migration](../db-migration/).
+* Add a deprecation jira ticket into the vCurrent+1 release candidate
+* Add a TODO for the deprecated field mentioning the jira ticket
+
+### vCurrent+1
+Finalize the change by removing the deprecated fields from the Thrift schema.
+* Drop any dual read/write routines added in the previous version
+* Remove thrift backfilling in scheduler
+* Remove the deprecated Thrift field
+
+Testing
+-------
+It's always advisable to test your changes in the local vagrant environment to build more
+confidence that your change is backwards compatible. It's easy to simulate different
+client/scheduler versions by playing with `aurorabuild` command. See [this document](../../getting-started/vagrant/)
+for more.
+
diff --git a/source/documentation/0.22.0/development/ui.md b/source/documentation/0.22.0/development/ui.md
new file mode 100644
index 0000000..f41614e
--- /dev/null
+++ b/source/documentation/0.22.0/development/ui.md
@@ -0,0 +1,46 @@
+Developing the Aurora Scheduler UI
+==================================
+
+Installing bower (optional)
+----------------------------
+Third party JS libraries used in Aurora (located at 3rdparty/javascript/bower_components) are
+managed by bower, a JS dependency manager. Bower is only required if you plan to add, remove or
+update JS libraries. Bower can be installed using the following command:
+
+    npm install -g bower
+
+Bower depends on node.js and npm. The easiest way to install node on a Mac is via brew:
+
+    brew install node
+
+For more node.js installation options refer to https://github.com/joyent/node/wiki/Installation.
+
+More info on installing and using bower can be found at: http://bower.io/. Once installed, you can
+use the following commands to view and modify the bower repo at
+3rdparty/javascript/bower_components
+
+    bower list
+    bower install <library name>
+    bower remove <library name>
+    bower update <library name>
+    bower help
+
+
+Faster Iteration in Vagrant
+---------------------------
+The scheduler serves UI assets from the classpath. For production deployments this means the assets
+are served from within a jar. However, for faster development iteration, the vagrant image is
+configured to add the `scheduler` subtree of `/vagrant/dist/resources/main` to the head of
+`CLASSPATH`. This path is configured as a shared filesystem to the path on the host system where
+your Aurora repository lives. This means that any updates under `dist/resources/main/scheduler` in
+your checkout will be reflected immediately in the UI served from within the vagrant image.
+
+The one caveat to this is that this path is under `dist`, not `src`. This is because the assets must
+be processed by gradle before they can be served. So, unfortunately, you cannot just save your local
+changes and see them reflected in the UI; you must first run `./gradlew processResources`. This is
+less than ideal, but better than having to restart the scheduler after every change. Additionally,
+gradle makes this process somewhat easier with the use of the `--continuous` flag. If you run
+`./gradlew processResources --continuous`, gradle will monitor the filesystem for changes and run the
+task automatically as necessary. This doesn't quite provide hot-reload capabilities, but it does
+allow for <5s from save to changes being visible in the UI with no further action required on the
+part of the developer.
diff --git a/source/documentation/0.22.0/features/constraints.md b/source/documentation/0.22.0/features/constraints.md
new file mode 100644
index 0000000..4ade848
--- /dev/null
+++ b/source/documentation/0.22.0/features/constraints.md
@@ -0,0 +1,126 @@
+Scheduling Constraints
+======================
+
+By default, Aurora will pick any random agent with sufficient resources
+in order to schedule a task. This scheduling choice can be further
+restricted with the help of constraints.
+
+
+Mesos Attributes
+----------------
+
+Data centers are often organized with hierarchical failure domains.  Common failure domains
+include hosts, racks, rows, and PDUs.  If you have this information available, it is wise to tag
+the Mesos agent with them as
+[attributes](https://mesos.apache.org/documentation/attributes-resources/).
+
+The Mesos agent `--attributes` command line argument can be used to mark agents with
+static key/value pairs, so called attributes (not to be confused with `--resources`, which are
+dynamic and accounted).
+
+For example, consider the host `cluster1-aaa-03-sr2` and its following attributes (given in
+key:value format): `host:cluster1-aaa-03-sr2` and `rack:aaa`.
+
+Aurora makes these attributes available for matching with scheduling constraints.
+
+
+Limit Constraints
+-----------------
+
+Limit constraints allow you to control machine diversity. The below
+constraint ensures that no more than two instances of your job may run on a single host.
+Think of this as a "group by" limit.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'host': 'limit:2',
+      }
+      ...
+    )
+
+
+Likewise, you can use constraints to control rack diversity, e.g. at
+most one task per rack:
+
+    constraints = {
+      'rack': 'limit:1',
+    }
+
+Use these constraints sparingly as they can dramatically reduce Tasks' schedulability.
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+
+Value Constraints
+-----------------
+
+Value constraints can be used to express that a certain attribute with a certain value
+should be present on a Mesos agent. For example, the following job would only be
+scheduled on nodes that claim to have an `SSD` as their disk.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'disk': 'SSD',
+      }
+      ...
+    )
+
+
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+Running stateful services
+-------------------------
+
+Aurora is best suited to run stateless applications, but it also accommodates stateful services
+like databases, or services that otherwise need to always run on the same machines.
+
+### Dedicated attribute
+
+Most Mesos attributes are arbitrary and available for custom use.  There is one exception,
+though: the `dedicated` attribute.  Aurora treats this attribute specially: only matching jobs are
+allowed to run on these machines, and matching jobs will only be scheduled on these machines.
+
+
+#### Syntax
+The dedicated attribute has semantic meaning. The format is `$role(/.*)?`. When a job is created,
+the scheduler requires that the `$role` component matches the `role` field in the job
+configuration, and will reject the job creation otherwise.  The remainder of the attribute is
+free-form. We've developed the idiom of formatting this attribute as `$role/$job`, but do not
+enforce this. For example: a job `devcluster/www-data/prod/hello` with a dedicated constraint set as
+`www-data/web.multi` will have its tasks scheduled only on Mesos agents configured with:
+`--attributes=dedicated:www-data/web.multi`.
+
+A wildcard (`*`) may be used for the role portion of the dedicated attribute, which will allow any
+owner to elect for a job to run on the host(s). For example: tasks from both
+`devcluster/www-data/prod/hello` and `devcluster/vagrant/test/hello` with a dedicated constraint
+formatted as `*/web.multi` will be scheduled only on Mesos agents configured with
+`--attributes=dedicated:*/web.multi`. This may be useful when assembling a virtual cluster of
+machines sharing the same set of traits or requirements.
+
+##### Example
+Consider the following agent command line:
+
+    mesos-slave --attributes="dedicated:db_team/redis" ...
+
+And this job configuration:
+
+    Service(
+      name = 'redis',
+      role = 'db_team',
+      constraints = {
+        'dedicated': 'db_team/redis'
+      }
+      ...
+    )
+
+The job configuration is indicating that it should only be scheduled on agents with the attribute
+`dedicated:db_team/redis`.  Additionally, Aurora will prevent any tasks that do _not_ have that
+constraint from running on those agents.
+
diff --git a/source/documentation/0.22.0/features/containers.md b/source/documentation/0.22.0/features/containers.md
new file mode 100644
index 0000000..5bf1601
--- /dev/null
+++ b/source/documentation/0.22.0/features/containers.md
@@ -0,0 +1,130 @@
+Containers
+==========
+
+Aurora supports several containerizers, notably the Mesos containerizer and the Docker
+containerizer. The Mesos containerizer uses native OS features directly to provide isolation between
+containers, while the Docker containerizer delegates container management to the Docker engine.
+
+The support for launching container images via both containerizers has to be
+[enabled by a cluster operator](../../operations/configuration/#containers).
+
+Mesos Containerizer
+-------------------
+
+The Mesos containerizer is the native Mesos containerizer solution. It allows tasks to be
+run with an array of [pluggable isolators](../resource-isolation/) and can launch tasks using
+[Docker](https://github.com/docker/docker/blob/master/image/spec/v1.md) images,
+[AppC](https://github.com/appc/spec/blob/master/SPEC.md) images, or directly on the agent host
+filesystem.
+
+The following example (available in our [Vagrant environment](../../getting-started/vagrant/))
+launches a hello world example within a `debian/jessie` Docker image:
+
+    $ cat /vagrant/examples/jobs/hello_docker_image.aurora
+    hello_loop = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = Task(
+      processes = [hello_loop],
+      resources = Resources(cpu=1, ram=1*MB, disk=8*MB)
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker_image',
+        task = task,
+        container = Mesos(image=DockerImage(name='debian', tag='jessie'))
+      )
+    ]
+
+Docker and Appc images are designated using an appropriate `image` property of the `Mesos`
+configuration object. If either `container` or `image` is left unspecified, the host filesystem
+will be used. Further details of how to specify images can be found in the
+[Reference Documentation](../../reference/configuration/#mesos-object).
+
+By default, Aurora launches processes as the Linux user named like the used role (e.g. `www-data`
+in the example above). This user has to exist on the host filesystem. If it does not exist within
+the container image, it will be created automatically. Otherwise, this user and its primary group
+have to exist in the image with matching uid/gid.
+
+For more information on the Mesos containerizer filesystem, namespace, and isolator features, visit
+[Mesos Containerizer](http://mesos.apache.org/documentation/latest/mesos-containerizer/) and
+[Mesos Container Images](http://mesos.apache.org/documentation/latest/container-image/).
+
+
+Docker Containerizer
+--------------------
+
+The Docker containerizer launches container images using the Docker engine. It may often provide
+more advanced features than the native Mesos containerizer, but has to be installed separately to
+Mesos on each agent host.
+
+Starting with the 0.17.0 release, `image` can be specified with a `{{docker.image[name][tag]}}` binder so that
+the tag can be resolved to a concrete image digest. This ensures that the job always uses the same image
+across restarts, even if the version identified by the tag has been updated, guaranteeing that only job
+updates can mutate configuration.
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/hello_docker_engine.aurora
+    hello_loop = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = Task(
+      processes = [hello_loop],
+      resources = Resources(cpu=1, ram=1*MB, disk=8*MB)
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker',
+        task = task,
+        container = Docker(image = 'python:2.7')
+      ), Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker_engine_binding',
+        task = task,
+        container = Docker(image = '{{docker.image[library/python][2.7]}}')
+      )
+    ]
+
+Note that this feature requires a v2 Docker registry. If you are using a private Docker registry, its URL
+must be specified in the `clusters.json` configuration file under the key `docker_registry`.
+If not specified `docker_registry` defaults to `https://registry-1.docker.io` (Docker Hub).
+
+Example:
+
+    # clusters.json
+    [{
+      "name": "devcluster",
+      ...
+      "docker_registry": "https://registry.example.com"
+    }]
+
+Details of how to use Docker via the Docker engine can be found in the
+[Reference Documentation](../../reference/configuration/#docker-object). Please note that in order to
+correctly execute processes inside a job, the Docker container must have Python 2.7 and potentially
+further Mesos dependencies installed. This limitation does not hold for Docker containers used via
+the Mesos containerizer.
+
+For more information on launching Docker containers through the Docker containerizer, visit
+[Docker Containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/).
diff --git a/source/documentation/0.22.0/features/cron-jobs.md b/source/documentation/0.22.0/features/cron-jobs.md
new file mode 100644
index 0000000..bcc94dc
--- /dev/null
+++ b/source/documentation/0.22.0/features/cron-jobs.md
@@ -0,0 +1,124 @@
+# Cron Jobs
+
+Aurora supports execution of scheduled jobs on a Mesos cluster using cron-style syntax.
+
+- [Overview](#overview)
+- [Collision Policies](#collision-policies)
+- [Failure recovery](#failure-recovery)
+- [Interacting with cron jobs via the Aurora CLI](#interacting-with-cron-jobs-via-the-aurora-cli)
+	- [cron schedule](#cron-schedule)
+	- [cron deschedule](#cron-deschedule)
+	- [cron start](#cron-start)
+	- [job killall, job restart, job kill](#job-killall-job-restart-job-kill)
+- [Technical Note About Syntax](#technical-note-about-syntax)
+- [Caveats](#caveats)
+	- [Failovers](#failovers)
+	- [Collision policy is best-effort](#collision-policy-is-best-effort)
+	- [Timezone Configuration](#timezone-configuration)
+
+## Overview
+
+A job is identified as a cron job by the presence of a
+`cron_schedule` attribute containing a cron-style schedule in the
+[`Job`](../../reference/configuration/#job-objects) object. Examples of cron schedules
+include "every 5 minutes" (`*/5 * * * *`), "Fridays at 17:00" (`* 17 * * FRI`), and
+"the 1st and 15th day of the month at 03:00" (`0 3 1,15 *`).
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/cron_hello_world.aurora
+    # A cron job that runs every 5 minutes.
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]
+
+## Collision Policies
+
+The `cron_collision_policy` field specifies the scheduler's behavior when a new cron job is
+triggered while an older run hasn't finished. The scheduler has two policies available:
+
+* `KILL_EXISTING`: The default policy - on a collision the old instances are killed and instances with the current
+configuration are started.
+* `CANCEL_NEW`: On a collision the new run is cancelled.
+
+Note that the use of `CANCEL_NEW` is likely a code smell - interrupted cron jobs should be able
+to recover their progress on a subsequent invocation, otherwise they risk having their work queue
+grow faster than they can process it.
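+
+For illustration, here is a variant of the `cron_hello_world` example above that selects the
+`CANCEL_NEW` policy via the `cron_collision_policy` field (a sketch only; the rest of the job is
+copied from the earlier example):
+
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        # Cancel a newly triggered run if the previous one is still in flight.
+        cron_collision_policy = 'CANCEL_NEW',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]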
+
+## Failure recovery
+
+Unlike with services, which Aurora will always re-execute regardless of exit status, instances of
+cron jobs retry according to the `max_task_failures` attribute of the
+[Task](../../reference/configuration/#task-object) object. To get "run-until-success" semantics,
+set `max_task_failures` to `-1`.
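+
+For illustration, a minimal sketch of such a task; the process name and command line are
+hypothetical:
+
+    rollup = Process(name = 'rollup', cmdline = './run_nightly_rollup.sh')
+
+    task = Task(
+      processes = [rollup],
+      resources = Resources(cpu = 1, ram = 128*MB, disk = 64*MB),
+      max_task_failures = -1,  # Retry the task until it succeeds.
+    )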
+
+## Interacting with cron jobs via the Aurora CLI
+
+Most interaction with cron jobs takes place using the `cron` subcommand. See `aurora cron -h`
+for up-to-date usage instructions.
+
+### cron schedule
+Schedules a new cron job on the Aurora cluster for later runs or replaces the existing cron template
+with a new one. Only future runs will be affected, any existing active tasks are left intact.
+
+    $ aurora cron schedule devcluster/www-data/test/cron_hello_world /vagrant/examples/jobs/cron_hello_world.aurora
+
+### cron deschedule
+Deschedules a cron job, preventing future runs but allowing current runs to complete.
+
+    $ aurora cron deschedule devcluster/www-data/test/cron_hello_world
+
+### cron start
+Start a cron job immediately, outside of its normal cron schedule.
+
+    $ aurora cron start devcluster/www-data/test/cron_hello_world
+
+### job killall, job restart, job kill
+Cron jobs create instances running on the cluster that you can interact with like normal Aurora
+tasks, using commands such as `job killall`, `job restart`, and `job kill`.
+
+
+## Technical Note About Syntax
+
+`cron_schedule` uses a restricted subset of BSD crontab syntax. While the
+execution engine currently uses Quartz, the schedule parsing is custom and follows a subset of FreeBSD
+[crontab(5)](http://www.freebsd.org/cgi/man.cgi?crontab(5)) syntax. See
+[the source](https://github.com/apache/aurora/blob/master/src/main/java/org/apache/aurora/scheduler/cron/CrontabEntry.java#L106-L124)
+for details.
+
+
+## Caveats
+
+### Failovers
+No failover recovery. Aurora does not record the latest minute it fired
+triggers for across failovers. Therefore it's possible to miss triggers
+on failover. Note that this behavior may change in the future.
+
+It's necessary to sync time between schedulers with something like `ntpd`.
+Clock skew could cause double or missed triggers in the case of a failover.
+
+### Collision policy is best-effort
+Aurora aims to always have *at least one copy* of a given instance running at a time - it's
+an AP system, meaning it chooses Availability and Partition Tolerance at the expense of
+Consistency.
+
+If your collision policy was `CANCEL_NEW` and a task has terminated but
+Aurora has not noticed this, Aurora will go ahead and create your new
+task.
+
+If your collision policy was `KILL_EXISTING` and a task was marked `LOST`
+but not yet GCed, Aurora will go ahead and create your new task without
+attempting to kill the old one (outside the GC interval).
+
+### Timezone Configuration
+Cron timezone is configured independently of JVM timezone with the `-cron_timezone` flag and
+defaults to UTC.
diff --git a/source/documentation/0.22.0/features/custom-executors.md b/source/documentation/0.22.0/features/custom-executors.md
new file mode 100644
index 0000000..da3f2a2
--- /dev/null
+++ b/source/documentation/0.22.0/features/custom-executors.md
@@ -0,0 +1,166 @@
+Custom Executors
+================
+
+If the need arises to use a Mesos executor other than the Thermos executor, the scheduler can be
+configured to utilize a custom executor by specifying the `-custom_executor_config` flag.
+The flag must be set to the path of a valid executor configuration file.
+
+The configuration file must be a valid **JSON array** and contain, at minimum,
+one executor configuration including the name, command and resources fields and
+must be pointed to by the `-custom_executor_config` flag when the scheduler is
+started.
+
+### Array Entry
+
+Property                 | Description
+-----------------------  | ---------------------------------
+executor (required)      | Description of executor.
+task_prefix (required)   | Prefix given to tasks launched with this executor's configuration.
+volume_mounts (optional) | Volumes to be mounted in container running executor.
+
+#### executor
+
+Property                 | Description
+-----------------------  | ---------------------------------
+name (required)          | Name of the executor.
+command (required)       | How to run the executor.
+resources (required)     | Overhead to use for each executor instance.
+
+#### command
+
+Property                 | Description
+-----------------------  | ---------------------------------
+value (required)         | The command to execute.
+arguments (optional)     | A list of arguments to pass to the command.
+uris (optional)          | List of resources to download into the task sandbox.
+shell (optional)         | Run executor via shell.
+
+A note on the command property (from [mesos.proto](https://github.com/apache/mesos/blob/master/include/mesos/mesos.proto)):
+
+```
+1) If 'shell == true', the command will be launched via shell
+   (i.e., /bin/sh -c 'value'). The 'value' specified will be
+   treated as the shell command. The 'arguments' will be ignored.
+2) If 'shell == false', the command will be launched by passing
+   arguments to an executable. The 'value' specified will be
+   treated as the filename of the executable. The 'arguments'
+   will be treated as the arguments to the executable. This is
+   similar to how POSIX exec families launch processes (i.e.,
+   execlp(value, arguments(0), arguments(1), ...)).
+```
+
+##### uris (list)
+* Follows the [Mesos Fetcher schema](http://mesos.apache.org/documentation/latest/fetcher/)
+
+Property                 | Description
+-----------------------  | ---------------------------------
+value (required)         | Path to the resource needed in the sandbox.
+executable (optional)    | Change resource to be executable via chmod.
+extract (optional)       | Extract files from packed or compressed archives into the sandbox.
+cache (optional)         | Use caching mechanism provided by Mesos for resources.
+
+#### resources (list)
+
+Property             | Description
+-------------------  | ---------------------------------
+name (required)      | Name of the resource: cpus or mem.
+type (required)      | Type of resource. Should always be SCALAR.
+scalar (required)    | Value as a float for cpus or an int for mem (in MB).
+
+### volume_mounts (list)
+
+Property                     | Description
+---------------------------  | ---------------------------------
+host_path (required)         | Host path to mount inside the container.
+container_path (required)    | Path inside the container where `host_path` will be mounted.
+mode (required)              | Mode in which to mount the volume, Read-Write (RW) or Read-Only (RO).
+
+A sample configuration is as follows:
+
+```json
+[
+    {
+      "executor": {
+        "name": "myExecutor",
+        "command": {
+          "value": "myExecutor.a",
+          "shell": false,
+          "arguments": [
+            "localhost:2181",
+            "-verbose",
+            "-config myConfiguration.config"
+          ],
+          "uris": [
+            {
+              "value": "/dist/myExecutor.a",
+              "executable": true,
+              "extract": false,
+              "cache": true
+            },
+            {
+              "value": "/home/user/myConfiguration.config",
+              "executable": false,
+              "extract": false,
+              "cache": false
+            }
+          ]
+        },
+        "resources": [
+          {
+            "name": "cpus",
+            "type": "SCALAR",
+            "scalar": {
+              "value": 1.00
+            }
+          },
+          {
+            "name": "mem",
+            "type": "SCALAR",
+            "scalar": {
+              "value": 512
+            }
+          }
+        ]
+      },
+      "volume_mounts": [
+        {
+          "mode": "RO",
+          "container_path": "/path/on/container",
+          "host_path": "/path/to/host/directory"
+        },
+        {
+          "mode": "RW",
+          "container_path": "/container",
+          "host_path": "/host"
+        }
+      ],
+      "task_prefix": "my-executor-"
+    }
+]
+```
+
+It should be noted that if you do not use Thermos or a Thermos-based executor, links in the scheduler's
+Web UI for tasks will not work (at least for the time being).
+Some information about launched tasks can still be accessed via the Mesos Web UI or via the Aurora Client.
+
+### Using a custom executor
+
+To launch tasks using a custom executor,
+an [ExecutorConfig](../../reference/configuration/#executorconfig-objects) object must be added to
+the Job or Service object. The `name` parameter of ExecutorConfig must match the name of an executor
+defined in the JSON object provided to the scheduler at startup time.
+
+For example, if we desire to launch tasks using `myExecutor` (defined above), we may do so in
+the following manner:
+
+```
+jobs = [Service(
+  task = task,
+  cluster = 'devcluster',
+  role = 'www-data',
+  environment = 'prod',
+  name = 'hello',
+  executor_config = ExecutorConfig(name='myExecutor'))]
+```
+
+This will create a Service Job which will launch tasks using `myExecutor` instead of Thermos.
diff --git a/source/documentation/0.22.0/features/job-updates.md b/source/documentation/0.22.0/features/job-updates.md
new file mode 100644
index 0000000..a532e2e
--- /dev/null
+++ b/source/documentation/0.22.0/features/job-updates.md
@@ -0,0 +1,123 @@
+Aurora Job Updates
+==================
+
+`Job` configurations can be updated at any point in their lifecycle.
+Usually updates are done incrementally using a process called a *rolling
+upgrade*, in which Tasks are upgraded in small groups, one group at a
+time.  Updates are done using various Aurora Client commands.
+
+
+Rolling Job Updates
+-------------------
+
+There are several sub-commands to manage job updates:
+
+    aurora update start <job key> <configuration file>
+    aurora update info <job key>
+    aurora update pause <job key>
+    aurora update resume <job key>
+    aurora update abort <job key>
+    aurora update list <cluster>
+
+When you `start` a job update, the command will return once it has sent the
+instructions to the scheduler.  At that point, you may view detailed
+progress for the update with the `info` subcommand, in addition to viewing
+graphical progress in the web browser.  You may also get a full listing of
+in-progress updates in a cluster with `list`.
+
+Once an update has been started, you can `pause` to keep the update but halt
+progress.  This can be useful for doing things like debugging a partially-updated
+job to determine whether you would like to proceed.  You can `resume` to
+proceed.
+
+You may `abort` a job update regardless of the state it is in. This will
+instruct the scheduler to completely abandon the job update and leave the job
+in the current (possibly partially-updated) state.
+
+For a configuration update, the Aurora Scheduler calculates required changes
+by examining the current job config state and the new desired job config.
+It then starts a *rolling batched update process* by going through every batch
+and performing these operations, in order:
+
+- If an instance is not present in the scheduler but is present in
+  the new config, then the instance is created.
+- If an instance is present in both the scheduler and the new config, then
+  the scheduler diffs both task configs. If it detects any changes, it
+  performs an instance update by killing the old config instance and adding
+  the new config instance.
+- If an instance is present in the scheduler but isn't in the new config,
+  then that instance is killed.
+
+The Aurora Scheduler continues through the instance list until all tasks are
+updated and in `RUNNING`. If the scheduler determines the update is not going
+well (based on the criteria specified in the UpdateConfig), it cancels the update.
+
+Update cancellation runs a procedure similar to the update sequence described
+above, but in reverse order. New instance configs are swapped
+with old instance configs and batch updates proceed backwards
+from the point where the update failed. For example, (0,1,2) (3,4,5) (6,7,
+8-FAIL) results in a rollback in the order (8,7,6) (5,4,3) (2,1,0).
+
+For details on how to control a job update, please see the
+[UpdateConfig](../../reference/configuration/#updateconfig-objects) configuration object.
+
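+For illustration, a hedged sketch of attaching an update policy to a job through its `update_config`
+field (the values are arbitrary and `task` is assumed to be defined elsewhere in the config):
+
+    update_config = UpdateConfig(
+      batch_size = 2,               # update two instances per batch
+      max_per_shard_failures = 1,   # restarts allowed per instance before it counts as failed
+      max_total_failures = 0        # any permanently failed instance fails the whole update
+    )
+
+    jobs = [Service(
+      task = task,
+      cluster = 'devcluster',
+      role = 'www-data',
+      environment = 'prod',
+      name = 'hello',
+      update_config = update_config
+    )]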
+
+Coordinated Job Updates
+------------------------
+
+Some Aurora services may benefit from having more control over updates by explicitly
+acknowledging ("heartbeating") job update progress. This may be helpful for mission-critical
+service updates where explicit job health monitoring is vital during the entire job update
+lifecycle. Such job updates would rely on an external service (or a custom client) periodically
+pulsing an active coordinated job update via a
+[pulseJobUpdate RPC](https://github.com/apache/aurora/blob/rel/0.22.0/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+A coordinated update is defined by setting a positive
+[pulse_interval_secs](../../reference/configuration/#updateconfig-objects) value in the job configuration
+file. If no pulses are received within the specified interval, the update will be blocked. A blocked
+update is unable to continue rolling forward (or rolling back) but retains its active status.
+It may only be unblocked by a fresh `pulseJobUpdate` call.
+
+NOTE: A coordinated update starts in `ROLL_FORWARD_AWAITING_PULSE` state and will not make any
+progress until the first pulse arrives. However, a paused update (`ROLL_FORWARD_PAUSED` or
+`ROLL_BACK_PAUSED`) is still considered active and upon resuming will immediately make progress
+provided the pulse interval has not expired.
+
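+A minimal sketch of opting in (the interval value is illustrative):
+
+    update_config = UpdateConfig(
+      pulse_interval_secs = 60   # block the update if no pulseJobUpdate call arrives within 60s
+    )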
+
+SLA-Aware Updates
+-----------------
+
+Updates can take advantage of [Custom SLA Requirements](../../features/sla-requirements/) and
+specify the `sla_aware=True` option within
+[UpdateConfig](../../reference/configuration/#updateconfig-objects) to only update instances if
+the action will maintain the task's SLA requirements. This feature allows updates to avoid killing
+too many instances in the face of unexpected failures outside of the update range.
+
+See the [Using the `sla_aware` option](../../reference/configuration/#using-the-sla-aware-option) section
+for more information on how to use this feature.
+
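+For illustration, the opt-in itself is a single field (shown here in isolation):
+
+    update_config = UpdateConfig(
+      sla_aware = True   # only act on an instance if doing so keeps the job within its SLA policy
+    )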
+
+Canary Deployments
+------------------
+
+Canary deployments are a pattern for rolling out updates to a subset of job instances,
+in order to test different code versions alongside the actual production job.
+It is a risk-mitigation strategy for job owners and commonly used in a form where
+job instance 0 runs with a different configuration than the instances 1-N.
+
+For example, consider a job with 4 instances that each
+request 1 core of cpu, 1 GB of RAM, and 1 GB of disk space as specified
+in the configuration file `hello_world.aurora`. If you want to
+update it so it requests 2 GB of RAM instead of 1, you can create a new
+configuration file called `new_hello_world.aurora` and
+issue
+
+    aurora update start <job_key_value>/0-1 new_hello_world.aurora
+
+This results in instances 0 and 1 having 1 cpu, 2 GB of RAM, and 1 GB of disk space,
+while instances 2 and 3 have 1 cpu, 1 GB of RAM, and 1 GB of disk space. If instance 3
+dies and restarts, it restarts with 1 cpu, 1 GB RAM, and 1 GB disk space.
+
+This means there are two task configurations for the same job
+at the same time, each valid for a different range of instances. While this isn't a recommended
+pattern, it is valid and supported by the Aurora scheduler.
diff --git a/source/documentation/0.22.0/features/mesos-fetcher.md b/source/documentation/0.22.0/features/mesos-fetcher.md
new file mode 100644
index 0000000..967a905
--- /dev/null
+++ b/source/documentation/0.22.0/features/mesos-fetcher.md
@@ -0,0 +1,46 @@
+Mesos Fetcher
+=============
+
+Mesos has support for downloading resources into the sandbox through the
+use of the [Mesos Fetcher](http://mesos.apache.org/documentation/latest/fetcher/).
+
+Aurora supports passing URIs to the Mesos Fetcher dynamically by including
+a list of URIs in job submissions.
+
+How to use
+----------
+The scheduler flag `-enable_mesos_fetcher` must be set to true.
+
+Currently only the scheduler side of this feature has been implemented,
+so a modification to the existing client or a custom Thrift client is required
+to make use of this feature.
+
+If using a custom Thrift client, the list of URIs must be included in TaskConfig
+as the `mesosFetcherUris` field.
+
+Each Mesos Fetcher URI has the following data members:
+
+|Property | Description|
+|---------|------|
+|value (required)  |Path to the resource needed in the sandbox.|
+|extract (optional)|Extract files from packed or compressed archives into the sandbox.|
+|cache (optional) | Use caching mechanism provided by Mesos for resources.|
+
+Note that this structure is very similar to the one provided for downloading
+resources needed for a [custom executor](../../operations/configuration/).
+
+This is because both features use the Mesos fetcher to retrieve resources into
+the sandbox. However, one, the custom executor feature, has a static set of URIs
+set in the server side, and the other, the Mesos Fetcher feature, is a dynamic set
+of URIs set at the time of job submission.
+
+Security Implications
+---------------------
+There are security implications that must be taken into account when enabling this feature.
+**Enabling this feature may potentially enable any job submitting user to perform a privilege escalation.**
+
+Until a more thorough solution is created, one step that has been taken to mitigate this issue
+is to statically mark every user submitted URI as non-executable. This is in contrast to the set of URIs
+set in the custom executor feature which may mark any URI as executable.
+
+If the need arises to mark a downloaded URI as executable, please consider using the custom executor feature.
\ No newline at end of file
diff --git a/source/documentation/0.22.0/features/multitenancy.md b/source/documentation/0.22.0/features/multitenancy.md
new file mode 100644
index 0000000..08aedd5
--- /dev/null
+++ b/source/documentation/0.22.0/features/multitenancy.md
@@ -0,0 +1,82 @@
+Multitenancy
+============
+
+Aurora is a multi-tenant system that can run jobs of multiple clients/tenants.
+Going beyond the [resource isolation on an individual host](../resource-isolation/), it is
+crucial to prevent those jobs from stepping on each other's toes.
+
+
+Job Namespaces
+--------------
+
+The namespace for jobs in Aurora follows a hierarchical structure. This is meant to make it easier
+to differentiate between different jobs. A job key consists of four parts. The four parts are
+`<cluster>/<role>/<environment>/<jobname>` in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts.
+* Environment names are namespaces.
+* Jobname is the custom name of your job.
+
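+For example, the job key `devcluster/www-data/test/hello_world` identifies a job named `hello_world`,
+running in the `test` environment under the `www-data` role on the `devcluster` cluster.
+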
+Role names correspond to user accounts. They are used for
+[authentication](../../operations/security/), as the linux user used to run jobs, and for the
+assignment of [quota](#preemption). If you don't know what accounts are available, contact your
+sysadmin.
+
+The environment component in the job key serves as a namespace. The values for
+environment are validated in the scheduler, by default allowing any of `devel`, `test`,
+`production`, and any value matching the regular expression `staging[0-9]*`. This validation can be
+changed to an arbitrary regular expression by setting the scheduler option `allowed_job_environments`.
+
+None of the values imply any difference in the scheduling behavior. Conventionally, the
+"environment" is set so as to indicate a certain level of stability in the behavior of the job
+by ensuring that an appropriate level of testing has been performed on the application code. For example,
+in the case of a typical Job, releases may progress through the following phases in order of
+increasing level of stability: `devel`, `test`, `staging`, `production`.
+
+
+Configuration Tiers
+-------------------
+
+Tier is a predefined bundle of task configuration options. Aurora schedules tasks and assigns them
+resources based on their tier assignment. The default scheduler tier configuration allows for
+3 tiers:
+
+ - `revocable`: The `revocable` tier requires the task to run with [revocable](../resource-isolation/#oversubscription)
+ resources.
+ - `preemptible`: Setting the task’s tier to `preemptible` allows for the possibility of that task
+ being [preempted](#preemption) by other tasks when the cluster is running low on resources.
+ - `preferred`: The `preferred` tier prevents the task from using [revocable](../resource-isolation/#oversubscription)
+ resources and from being [preempted](#preemption).
+
+Since it is possible that a cluster is configured with a custom tier configuration, users should
+consult their cluster administrator to be informed of the tiers supported by the cluster. Attempts
+to schedule jobs with an unsupported tier will be rejected by the scheduler.
+
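+As a hedged sketch (assuming the default tier configuration above, with `task` defined elsewhere),
+the tier is selected via the `tier` field of the `Job`/`Service` object:
+
+    jobs = [Service(
+      task = task,
+      cluster = 'devcluster',
+      role = 'www-data',
+      environment = 'prod',
+      name = 'hello',
+      tier = 'preemptible'   # may be preempted when the cluster runs low on resources
+    )]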
+
+Preemption
+----------
+
+In order to guarantee that important production jobs are always running, Aurora supports
+preemption.
+
+Let's consider a pending job that is a candidate for scheduling, but a resource shortage
+prevents this. Active tasks can become victims of preemption if:
+
+ - both candidate and victim are owned by the same role and the
+   [priority](../../reference/configuration/#job-objects) of a victim is lower than the
+   [priority](../../reference/configuration/#job-objects) of the candidate.
+ - OR a victim is a `preemptible` or `revocable` [tier](#configuration-tiers) task and the candidate
+   is a `preferred` [tier](#configuration-tiers) task.
+
+In other words, tasks from `preferred` [tier](../../reference/configuration/#job-objects) jobs may
+preempt tasks from any `preemptible` or `revocable` job. However, a `preferred` task may only be
+preempted by tasks from `preferred` jobs in the same role with higher [priority](../../reference/configuration/#job-objects).
+
+Aurora requires resource quotas for [production non-dedicated jobs](../../reference/configuration/#job-objects).
+Quota is enforced at the job role level and when set, defines a non-preemptible pool of compute resources within
+that role. All job types (service, adhoc or cron) require a role resource quota unless a job has a
+[dedicated constraint set](../constraints/#dedicated-attribute).
+
+To grant quota to a particular role in production, an operator can use the command
+`aurora_admin set_quota`.
diff --git a/source/documentation/0.22.0/features/resource-isolation.md b/source/documentation/0.22.0/features/resource-isolation.md
new file mode 100644
index 0000000..a5294f6
--- /dev/null
+++ b/source/documentation/0.22.0/features/resource-isolation.md
@@ -0,0 +1,181 @@
+Resources Isolation and Sizing
+==============================
+
+This document assumes Aurora and Mesos have been configured
+using our [recommended resource isolation settings](../../operations/configuration/#resource-isolation).
+
+- [Isolation](#isolation)
+- [Sizing](#sizing)
+- [Oversubscription](#oversubscription)
+
+
+Isolation
+---------
+
+Aurora is a multi-tenant system; a single software instance runs on a
+server, serving multiple clients/tenants. To share resources among
+tenants, it leverages Mesos for isolation of:
+
+* CPU
+* GPU
+* memory
+* disk space
+* ports
+
+CPU is a soft limit, and handled differently from memory and disk space.
+Too low a CPU value results in throttling your application and
+slowing it down. Memory and disk space are both hard limits; when your
+application goes over these values, it's killed.
+
+### CPU Isolation
+
+Mesos can be configured to use a quota based CPU scheduler (the *Completely*
+*Fair Scheduler*) to provide consistent and predictable performance.
+This is effectively a guarantee of resources -- you receive at least what
+you requested, but also no more than you've requested.
+
+The scheduler gives applications a CPU quota for every 100 ms interval.
+When an application uses its quota for an interval, it is throttled for
+the rest of the 100 ms. Usage resets for each interval and unused
+quota does not carry over.
+
+For example, an application specifying 4.0 CPU has access to 400 ms of
+CPU time every 100 ms. This CPU quota can be used in different ways,
+depending on the application and available resources. Consider the
+scenarios shown in this diagram.
+
+![CPU Availability](../images/CPUavailability.png)
+
+* *Scenario A*: the application can use up to 4 cores continuously for
+every 100 ms interval. It is never throttled and starts processing
+new requests immediately.
+
+* *Scenario B*: the application uses up to 8 cores (depending on
+availability) but is throttled after 50 ms. The CPU quota resets at the
+start of each new 100 ms interval.
+
+* *Scenario C*: is like Scenario A, but there is a garbage collection
+event in the second interval that consumes all CPU quota. The
+application is throttled for the remaining 75 ms of that interval and
+cannot service requests until the next interval. In this example, the
+garbage collection finished in one interval but, depending on how much
+garbage needs collecting, it may take more than one interval and further
+delay service of requests.
+
+*Technical Note*: Mesos considers logical cores, also known as
+hyperthreading or SMT cores, as the unit of CPU.
+
+### Memory Isolation
+
+Mesos uses dedicated memory allocation. Your application always has
+access to the amount of memory specified in your configuration. The
+application's memory use is defined as the sum of the resident set size
+(RSS) of all processes in a shard. Each shard is considered
+independently.
+
+In other words, say you specified a memory size of 10GB. Each shard
+would receive 10GB of memory. If an individual shard's memory demands
+exceed 10GB, that shard is killed, but the other shards continue
+working.
+
+*Technical note*: Total memory size is not enforced at allocation time,
+so your application can request more than its allocation without getting
+an ENOMEM. However, it will be killed shortly after.
+
+### Disk Space
+
+Disk space used by your application is defined as the sum of the files'
+disk space in your application's directory, including the `stdout` and
+`stderr` logged from your application. Each shard is considered
+independently. You should use off-node storage for your application's
+data whenever possible.
+
+In other words, say you specified disk space size of 100MB. Each shard
+would receive 100MB of disk space. If an individual shard's disk space
+demands exceed 100MB, that shard is killed, but the other shards
+continue working.
+
+After your application finishes running, its allocated disk space is
+reclaimed. Thus, your job's final action should move any disk content
+that you want to keep, such as logs, to your home file system or other
+less transitory storage. Disk reclamation takes place an undefined
+period after the application finish time; until then, the disk contents
+are still available but you shouldn't count on them being so.
+
+*Technical note* : Disk space is not enforced at write so your
+application can write above its quota without getting an ENOSPC, but it
+will be killed shortly after. This is subject to change.
+
+### GPU Isolation
+
+GPU isolation will be supported for Nvidia devices starting from Mesos 1.0.
+Access to the allocated units will be exclusive with no sharing between tasks
+allowed (e.g. no fractional GPU allocation). For more details, see the
+[Mesos design document](https://docs.google.com/document/d/10GJ1A80x4nIEo8kfdeo9B11PIbS1xJrrB4Z373Ifkpo/edit#heading=h.w84lz7p4eexl)
+and the [Mesos agent configuration](http://mesos.apache.org/documentation/latest/configuration/).
+
+### Other Resources
+
+Other resources, such as network bandwidth, do not have any performance
+guarantees. For some resources, such as memory bandwidth, there are no
+practical sharing methods so some application combinations collocated on
+the same host may cause contention.
+
+
+Sizing
+-------
+
+### CPU Sizing
+
+To correctly size Aurora-run Mesos tasks, specify a per-shard CPU value
+that lets the task run at its desired performance when at peak load
+distributed across all shards. Include reserve capacity of at least 50%,
+possibly more, depending on how critical your service is (or how
+confident you are about your original estimate :-)), ideally by
+increasing the number of shards to also improve resiliency. When running
+your application, observe its CPU stats over time. If consistently at or
+near your quota during peak load, you should consider increasing either
+per-shard CPU or the number of shards.
+
+### Memory Sizing
+
+Size for your application's peak requirement. Observe the per-instance
+memory statistics over time, as memory requirements can vary over
+different periods. Remember that if your application exceeds its memory
+value, it will be killed, so you should also add a safety margin of
+around 10-20%. If you have the ability to do so, you may also want to
+put alerts on the per-instance memory.
+
+### Disk Space Sizing
+
+Size for your application's peak requirement. Rotate and discard log
+files as needed to stay within your quota. When running a Java process,
+add the maximum size of the Java heap to your disk space requirement, in
+order to account for an out of memory error dumping the heap
+into the application's sandbox space.
+
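+For illustration, a hedged sketch of that arithmetic (values are arbitrary): a JVM service with a
+2 GB heap might size its disk request as its normal log and sandbox footprint plus one heap dump:
+
+    resources = Resources(
+      cpu  = 2.0,
+      ram  = 4*GB,
+      disk = 1*GB + 2*GB   # logs and sandbox content, plus headroom for a full heap dump
+    )
+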
+### GPU Sizing
+
+GPU sizing is highly dependent on your application requirements and is only limited
+by the number of physical GPU units available on a target box.
+
+
+Oversubscription
+----------------
+
+Mesos supports [oversubscription of machine resources](http://mesos.apache.org/documentation/latest/oversubscription/)
+via the concept of revocable tasks. In contrast to non-revocable tasks, revocable tasks are best-effort.
+Mesos reserves the right to throttle or even kill them if they might affect existing high-priority
+user-facing services.
+
+As of today, the only revocable resources supported by Aurora are CPU and RAM. A job can
+opt-in to use those by specifying the `revocable` [Configuration Tier](../../features/multitenancy/#configuration-tiers).
+A revocable job will only be scheduled using revocable resources, even if there are plenty of
+non-revocable resources available.
+
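+For illustration, the opt-in is the same `tier` field described in the
+[Configuration Tiers](../../features/multitenancy/#configuration-tiers) documentation:
+
+    tier = 'revocable'   # set on the Job/Service object; the job then runs only on revocable offers
+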
+The Aurora scheduler must be [configured to receive revocable offers](../../operations/configuration/#resource-isolation)
+from Mesos and accept revocable jobs. If not configured properly revocable tasks will never get
+assigned to hosts and will stay in `PENDING`.
+
+For details on how to mark a job as being revocable, see the
+[Configuration Reference](../../reference/configuration/).
diff --git a/source/documentation/0.22.0/features/service-discovery.md b/source/documentation/0.22.0/features/service-discovery.md
new file mode 100644
index 0000000..b35a569
--- /dev/null
+++ b/source/documentation/0.22.0/features/service-discovery.md
@@ -0,0 +1,43 @@
+Service Discovery
+=================
+
+It is possible for the Aurora executor to announce tasks into ServerSets for
+the purpose of service discovery.  ServerSets use the Zookeeper [group membership pattern](http://zookeeper.apache.org/doc/trunk/recipes.html#sc_outOfTheBox)
+of which there are several reference implementations:
+
+  - [C++](https://github.com/apache/mesos/blob/master/src/zookeeper/group.cpp)
+  - [Java](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/zookeeper/ServerSetImpl.java#L221)
+  - [Python](https://github.com/twitter/commons/blob/master/src/python/twitter/common/zookeeper/serverset/serverset.py#L51)
+
+These can also be used natively in Finagle using the [ZookeeperServerSetCluster](https://github.com/twitter/finagle/blob/master/finagle-serversets/src/main/scala/com/twitter/finagle/zookeeper/ZookeeperServerSetCluster.scala).
+
+For more information about how to configure announcing, see the [Configuration Reference](../../reference/configuration/).
+
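+For illustration, a hedged sketch of enabling announcement through the `announce` field of a
+`Job`/`Service` object (port names are illustrative, and `task` is assumed to request an `http` port):
+
+    jobs = [Service(
+      task = task,
+      cluster = 'devcluster',
+      role = 'www-data',
+      environment = 'prod',
+      name = 'hello',
+      announce = Announcer(primary_port = 'http', portmap = {'alias': 'http'})
+    )]
+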
+Using Mesos DiscoveryInfo
+-------------------------
+Aurora includes experimental support for populating DiscoveryInfo in Mesos. This can be used to build a
+custom service discovery system that does not rely on ZooKeeper. Please see the `Service Discovery` section in the
+[Mesos Framework Development guide](http://mesos.apache.org/documentation/latest/app-framework-development-guide/) for an
+explanation of the protobuf message in Mesos.
+
+To use this feature, please enable the `--populate_discovery_info` flag on the scheduler. All jobs started by the scheduler
+afterwards will have their portmap populated to Mesos and be discoverable via the `/state` endpoint on the Mesos master and agents.
+
+### Using Mesos DNS
+One example is [Mesos-DNS](https://github.com/mesosphere/mesos-dns), which is able to generate multiple DNS
+records. With the current implementation, the example job with key `devcluster/vagrant/test/http-example` generates at
+least the following:
+
+1. An A record for `http_example.test.vagrant.aurora.mesos` (which only includes the IP address);
+2. An [SRV record](https://en.wikipedia.org/wiki/SRV_record) for
+ `_http_example.test.vagrant._tcp.aurora.mesos`, which includes the IP address and every port. This should only
+  be used if the service has one port.
+3. An SRV record `_{port-name}._http_example.test.vagrant._tcp.aurora.mesos` for each port name
+  defined. This should be used when the service has multiple ports. To have this work properly, you need to
+  add `-populate_discovery_info` to the scheduler's configuration.
+
+Things to note:
+
+1. The domain part (".mesos" in the above example) can be configured in [Mesos DNS](http://mesosphere.github.io/mesos-dns/docs/configuration-parameters.html);
+2. Right now, the portmap and port aliases in the announcer object are not reflected in DiscoveryInfo, and are therefore not visible in
+   Mesos DNS records either. This is because they are only resolved in the Thermos executor.
diff --git a/source/documentation/0.22.0/features/services.md b/source/documentation/0.22.0/features/services.md
new file mode 100644
index 0000000..91889d2
--- /dev/null
+++ b/source/documentation/0.22.0/features/services.md
@@ -0,0 +1,116 @@
+Long-running Services
+=====================
+
+Jobs that are always restarted on completion, whether successful or unsuccessful,
+are called services. This is useful for long-running processes
+such as webservices that should always be running, unless stopped explicitly.
+
+
+Service Specification
+---------------------
+
+A job is identified as a service by the presence of the flag
+`service=True` in the [`Job`](../../reference/configuration/#job-objects) object.
+The `Service` alias can be used as shorthand for `Job` with `service=True`.
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/hello_world.aurora
+    hello = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = SequentialTask(
+      processes = [hello],
+      resources = Resources(cpu = 1.0, ram = 128*MB, disk = 128*MB)
+    )
+
+    jobs = [
+      Service(
+        task = task,
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'prod',
+        name = 'hello'
+      )
+    ]
+
+
+Jobs without the service bit set only restart up to `max_task_failures` times and only if they
+terminated unsuccessfully either due to human error or machine failure (see the
+[`Job`](../../reference/configuration/#job-objects) object for details).
+
+
+Ports
+-----
+
+In order to be useful, most services have to bind to one or more ports. Aurora enables this
+use case via the [`thermos.ports` namespace](../../reference/configuration/#thermos-namespace), which
+allows you to request arbitrarily named ports:
+
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[http]}}'
+    )
+
+
+When this process is included in a job, the job will be allocated a port, and the command line
+will be replaced with something like:
+
+    ./run_nginx.sh -port 42816
+
+Where 42816 happens to be the allocated port.
+
+For details on how to enable clients to discover this dynamically assigned port, see our
+[Service Discovery](../service-discovery/) documentation.
+
+
+Health Checking
+---------------
+
+Typically, the Thermos executor monitors processes within a task only by liveness of the forked
+process. In addition to that, Aurora has support for rudimentary health checking: either via HTTP
+or via custom shell scripts.
+
+For example, simply by requesting a `health` port, a process can request to be health checked
+via repeated calls to the `/health` endpoint:
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[health]}}'
+    )
+
+Please see the
+[configuration reference](../../reference/configuration/#healthcheckconfig-objects)
+for configuration options for this feature.
+
+Starting with the 0.17.0 release, job updates rely only on task health-checks by introducing
+a `min_consecutive_successes` parameter on the HealthCheckConfig object. This parameter represents
+the number of successful health checks needed before a task is moved into the `RUNNING` state. Tasks
+that do not have enough successful health checks within the first `n` attempts are moved to the
+`FAILED` state, where `n = ceil(initial_interval_secs/interval_secs) + max_consecutive_failures +
+min_consecutive_successes`. In order to accommodate variability during task warm up, `initial_interval_secs`
+will act as a grace period. Any health-check failures during the first `m` attempts are ignored and
+do not count towards `max_consecutive_failures`, where `m = ceil(initial_interval_secs/interval_secs)`.
+
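+For illustration (timing values are arbitrary), a hedged sketch of setting these parameters via
+`HealthCheckConfig` and attaching them to a job through its `health_check_config` field:
+
+    jobs = [Service(
+      task = task,
+      cluster = 'devcluster',
+      role = 'www-data',
+      environment = 'prod',
+      name = 'hello',
+      health_check_config = HealthCheckConfig(
+        initial_interval_secs = 10,     # grace period while the task warms up
+        interval_secs = 5,
+        timeout_secs = 1,
+        max_consecutive_failures = 2,
+        min_consecutive_successes = 2
+      )
+    )]
+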
+As [job updates](../job-updates/) are based only on health-checks, it is not necessary to set
+`watch_secs` to the worst-case update time; it can instead be set to 0. The scheduler considers a
+task that is in the `RUNNING` state to be healthy and proceeds to update the next batch of instances.
+For details on how to control health checks, please see the
+[HealthCheckConfig](../../reference/configuration/#healthcheckconfig-objects) configuration object.
+Existing jobs that do not configure a health-check can fall back to using `watch_secs` to
+monitor a task before considering it healthy.
+
+You can pause health checking by touching a file inside of your sandbox, named `.healthchecksnooze`.
+As long as that file is present, health checks will be disabled, enabling users to gather core
+dumps or other performance measurements without worrying about Aurora's health check killing
+their process.
+
+WARNING: Remember to remove this when you are done, otherwise your instance will have permanently
+disabled health checks.
diff --git a/source/documentation/0.22.0/features/sla-metrics.md b/source/documentation/0.22.0/features/sla-metrics.md
new file mode 100644
index 0000000..3b40e02
--- /dev/null
+++ b/source/documentation/0.22.0/features/sla-metrics.md
@@ -0,0 +1,215 @@
+Aurora SLA Measurement
+======================
+
+- [Overview](#overview)
+- [Metric Details](#metric-details)
+  - [Platform Uptime](#platform-uptime)
+  - [Job Uptime](#job-uptime)
+  - [Median Time To Assigned (MTTA)](#median-time-to-assigned-\(mtta\))
+  - [Median Time To Starting (MTTS)](#median-time-to-starting-\(mtts\))
+  - [Median Time To Running (MTTR)](#median-time-to-running-\(mttr\))
+- [Limitations](#limitations)
+
+## Overview
+
+The primary goal of the feature is collection and monitoring of Aurora job SLA (Service Level
+Agreement) metrics that define a contractual relationship between the Aurora/Mesos platform
+and hosted services.
+
+The Aurora SLA feature is by default only enabled for service (non-cron)
+production jobs (`"production=True"` in your `.aurora` config). It can be enabled for
+non-production services by an operator via the scheduler command line flag `-sla_non_prod_metrics`.
+
+Counters that track SLA measurements are computed periodically within the scheduler.
+The individual instance metrics are refreshed every minute (configurable via
+`sla_stat_refresh_interval`). The instance counters are subsequently aggregated by
+relevant grouping types before being exported to the scheduler `/vars` endpoint (when using `vagrant`
+that would be `http://192.168.33.7:8081/vars`).
+
+
+## Metric Details
+
+### Platform Uptime
+
+*Aggregate amount of time a job spends in a non-runnable state due to platform unavailability
+or scheduling delays. This metric tracks Aurora/Mesos uptime performance and reflects on any
+system-caused downtime events (tasks LOST or DRAINED). Any user-initiated task kills/restarts
+will not degrade this metric.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_platform_uptime_percent`
+* Per cluster - `sla_cluster_platform_uptime_percent`
+
+**Units:** percent
+
+A fault in the task environment may cause Aurora/Mesos to have different views on the task state
+or lose track of the task's existence. In such cases, the service task is marked as LOST and
+rescheduled by Aurora. For example, this may happen when the task stays in ASSIGNED or STARTING
+for too long or the Mesos agent becomes unhealthy (or disappears completely). The time between
+task entering LOST and its replacement reaching RUNNING state is counted towards platform downtime.
+
+Another example of a platform downtime event is the administrator-requested task rescheduling. This
+happens during planned Mesos agent maintenance when all agent tasks are marked as DRAINED and
+rescheduled elsewhere.
+
+To accurately calculate Platform Uptime, we must separate platform-incurred downtime from user
+actions that put a service instance in a non-operational state. It is simpler to isolate
+user-incurred downtime and treat all other downtime as platform-incurred.
+
+Currently, a user can cause a healthy service (task) downtime in only two ways: via `killTasks`
+or `restartShards` RPCs. For both, their affected tasks leave an audit state transition trail
+relevant to uptime calculations. By applying a special "SLA meaning" to exposed task state
+transition records, we can build a deterministic downtime trace for every given service instance.
+
+A task going through a state transition carries one of three possible SLA meanings
+(see [SlaAlgorithm.java](https://github.com/apache/aurora/blob/rel/0.22.0/src/main/java/org/apache/aurora/scheduler/sla/SlaAlgorithm.java) for
+sla-to-task-state mapping):
+
+* Task is UP: starts a period where the task is considered to be up and running from the Aurora
+  platform standpoint.
+
+* Task is DOWN: starts a period where the task cannot reach the UP state for some
+  non-user-related reason. Counts towards instance downtime.
+
+* Task is REMOVED from SLA: starts a period where the task is not expected to be UP due to
+  user initiated action or failure. We ignore this period for the uptime calculation purposes.
+
+This metric is recalculated over the last sampling period (last minute) to account for
+any UP/DOWN/REMOVED events. It ignores any UP/DOWN events not immediately adjacent to the
+sampling interval as well as adjacent REMOVED events.
+
+### Job Uptime
+
+*Percentage of the job instances considered to be in RUNNING state for the specified duration
+relative to request time. This is a purely application-side metric that considers the aggregate
+uptime of all RUNNING instances. Any user- or platform-initiated restarts directly affect
+this metric.*
+
+**Collection scope:** We currently expose job uptime values at 5 pre-defined
+percentiles (50th, 75th, 90th, 95th and 99th):
+
+* `sla_<job_key>_job_uptime_50_00_sec`
+* `sla_<job_key>_job_uptime_75_00_sec`
+* `sla_<job_key>_job_uptime_90_00_sec`
+* `sla_<job_key>_job_uptime_95_00_sec`
+* `sla_<job_key>_job_uptime_99_00_sec`
+
+**Units:** seconds
+
+You can also get customized real-time stats from the Aurora client. See `aurora sla -h` for
+more details.
+
+### Median Time To Assigned (MTTA)
+
+*Median time a job spends waiting for its tasks to be assigned to a host. This is a combined
+metric that helps track the dependency of scheduling performance on the requested resources
+(user scope) as well as the internal scheduler bin-packing algorithm efficiency (platform scope).*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtta_ms`
+* Per cluster - `sla_cluster_mtta_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.22.0/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mtta_ms`
+    * `sla_cpu_medium_mtta_ms`
+    * `sla_cpu_large_mtta_ms`
+    * `sla_cpu_xlarge_mtta_ms`
+    * `sla_cpu_xxlarge_mtta_ms`
+  * By RAM:
+    * `sla_ram_small_mtta_ms`
+    * `sla_ram_medium_mtta_ms`
+    * `sla_ram_large_mtta_ms`
+    * `sla_ram_xlarge_mtta_ms`
+    * `sla_ram_xxlarge_mtta_ms`
+  * By DISK:
+    * `sla_disk_small_mtta_ms`
+    * `sla_disk_medium_mtta_ms`
+    * `sla_disk_large_mtta_ms`
+    * `sla_disk_xlarge_mtta_ms`
+    * `sla_disk_xxlarge_mtta_ms`
+
+**Units:** milliseconds
+
+MTTA only considers instances that have already reached ASSIGNED state and ignores those
+that are still PENDING. This ensures straggler instances (e.g. with unreasonable resource
+constraints) do not affect metric curves.
+
+### Median Time To Starting (MTTS)
+
+*Median time a job waits for its tasks to reach STARTING state. This is a comprehensive metric
+reflecting the overall time it takes for Aurora/Mesos to start initializing the sandbox
+for a task.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtts_ms`
+* Per cluster - `sla_cluster_mtts_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.22.0/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mtts_ms`
+    * `sla_cpu_medium_mtts_ms`
+    * `sla_cpu_large_mtts_ms`
+    * `sla_cpu_xlarge_mtts_ms`
+    * `sla_cpu_xxlarge_mtts_ms`
+  * By RAM:
+    * `sla_ram_small_mtts_ms`
+    * `sla_ram_medium_mtts_ms`
+    * `sla_ram_large_mtts_ms`
+    * `sla_ram_xlarge_mtts_ms`
+    * `sla_ram_xxlarge_mtts_ms`
+  * By DISK:
+    * `sla_disk_small_mtts_ms`
+    * `sla_disk_medium_mtts_ms`
+    * `sla_disk_large_mtts_ms`
+    * `sla_disk_xlarge_mtts_ms`
+    * `sla_disk_xxlarge_mtts_ms`
+
+**Units:** milliseconds
+
+MTTS only considers instances in STARTING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+### Median Time To Running (MTTR)
+
+*Median time a job waits for its tasks to reach RUNNING state. This is a comprehensive metric
+reflecting the overall time it takes for Aurora/Mesos to start executing user content.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mttr_ms`
+* Per cluster - `sla_cluster_mttr_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.22.0/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mttr_ms`
+    * `sla_cpu_medium_mttr_ms`
+    * `sla_cpu_large_mttr_ms`
+    * `sla_cpu_xlarge_mttr_ms`
+    * `sla_cpu_xxlarge_mttr_ms`
+  * By RAM:
+    * `sla_ram_small_mttr_ms`
+    * `sla_ram_medium_mttr_ms`
+    * `sla_ram_large_mttr_ms`
+    * `sla_ram_xlarge_mttr_ms`
+    * `sla_ram_xxlarge_mttr_ms`
+  * By DISK:
+    * `sla_disk_small_mttr_ms`
+    * `sla_disk_medium_mttr_ms`
+    * `sla_disk_large_mttr_ms`
+    * `sla_disk_xlarge_mttr_ms`
+    * `sla_disk_xxlarge_mttr_ms`
+
+**Units:** milliseconds
+
+MTTR only considers instances in RUNNING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+## Limitations
+
+* The availability of Aurora SLA metrics is bound by the scheduler availability.
+
+* All metrics are calculated at a pre-defined interval (currently set at 1 minute).
+  Scheduler restarts may result in missed collections.
diff --git a/source/documentation/0.22.0/features/sla-requirements.md b/source/documentation/0.22.0/features/sla-requirements.md
new file mode 100644
index 0000000..ee876f3
--- /dev/null
+++ b/source/documentation/0.22.0/features/sla-requirements.md
@@ -0,0 +1,185 @@
+SLA Requirements
+================
+
+- [Overview](#overview)
+- [Default SLA](#default-sla)
+- [Custom SLA](#custom-sla)
+  - [Count-based](#count-based)
+  - [Percentage-based](#percentage-based)
+  - [Coordinator-based](#coordinator-based)
+
+## Overview
+
+Aurora guarantees SLA requirements for jobs. These requirements limit the impact of cluster-wide
+maintenance operations on the jobs. For instance, when an operator upgrades
+the OS on all the Mesos agent machines, the tasks scheduled on them need to be drained.
+By specifying SLA requirements, a job can make sure that it has enough instances to
+continue operating safely without incurring downtime.
+
+> SLA is defined as the minimum number of active tasks required for a job during every duration window.
+A task is active if it was in `RUNNING` state during the last duration window.
+
+There is a [default](#default-sla) SLA guarantee for
+[preferred](../../features/multitenancy/#configuration-tiers) tier jobs and it is also possible to
+specify [custom](#custom-sla) SLA requirements.
+
+## Default SLA
+
+Aurora guarantees a default SLA requirement for tasks in
+[preferred](../../features/multitenancy/#configuration-tiers) tier.
+
+> 95% of tasks in a job will be `active` for every 30 mins.
+
+
+## Custom SLA
+
+For jobs that require different SLA requirements, Aurora allows jobs to specify their own
+SLA requirements via `SlaPolicies`. There are 3 different ways to express SLA requirements.
+
+### [Count-based](../../reference/configuration/#countslapolicy-objects)
+
+For jobs that need a minimum `number` of instances to be running all the time,
+[`CountSlaPolicy`](../../reference/configuration/#countslapolicy-objects)
+provides the ability to express the minimum number of required active instances (i.e. number of
+tasks that are `RUNNING` for at least `duration_secs`). For instance, if we have a
+`replicated-service` that has 3 instances and needs at least 2 instances every 30 minutes to be
+treated healthy, the SLA requirement can be expressed with a
+[`CountSlaPolicy`](../../reference/configuration/#countslapolicy-objects) like below,
+
+```python
+Job(
+  name = 'replicated-service',
+  role = 'www-data',
+  instances = 3,
+  sla_policy = CountSlaPolicy(
+    count = 2,
+    duration_secs = 1800
+  )
+  ...
+)
+```
+
+### [Percentage-based](../../reference/configuration/#percentageslapolicy-objects)
+
+For jobs that need a minimum `percentage` of instances to be running all the time,
+[`PercentageSlaPolicy`](../../reference/configuration/#percentageslapolicy-objects) provides the
+ability to express the minimum percentage of required active instances (i.e. percentage of tasks
+that are `RUNNING` for at least `duration_secs`). For instance, if we have a `webservice` that
+has 10000 instances for handling peak load and cannot have more than 0.1% of the instances down
+for every 1 hr, the SLA requirement can be expressed with a
+[`PercentageSlaPolicy`](../../reference/configuration/#percentageslapolicy-objects) like below,
+
+```python
+Job(
+  name = 'frontend-service',
+  role = 'www-data',
+  instances = 10000,
+  sla_policy = PercentageSlaPolicy(
+    percentage = 99.9,
+    duration_secs = 3600
+  )
+  ...
+)
+```
+
+### [Coordinator-based](../../reference/configuration/#coordinatorslapolicy-objects)
+
+When none of the above methods are enough to describe the SLA requirements for a job, then the SLA
+calculation can be off-loaded to a custom service called the `Coordinator`. The `Coordinator` needs
+to expose an endpoint that will be called to check if removal of a task will affect the SLA
+requirements for the job. This is useful to control the number of tasks that undergo maintenance
+at a time, without affecting the SLA for the application.
+
+Consider an example where we have a `storage-service` that stores 2 replicas of an object. Each replica
+is distributed across the instances, such that replicas are stored on different hosts. In addition,
+a consistent hash is used for distributing the data across the instances.
+
+When an instance needs to be drained (say for host-maintenance), we have to make sure that at least 1 of
+the 2 replicas remains available. In such a case, a `Coordinator` service can be used to maintain
+the SLA guarantees required for the job.
+
+The job can be configured with a
+[`CoordinatorSlaPolicy`](../../reference/configuration/#coordinatorslapolicy-objects) to specify the
+coordinator endpoint and the field in the response JSON that indicates whether the SLA will be
+affected when the task is removed.
+
+```python
+Job(
+  name = 'storage-service',
+  role = 'www-data',
+  sla_policy = CoordinatorSlaPolicy(
+    coordinator_url = 'http://coordinator.example.com',
+    status_key = 'drain'
+  )
+  ...
+)
+```
+
+
+#### Coordinator Interface [Experimental]
+
+When a [`CoordinatorSlaPolicy`](../../reference/configuration/#coordinatorslapolicy-objects) is
+specified for a job, any action that requires removing a task
+(such as drains) will be required to get approval from the `Coordinator` before proceeding. The
+coordinator service needs to expose an HTTP endpoint that can take a `task-key` param
+(`<cluster>/<role>/<env>/<name>/<instance>`) and a JSON body describing the task
+details, the force maintenance countdown (in ms) and other params, and return a JSON response that
+contains the boolean status for allowing or disallowing the task's removal.
+
+##### Request:
+```javascript
+POST /
+  ?task=<cluster>/<role>/<env>/<name>/<instance>
+
+{
+  "forceMaintenanceCountdownMs": "604755646",
+  "task": "cluster/role/devel/job/1",
+  "taskConfig": {
+    "assignedTask": {
+      "taskId": "taskA",
+      "slaveHost": "a",
+      "task": {
+        "job": {
+          "role": "role",
+          "environment": "devel",
+          "name": "job"
+        },
+        ...
+      },
+      "assignedPorts": {
+        "http": 1000
+      },
+      "instanceId": 1
+      ...
+    },
+    ...
+  }
+}
+```
+
+##### Response:
+```json
+{
+  "drain": true
+}
+```
+
+If the Coordinator allows removal of the task, then the task’s
+[termination lifecycle](../../reference/configuration/#httplifecycleconfig-objects)
+is triggered. If the Coordinator does not allow removal, then the request will be retried in the
+future.
+
+#### Coordinator Actions
+
+Each Coordinator endpoint gets its own lock, which is used to serialize calls to that Coordinator.
+This guarantees that only one concurrent request is sent to a coordinator endpoint. It allows
+coordinators to simply look at the current state of the tasks to determine the SLA (without having
+to worry about in-flight and pending requests). However, if there are multiple coordinators,
+maintenance can be done in parallel across all the coordinators.
+
+_Note: A single concurrent request to a coordinator endpoint does not translate into an exactly-once
+guarantee. The coordinator must be able to handle duplicate drain
+requests for the same task._
+
+
+
diff --git a/source/documentation/0.22.0/features/webhooks.md b/source/documentation/0.22.0/features/webhooks.md
new file mode 100644
index 0000000..286746e
--- /dev/null
+++ b/source/documentation/0.22.0/features/webhooks.md
@@ -0,0 +1,112 @@
+Webhooks
+========
+
+Aurora has an optional feature which allows an operator to specify a file to configure an HTTP webhook
+to receive task state change events. It can be enabled with a scheduler flag, e.g.
+`-webhook_config=/path/to/webhook.json`. At this point, webhooks are still considered *experimental*.
+
+Below is a sample configuration:
+
+```json
+{
+  "headers": {
+    "Content-Type": "application/vnd.kafka.json.v1+json",
+    "Producer-Type": "reliable"
+  },
+  "targetURL": "http://localhost:5000/",
+  "timeoutMsec": 5
+}
+```
+
+And an example of a response that you will get back:
+
+```json
+{
+    "task":
+    {
+        "cachedHashCode":0,
+        "assignedTask": {
+            "cachedHashCode":0,
+            "taskId":"vagrant-test-http_example-8-a6cf7ec5-d793-49c7-b10f-0e14ab80bfff",
+            "task": {
+                "cachedHashCode":-1819348376,
+                "job": {
+                    "cachedHashCode":803049425,
+                    "role":"vagrant",
+                    "environment":"test",
+                    "name":"http_example"
+                    },
+                "owner": {
+                    "cachedHashCode":226895216,
+                    "user":"vagrant"
+                    },
+                "isService":true,
+                "numCpus":0.1,
+                "ramMb":16,
+                "diskMb":8,
+                "priority":0,
+                "maxTaskFailures":1,
+                "production":false,
+                "resources":[
+                    {"cachedHashCode":729800451,"setField":"NUM_CPUS","value":0.1},
+                    {"cachedHashCode":552899914,"setField":"RAM_MB","value":16},
+                    {"cachedHashCode":-1547868317,"setField":"DISK_MB","value":8},
+                    {"cachedHashCode":1957328227,"setField":"NAMED_PORT","value":"http"},
+                    {"cachedHashCode":1954229436,"setField":"NAMED_PORT","value":"tcp"}
+                    ],
+                "constraints":[],
+                "requestedPorts":["http","tcp"],
+                "taskLinks":{"http":"http://%host%:%port:http%"},
+                "contactEmail":"vagrant@localhost",
+                "executorConfig": {
+                    "cachedHashCode":-1194797325,
+                    "name":"AuroraExecutor",
+                    "data": "{\"environment\": \"test\", \"health_check_config\": {\"initial_interval_secs\": 5.0, \"health_checker\": { \"http\": {\"expected_response_code\": 0, \"endpoint\": \"/health\", \"expected_response\": \"ok\"}}, \"max_consecutive_failures\": 0, \"timeout_secs\": 1.0, \"interval_secs\": 1.0}, \"name\": \"http_example\", \"service\": true, \"max_task_failures\": 1, \"cron_collision_policy\": \"KILL_EXISTING\", \"enable_hooks\": false, \"cluster\": \"devcluster\", \"task\": {\"processes\": [{\"daemon\": false, \"name\": \"echo_ports\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"echo \\\"tcp port: {{thermos.ports[tcp]}}; http port: {{thermos.ports[http]}}; alias: {{thermos.ports[alias]}}\\\"\", \"final\": false}, {\"daemon\": false, \"name\": \"stage_server\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"cp /vagrant/src/test/sh/org/apache/aurora/e2e/http_example.py .\", \"final\": false}, {\"daemon\": false, \"name\": \"run_server\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"python http_example.py {{thermos.ports[http]}}\", \"final\": false}], \"name\": \"http_example\", \"finalization_wait\": 30, \"max_failures\": 1, \"max_concurrency\": 0, \"resources\": {\"disk\": 8388608, \"ram\": 16777216, \"cpu\": 0.1}, \"constraints\": [{\"order\": [\"echo_ports\", \"stage_server\", \"run_server\"]}]}, \"production\": false, \"role\": \"vagrant\", \"contact\": \"vagrant@localhost\", \"announce\": {\"primary_port\": \"http\", \"portmap\": {\"alias\": \"http\"}}, \"lifecycle\": {\"http\": {\"graceful_shutdown_endpoint\": \"/quitquitquit\", \"port\": \"health\", \"shutdown_endpoint\": \"/abortabortabort\"}}, \"priority\": 0}"},
+                    "metadata":[],
+                    "container":{
+                        "cachedHashCode":-1955376216,
+                        "setField":"MESOS",
+                        "value":{"cachedHashCode":31}}
+                    },
+                    "assignedPorts":{},
+                    "instanceId":8
+        },
+        "status":"PENDING",
+        "failureCount":0,
+        "taskEvents":[
+            {"cachedHashCode":0,"timestamp":1464992060258,"status":"PENDING","scheduler":"aurora"}]
+        },
+        "oldState":{}}
+```
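+
+As an illustration, a minimal receiving endpoint could look like the sketch below. It listens on
+`localhost:5000` to match the sample `targetURL` above and simply logs the job key and new status
+from each event; everything else about the handler is an assumption for the example.
+
+```python
+import json
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+class WebhookHandler(BaseHTTPRequestHandler):
+    def do_POST(self):
+        length = int(self.headers.get('Content-Length', 0))
+        event = json.loads(self.rfile.read(length) or b'{}')
+
+        # Pull the job identity and new status out of the payload shown above.
+        task = event.get('task', {})
+        job = task.get('assignedTask', {}).get('task', {}).get('job', {})
+        print('%s/%s/%s is now %s' % (job.get('role'), job.get('environment'),
+                                      job.get('name'), task.get('status')))
+
+        self.send_response(200)
+        self.end_headers()
+
+if __name__ == '__main__':
+    # Port matches the sample targetURL above (http://localhost:5000/).
+    HTTPServer(('localhost', 5000), WebhookHandler).serve_forever()
+```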
+
+By default, the webhook watches all TaskStateChanges and sends events to the configured endpoint. If you
+are only interested in certain types of TaskStateChange (e.g. transitions to the `LOST` or `FAILED` statuses),
+you can specify a whitelist of the desired task statuses in webhook.json. The webhook will then only send
+the corresponding events for the whitelisted statuses to the configured endpoint.
+
+```json
+{
+  "headers": {
+    "Content-Type": "application/vnd.kafka.json.v1+json",
+    "Producer-Type": "reliable"
+  },
+  "targetURL": "http://localhost:5000/",
+  "timeoutMsec": 50,
+  "statuses": ["LOST", "FAILED"]
+}
+```
+
+If you want to whitelist all TaskStateChanges, you can add a wildcard character `*` to your whitelist
+like below, or simply leave out the `statuses` field in webhook.json.
+
+```json
+{
+  "headers": {
+    "Content-Type": "application/vnd.kafka.json.v1+json",
+    "Producer-Type": "reliable"
+  },
+  "targetURL": "http://localhost:5000/",
+  "timeoutMsec": 50,
+  "statuses": ["*"]
+}
+```
diff --git a/source/documentation/0.22.0/getting-started/overview.md b/source/documentation/0.22.0/getting-started/overview.md
new file mode 100644
index 0000000..84c5916
--- /dev/null
+++ b/source/documentation/0.22.0/getting-started/overview.md
@@ -0,0 +1,112 @@
+Aurora System Overview
+======================
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+
+Components
+----------
+
+It is important to have an understanding of the components that make up
+a functioning Aurora cluster.
+
+![Aurora Components](../images/components.png)
+
+* **Aurora scheduler**
+  The scheduler is your primary interface to the work you run in your cluster.  You will
+  instruct it to run jobs, and it will manage them in Mesos for you.  You will also frequently use
+  the scheduler's read-only web interface as a heads-up display for what's running in your cluster.
+
+* **Aurora client**
+  The client (`aurora` command) is a command line tool that exposes primitives that you can use to
+  interact with the scheduler. The client operates on jobs, which it identifies by job key.
+
+  Aurora also provides an admin client (`aurora_admin` command) that contains commands built for
+  cluster administrators.  You can use this tool to do things like manage user quotas and manage
+  graceful maintenance on machines in the cluster.
+
+* **Aurora executor**
+  The executor (a.k.a. Thermos executor) is responsible for carrying out the workloads described in
+  the Aurora DSL (`.aurora` files).  The executor is what actually executes user processes.  It will
+  also perform health checking of tasks and register tasks in ZooKeeper for the purposes of dynamic
+  service discovery.
+
+* **Aurora observer**
+  The observer provides browser-based access to the status of individual tasks executing on worker
+  machines.  It gives insight into the processes executing, and facilitates browsing of task sandbox
+  directories.
+
+* **ZooKeeper**
+  [ZooKeeper](http://zookeeper.apache.org) is a distributed consensus system.  In an Aurora cluster
+  it is used for reliable election of the leading Aurora scheduler and Mesos master.  It is also
+  used as a vehicle for service discovery; see [Service Discovery](../../features/service-discovery/).
+
+* **Mesos master**
+  The master is responsible for tracking worker machines and performing accounting of their
+  resources.  The scheduler interfaces with the master to control the cluster.
+
+* **Mesos agent**
+  The agent receives work assigned by the scheduler and executes it.  It interfaces with Linux
+  isolation systems like cgroups, namespaces and Docker to manage the resource consumption of tasks.
+  When a user task is launched, the agent will launch the executor (in the context of a Linux cgroup
+  or Docker container depending upon the environment), which will in turn fork user processes.
+
+  In earlier versions of Mesos and Aurora, the Mesos agent was known as the Mesos slave.
+
+
+Jobs, Tasks and Processes
+--------------------------
+
+Aurora is a Mesos framework used to schedule *jobs* onto Mesos. Mesos
+cares about individual *tasks*, but typical jobs consist of dozens or
+hundreds of task replicas. Aurora provides a layer on top of Mesos with
+its `Job` abstraction. An Aurora `Job` consists of a task template and
+instructions for creating near-identical replicas of that task (modulo
+things like "instance id" or specific port numbers which may differ from
+machine to machine).
+
+How many tasks make up a Job can vary. On a basic level, a Job consists of
+one task template and instructions for creating near-identical replicas of that task
+(otherwise referred to as "instances" or "shards").
+
+A task can merely be a single *process* corresponding to a single
+command line, such as `python2.7 my_script.py`. However, a task can also
+consist of many separate processes, which all run within a single
+sandbox. For example, running multiple cooperating agents together,
+such as `logrotate`, `installer`, master, or agent processes. This is
+where Thermos comes in. While Aurora provides a `Job` abstraction on
+top of Mesos `Tasks`, Thermos provides a `Process` abstraction
+underneath Mesos `Task`s and serves as part of the Aurora framework's
+executor.
+
+You define `Job`s, `Task`s, and `Process`es in a configuration file.
+Configuration files are written in Python, and make use of the
+[Pystachio](https://github.com/wickman/pystachio) templating language,
+along with specific Aurora, Mesos, and Thermos commands and methods.
+The configuration files typically end with a `.aurora` extension.
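+
+To make the hierarchy concrete, here is a deliberately minimal sketch of what such a file can look
+like; the names, resource values, and `devcluster` cluster are illustrative only, and the
+[Aurora Tutorial](../tutorial/) walks through a complete, runnable example:
+
+```python
+# A Process is a single command line.
+hello = Process(name = 'hello', cmdline = 'echo "hello world" && sleep 60')
+
+# A Task groups Processes that run together in one sandbox, with shared resources.
+hello_task = Task(
+  processes = [hello],
+  resources = Resources(cpu = 0.1, ram = 16*MB, disk = 8*MB))
+
+# A Job instructs Aurora to run near-identical instances of the Task on the cluster.
+jobs = [
+  Job(cluster = 'devcluster',
+      environment = 'devel',
+      role = 'www-data',
+      name = 'hello',
+      instances = 2,
+      task = hello_task)
+]
+```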
+
+Summary:
+
+* Aurora manages jobs made of tasks.
+* Mesos manages tasks made of processes.
+* Thermos manages processes.
+* All of this is defined in `.aurora` configuration files.
+
+![Aurora hierarchy](../images/aurora_hierarchy.png)
+
+Each `Task` has a *sandbox* created when the `Task` starts and garbage
+collected when it finishes. All of a `Task`'s processes run in its
+sandbox, so processes can share state by using a shared current working
+directory.
+
+The sandbox garbage collection policy considers many factors, most
+importantly age and size. It makes a best-effort attempt to keep
+sandboxes around as long as possible post-task in order for service
+owners to inspect data and logs, should the `Task` have completed
+abnormally. But you can't design your applications assuming sandboxes
+will be around forever; instead, build log saving or other
+checkpointing mechanisms directly into your application or into your
+`Job` description.
+
diff --git a/source/documentation/0.22.0/getting-started/tutorial.md b/source/documentation/0.22.0/getting-started/tutorial.md
new file mode 100644
index 0000000..1f037fc
--- /dev/null
+++ b/source/documentation/0.22.0/getting-started/tutorial.md
@@ -0,0 +1,258 @@
+# Aurora Tutorial
+
+This tutorial shows how to use the Aurora scheduler to run (and "`printf-debug`")
+a hello world program on Mesos. This is the recommended document for new Aurora users
+to start getting up to speed on the system.
+
+- [Prerequisite](#setup-install-aurora)
+- [The Script](#the-script)
+- [Aurora Configuration](#aurora-configuration)
+- [Creating the Job](#creating-the-job)
+- [Watching the Job Run](#watching-the-job-run)
+- [Cleanup](#cleanup)
+- [Next Steps](#next-steps)
+
+
+## Prerequisite
+
+This tutorial assumes you are running [Aurora locally using Vagrant](../vagrant/).
+However, in general the instructions are also applicable to any other
+[Aurora installation](../../operations/installation/).
+
+Unless otherwise stated, all commands are to be run from the root of the aurora
+repository clone.
+
+
+## The Script
+
+Our "hello world" application is a simple Python script that loops
+forever, displaying the time every few seconds. Copy the code below and
+put it in a file named `hello_world.py` in the root of your Aurora repository clone
+(Note: this directory is the same as `/vagrant` inside the Vagrant VMs).
+
+The script has an intentional bug, which we will explain later on.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+import time
+
+def main():
+  SLEEP_DELAY = 10
+  # Python experts - ignore this blatant bug.
+  for i in xrang(100):
+    print("Hello world! The time is now: %s. Sleeping for %d secs" % (
+      time.asctime(), SLEEP_DELAY))
+    time.sleep(SLEEP_DELAY)
+
+if __name__ == "__main__":
+  main()
+```
+
+## Aurora Configuration
+
+Once we have our script/program, we need to create a *configuration
+file* that tells Aurora how to manage and launch our Job. Save the below
+code in the file `hello_world.aurora`.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+pkg_path = '/vagrant/hello_world.py'
+
+# we use a trick here to make the configuration change with
+# the contents of the file, for simplicity.  in a normal setting, packages would be
+# versioned, and the version number would be changed in the configuration.
+import hashlib
+with open(pkg_path, 'rb') as f:
+  pkg_checksum = hashlib.md5(f.read()).hexdigest()
+
+# copy hello_world.py into the local sandbox
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s . && echo %s && chmod +x hello_world.py' % (pkg_path, pkg_checksum))
+
+# run the script
+hello_world = Process(
+  name = 'hello_world',
+  cmdline = 'python -u hello_world.py')
+
+# describe the task
+hello_world_task = SequentialTask(
+  processes = [install, hello_world],
+  resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB))
+
+jobs = [
+  Service(cluster = 'devcluster',
+          environment = 'devel',
+          role = 'www-data',
+          name = 'hello_world',
+          task = hello_world_task)
+]
+```
+
+There is a lot going on in that configuration file:
+
+1. From a "big picture" viewpoint, it first defines two
+Processes. Then it defines a Task that runs the two Processes in the
+order specified in the Task definition, as well as specifying what
+computational and memory resources are available for them.  Finally,
+it defines a Job that will schedule the Task on available and suitable
+machines. This Job is the sole member of a list of Jobs; you can
+specify more than one Job in a config file.
+
+2. At the Process level, it specifies how to get your code into the
+local sandbox in which it will run. It then specifies how the code is
+actually run once the second Process starts.
+
+For more about Aurora configuration files, see the [Configuration
+Tutorial](../../reference/configuration-tutorial/) and the [Configuration
+Reference](../../reference/configuration/) (preferably after finishing this
+tutorial).
+
+
+## Creating the Job
+
+We're ready to launch our job! To do so, we use the Aurora Client to
+issue a Job creation request to the Aurora scheduler.
+
+Many Aurora Client commands take a *job key* argument, which uniquely
+identifies a Job. A job key consists of four parts, each separated by a
+"/". The four parts are  `<cluster>/<role>/<environment>/<jobname>`
+in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts existing on the agent machines. If you
+don't know what accounts are available, contact your sysadmin.
+* Environment names are namespaces; you can count on `test`, `devel`,
+`staging` and `prod` existing.
+* Jobname is the custom name of your job.
+
+When comparing two job keys, if any of the four parts is different from
+its counterpart in the other key, then the two job keys identify two separate
+jobs. If all four values are identical, the job keys identify the same job.
+
+The `clusters.json` [client configuration](../../reference/client-cluster-configuration/)
+for the Aurora scheduler defines the available cluster names.
+For Vagrant, from the top-level of your Aurora repository clone, do:
+
+    $ vagrant ssh
+
+Followed by:
+
+    vagrant@aurora:~$ cat /etc/aurora/clusters.json
+
+You'll see something like the following. The `name` value shown here corresponds to a job key's cluster value.
+
+```javascript
+[{
+  "name": "devcluster",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler",
+  "auth_mechanism": "UNAUTHENTICATED",
+  "slave_run_directory": "latest",
+  "slave_root": "/var/lib/mesos"
+}]
+```
+
+The Aurora Client command that actually runs our Job is `aurora job create`. It creates a Job as
+specified by its job key and configuration file arguments and runs it.
+
+    aurora job create <cluster>/<role>/<environment>/<jobname> <config_file>
+
+Or for our example:
+
+    aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+After entering our virtual machine using `vagrant ssh`, this returns:
+
+    vagrant@aurora:~$ aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Creating job hello_world
+     INFO] Checking status of devcluster/www-data/devel/hello_world
+    Job create succeeded: job url=http://aurora.local:8081/scheduler/www-data/devel/hello_world
+
+
+## Watching the Job Run
+
+Now that our job is running, let's see what it's doing. Access the
+scheduler web interface at `http://$scheduler_hostname:$scheduler_port/scheduler`,
+or, when using `vagrant`, at `http://192.168.33.7:8081/scheduler`.
+First we see what Jobs are scheduled:
+
+![Scheduled Jobs](../images/ScheduledJobs.png)
+
+Click on your user name, which in this case was `www-data`, and we see the Jobs associated
+with that role:
+
+![Role Jobs](../images/RoleJobs.png)
+
+If you click on your `hello_world` Job, you'll see:
+
+![hello_world Job](../images/HelloWorldJob.png)
+
+Oops, looks like our first job didn't quite work! The task is temporarily throttled for
+having failed on every attempt of the Aurora scheduler to run it. We have to figure out
+what is going wrong.
+
+On the Completed tasks tab, we see all past attempts of the Aurora scheduler to run our job.
+
+![Completed tasks tab](../images/CompletedTasks.png)
+
+We can navigate to the Task page of a failed run by clicking on the host link.
+
+![Task page](../images/TaskBreakdown.png)
+
+Once there, we see that the `hello_world` process failed. The Task page
+captures the standard error and standard output streams and makes them available.
+Clicking through to `stderr` on the failed `hello_world` process, we see what happened.
+
+![stderr page](../images/stderr.png)
+
+It looks like we made a typo in our Python script. We wanted `xrange`,
+not `xrang`. Edit the `hello_world.py` script to use the correct function
+and save it as `hello_world_v2.py`. Then update the `hello_world.aurora`
+configuration to the newest version.
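+
+For reference, the corrected script only changes the loop; a sketch of `hello_world_v2.py` is shown
+below. When updating the configuration, keep `pkg_path` and the file name used in the process
+command lines consistent with whichever file name you choose.
+
+```python
+import time
+
+def main():
+  SLEEP_DELAY = 10
+  # Fixed: xrange, not xrang.
+  for i in xrange(100):
+    print("Hello world! The time is now: %s. Sleeping for %d secs" % (
+      time.asctime(), SLEEP_DELAY))
+    time.sleep(SLEEP_DELAY)
+
+if __name__ == "__main__":
+  main()
+```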
+
+In order to try again, we can now instruct the scheduler to update our job:
+
+    vagrant@aurora:~$ aurora update start devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Starting update for: hello_world
+    Job update has started. View your update progress at http://aurora.local:8081/scheduler/www-data/devel/hello_world/update/8ef38017-e60f-400d-a2f2-b5a8b724e95b
+
+This time, the task comes up.
+
+![Running Job](../images/RunningJob.png)
+
+By again clicking on the host, we inspect the Task page, and see that the
+`hello_world` process is running.
+
+![Running Task page](../images/runningtask.png)
+
+We then inspect the output by clicking on `stdout` and see our process'
+output:
+
+![stdout page](../images/stdout.png)
+
+## Cleanup
+
+Now that we're done, we kill the job using the Aurora client:
+
+    vagrant@aurora:~$ aurora job killall devcluster/www-data/devel/hello_world
+     INFO] Killing tasks for job: devcluster/www-data/devel/hello_world
+     INFO] Instances to be killed: [0]
+    Successfully killed instances [0]
+    Job killall succeeded
+
+The job page now shows the `hello_world` tasks as completed.
+
+![Killed Task page](../images/killedtask.png)
+
+## Next Steps
+
+Now that you've finished this Tutorial, you should read or do the following:
+
+- [The Aurora Configuration Tutorial](../../reference/configuration-tutorial/), which provides more examples
+  and best practices for writing Aurora configurations. You should also look at
+  the [Aurora Configuration Reference](../../reference/configuration/).
+- Explore the Aurora Client - use `aurora -h`, and read the
+  [Aurora Client Commands](../../reference/client-commands/) document.
diff --git a/source/documentation/0.22.0/getting-started/vagrant.md b/source/documentation/0.22.0/getting-started/vagrant.md
new file mode 100644
index 0000000..e4d4168
--- /dev/null
+++ b/source/documentation/0.22.0/getting-started/vagrant.md
@@ -0,0 +1,154 @@
+A local Cluster with Vagrant
+============================
+
+This document shows you how to configure a complete cluster using a virtual machine. This setup
+replicates a real cluster in your development machine as closely as possible. After you complete
+the steps outlined here, you will be ready to create and run your first Aurora job.
+
+The following sections describe these steps in detail:
+
+1. [Overview](#overview)
+1. [Install VirtualBox and Vagrant](#install-virtualbox-and-vagrant)
+1. [Clone the Aurora repository](#clone-the-aurora-repository)
+1. [Start the local cluster](#start-the-local-cluster)
+1. [Log onto the VM](#log-onto-the-vm)
+1. [Run your first job](#run-your-first-job)
+1. [Rebuild components](#rebuild-components)
+1. [Shut down or delete your local cluster](#shut-down-or-delete-your-local-cluster)
+1. [Troubleshooting](#troubleshooting)
+
+
+Overview
+--------
+
+The Aurora distribution includes a set of scripts that enable you to create a local cluster in
+your development machine. These scripts use [Vagrant](https://www.vagrantup.com/) and
+[VirtualBox](https://www.virtualbox.org/) to run and configure a virtual machine. Once the
+virtual machine is running, the scripts install and initialize Aurora and any required components
+to create the local cluster.
+
+
+Install VirtualBox and Vagrant
+------------------------------
+
+First, download and install [VirtualBox](https://www.virtualbox.org/) on your development machine.
+
+Then download and install [Vagrant](https://www.vagrantup.com/). To verify that the installation
+was successful, open a terminal window and type the `vagrant` command. You should see a list of
+common commands for this tool.
+
+
+Clone the Aurora repository
+---------------------------
+
+To obtain the Aurora source distribution, clone its Git repository using the following command:
+
+     git clone git://git.apache.org/aurora.git
+
+
+Start the local cluster
+-----------------------
+
+Now change into the `aurora/` directory, which contains the Aurora source code and
+other scripts and tools:
+
+     cd aurora/
+
+To start the local cluster, type the following command:
+
+     vagrant up
+
+This command uses the configuration scripts in the Aurora distribution to:
+
+* Download a Linux system image.
+* Start a virtual machine (VM) and configure it.
+* Install the required build tools on the VM.
+* Install Aurora's requirements (like [Mesos](http://mesos.apache.org/) and
+[Zookeeper](http://zookeeper.apache.org/)) on the VM.
+* Build and install Aurora from source on the VM.
+* Start Aurora's services on the VM.
+
+This process takes several minutes to complete.
+
+You may notice a warning that guest additions in the VM don't match your version of VirtualBox.
+This should generally be harmless, but you may wish to install a vagrant plugin to take care of
+mismatches like this for you:
+
+     vagrant plugin install vagrant-vbguest
+
+With this plugin installed, whenever you run `vagrant up` the plugin will upgrade the guest additions
+for you when a version mismatch is detected. You can read more about the plugin
+[here](https://github.com/dotless-de/vagrant-vbguest).
+
+To verify that Aurora is running on the cluster, visit the following URLs:
+
+* Scheduler - http://192.168.33.7:8081
+* Observer - http://192.168.33.7:1338
+* Mesos Master - http://192.168.33.7:5050
+* Mesos Agent - http://192.168.33.7:5051
+
+
+Log onto the VM
+---------------
+
+To SSH into the VM, run the following command in your development machine:
+
+     vagrant ssh
+
+To verify that Aurora is installed in the VM, type the `aurora` command. You should see a list
+of arguments and possible commands.
+
+The `/vagrant` directory on the VM is mapped to the `aurora/` local directory
+from which you started the cluster. You can edit files inside this directory in your development
+machine and access them from the VM under `/vagrant`.
+
+A pre-installed `clusters.json` file refers to your local cluster as `devcluster`, which you
+will use in client commands.
+
+
+Run your first job
+------------------
+
+Now that your cluster is up and running, you are ready to define and run your first job in Aurora.
+For more information, see the [Aurora Tutorial](../tutorial/).
+
+
+Rebuild components
+------------------
+
+If you are changing Aurora code and would like to rebuild a component, you can use the `aurorabuild`
+command on the VM to build and restart a component.  This is considerably faster than destroying
+and rebuilding your VM.
+
+`aurorabuild` accepts a list of components to build and update; invoke it with no arguments to get
+a listing of supported components. For example, to rebuild and restart the client:
+
+     vagrant ssh -c 'aurorabuild client'
+
+
+Shut down or delete your local cluster
+--------------------------------------
+
+To shut down your local cluster, run the `vagrant halt` command in your development machine. To
+start it again, run the `vagrant up` command.
+
+Once you are finished with your local cluster, or if you would otherwise like to start from scratch,
+you can use the command `vagrant destroy` to turn off and delete the virtual machine.
+
+
+Troubleshooting
+---------------
+
+Most Vagrant-related problems can be fixed by the following steps:
+
+* Destroying the vagrant environment with `vagrant destroy`
+* Killing any orphaned VMs (see AURORA-499) with `virtualbox` UI or `VBoxManage` command line tool
+* Cleaning the repository of build artifacts and other intermediate output with `git clean -fdx`
+* Bringing up the vagrant environment with `vagrant up`
+
+If that still doesn't solve your problem, make sure to inspect the log files:
+
+* Scheduler: `/var/log/aurora/scheduler.log` or `sudo journalctl -u aurora-scheduler`
+* Observer: `/var/log/thermos/observer.log` or `sudo journalctl -u thermos-observer`
+* Mesos Master: `/var/log/mesos/mesos-master.INFO` (also see `.WARNING` and `.ERROR`)
+* Mesos Agent: `/var/log/mesos/mesos-slave.INFO` (also see `.WARNING` and `.ERROR`)
diff --git a/publish/documentation/latest/images/CPUavailability.png b/source/documentation/0.22.0/images/CPUavailability.png
similarity index 100%
copy from publish/documentation/latest/images/CPUavailability.png
copy to source/documentation/0.22.0/images/CPUavailability.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/CompletedTasks.png b/source/documentation/0.22.0/images/CompletedTasks.png
new file mode 100644
index 0000000..343e111
--- /dev/null
+++ b/source/documentation/0.22.0/images/CompletedTasks.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/HelloWorldJob.png b/source/documentation/0.22.0/images/HelloWorldJob.png
new file mode 100644
index 0000000..4977227
--- /dev/null
+++ b/source/documentation/0.22.0/images/HelloWorldJob.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/RoleJobs.png b/source/documentation/0.22.0/images/RoleJobs.png
new file mode 100644
index 0000000..95ffa49
--- /dev/null
+++ b/source/documentation/0.22.0/images/RoleJobs.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/RunningJob.png b/source/documentation/0.22.0/images/RunningJob.png
new file mode 100644
index 0000000..4e7a045
--- /dev/null
+++ b/source/documentation/0.22.0/images/RunningJob.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/ScheduledJobs.png b/source/documentation/0.22.0/images/ScheduledJobs.png
new file mode 100644
index 0000000..0be4731
--- /dev/null
+++ b/source/documentation/0.22.0/images/ScheduledJobs.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/TaskBreakdown.png b/source/documentation/0.22.0/images/TaskBreakdown.png
new file mode 100644
index 0000000..fab702c
--- /dev/null
+++ b/source/documentation/0.22.0/images/TaskBreakdown.png
Binary files differ
diff --git a/publish/documentation/latest/images/aurora_hierarchy.png b/source/documentation/0.22.0/images/aurora_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/aurora_hierarchy.png
copy to source/documentation/0.22.0/images/aurora_hierarchy.png
Binary files differ
diff --git a/publish/assets/img/aurora_logo.png b/source/documentation/0.22.0/images/aurora_logo.png
similarity index 100%
copy from publish/assets/img/aurora_logo.png
copy to source/documentation/0.22.0/images/aurora_logo.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/components.odg b/source/documentation/0.22.0/images/components.odg
new file mode 100644
index 0000000..a0f3b28
--- /dev/null
+++ b/source/documentation/0.22.0/images/components.odg
Binary files differ
diff --git a/source/documentation/0.22.0/images/components.png b/source/documentation/0.22.0/images/components.png
new file mode 100644
index 0000000..e8228c0
--- /dev/null
+++ b/source/documentation/0.22.0/images/components.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/debug-client-test.png b/source/documentation/0.22.0/images/debug-client-test.png
new file mode 100644
index 0000000..8b67a10
--- /dev/null
+++ b/source/documentation/0.22.0/images/debug-client-test.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/debugging-client-test.png b/source/documentation/0.22.0/images/debugging-client-test.png
new file mode 100644
index 0000000..df7b7e3
--- /dev/null
+++ b/source/documentation/0.22.0/images/debugging-client-test.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/killedtask.png b/source/documentation/0.22.0/images/killedtask.png
new file mode 100644
index 0000000..bb1bc92
--- /dev/null
+++ b/source/documentation/0.22.0/images/killedtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/lifeofatask.png b/source/documentation/0.22.0/images/lifeofatask.png
similarity index 100%
copy from publish/documentation/latest/images/lifeofatask.png
copy to source/documentation/0.22.0/images/lifeofatask.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png b/source/documentation/0.22.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
new file mode 100644
index 0000000..ea9866e
--- /dev/null
+++ b/source/documentation/0.22.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png b/source/documentation/0.22.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
new file mode 100644
index 0000000..c9539df
--- /dev/null
+++ b/source/documentation/0.22.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png b/source/documentation/0.22.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
new file mode 100644
index 0000000..99674c7
--- /dev/null
+++ b/source/documentation/0.22.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/presentations/02_28_2015_apache_aurora_thumb.png b/source/documentation/0.22.0/images/presentations/02_28_2015_apache_aurora_thumb.png
new file mode 100644
index 0000000..6e0e1fb
--- /dev/null
+++ b/source/documentation/0.22.0/images/presentations/02_28_2015_apache_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png b/source/documentation/0.22.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
new file mode 100644
index 0000000..f81a128
--- /dev/null
+++ b/source/documentation/0.22.0/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png b/source/documentation/0.22.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
new file mode 100644
index 0000000..6ebce80
--- /dev/null
+++ b/source/documentation/0.22.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png b/source/documentation/0.22.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
new file mode 100644
index 0000000..f8a3db1
--- /dev/null
+++ b/source/documentation/0.22.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/presentations/08_21_2014_past_present_future_thumb.png b/source/documentation/0.22.0/images/presentations/08_21_2014_past_present_future_thumb.png
new file mode 100644
index 0000000..deaebe7
--- /dev/null
+++ b/source/documentation/0.22.0/images/presentations/08_21_2014_past_present_future_thumb.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png b/source/documentation/0.22.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
new file mode 100644
index 0000000..61ea6d4
--- /dev/null
+++ b/source/documentation/0.22.0/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png b/source/documentation/0.22.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png
new file mode 100644
index 0000000..9f1f4af
--- /dev/null
+++ b/source/documentation/0.22.0/images/presentations/09_20_2015_twitter_production_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png b/source/documentation/0.22.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
new file mode 100644
index 0000000..a8bb199
--- /dev/null
+++ b/source/documentation/0.22.0/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png b/source/documentation/0.22.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
new file mode 100644
index 0000000..ac5c6dc
--- /dev/null
+++ b/source/documentation/0.22.0/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/runningtask.png b/source/documentation/0.22.0/images/runningtask.png
new file mode 100644
index 0000000..9e7e31a
--- /dev/null
+++ b/source/documentation/0.22.0/images/runningtask.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/stderr.png b/source/documentation/0.22.0/images/stderr.png
new file mode 100644
index 0000000..6076a84
--- /dev/null
+++ b/source/documentation/0.22.0/images/stderr.png
Binary files differ
diff --git a/source/documentation/0.22.0/images/stdout.png b/source/documentation/0.22.0/images/stdout.png
new file mode 100644
index 0000000..d614097
--- /dev/null
+++ b/source/documentation/0.22.0/images/stdout.png
Binary files differ
diff --git a/publish/documentation/latest/images/storage_hierarchy.png b/source/documentation/0.22.0/images/storage_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/storage_hierarchy.png
copy to source/documentation/0.22.0/images/storage_hierarchy.png
Binary files differ
diff --git a/source/documentation/0.22.0/index.html.md b/source/documentation/0.22.0/index.html.md
new file mode 100644
index 0000000..fe770d6
--- /dev/null
+++ b/source/documentation/0.22.0/index.html.md
@@ -0,0 +1,80 @@
+## Introduction
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+We encourage you to ask questions on the [Aurora user list](http://aurora.apache.org/community/) or
+the `#aurora` IRC channel on `irc.freenode.net`.
+
+
+## Getting Started
+Information for everyone new to Apache Aurora.
+
+ * [Aurora System Overview](getting-started/overview/)
+ * [Hello World Tutorial](getting-started/tutorial/)
+ * [Local cluster with Vagrant](getting-started/vagrant/)
+
+## Features
+Description of important Aurora features.
+
+ * [Containers](features/containers/)
+ * [Cron Jobs](features/cron-jobs/)
+ * [Custom Executors](features/custom-executors/)
+ * [Job Updates](features/job-updates/)
+ * [Multitenancy](features/multitenancy/)
+ * [Resource Isolation](features/resource-isolation/)
+ * [Scheduling Constraints](features/constraints/)
+ * [Services](features/services/)
+ * [Service Discovery](features/service-discovery/)
+ * [SLA Metrics](features/sla-metrics/)
+ * [SLA Requirements](features/sla-requirements/)
+ * [Webhooks](features/webhooks/)
+
+## Operators
+For those that wish to manage and fine-tune an Aurora cluster.
+
+ * [Installation](operations/installation/)
+ * [Configuration](operations/configuration/)
+ * [Upgrades](operations/upgrades/)
+ * [Troubleshooting](operations/troubleshooting/)
+ * [Monitoring](operations/monitoring/)
+ * [Security](operations/security/)
+ * [Storage](operations/storage/)
+ * [Backup](operations/backup-restore/)
+
+## Reference
+The complete reference of commands, configuration options, and scheduler internals.
+
+ * [Task lifecycle](reference/task-lifecycle/)
+ * Configuration (`.aurora` files)
+    - [Configuration Reference](reference/configuration/)
+    - [Configuration Tutorial](reference/configuration-tutorial/)
+    - [Configuration Best Practices](reference/configuration-best-practices/)
+    - [Configuration Templating](reference/configuration-templating/)
+ * Aurora Client
+    - [Client Commands](reference/client-commands/)
+    - [Client Hooks](reference/client-hooks/)
+    - [Client Cluster Configuration](reference/client-cluster-configuration/)
+ * [Scheduler Configuration](reference/scheduler-configuration/)
+ * [Observer Configuration](reference/observer-configuration/)
+ * [Endpoints](reference/scheduler-endpoints/)
+
+## Additional Resources
+ * [Tools integrating with Aurora](additional-resources/tools/)
+ * [Presentation videos and slides](additional-resources/presentations/)
+
+## Developers
+All the information you need to start modifying Aurora and contributing back to the project.
+
+ * [Contributing to the project](contributing/)
+ * [Committer's Guide](development/committers-guide/)
+ * [Design Documents](development/design-documents/)
+ * Developing the Aurora components:
+     - [Client](development/client/)
+     - [Scheduler](development/scheduler/)
+     - [Scheduler UI](development/ui/)
+     - [Thermos](development/thermos/)
+     - [Thrift structures](development/thrift/)
+
+
diff --git a/source/documentation/0.22.0/operations/backup-restore.md b/source/documentation/0.22.0/operations/backup-restore.md
new file mode 100644
index 0000000..2230614
--- /dev/null
+++ b/source/documentation/0.22.0/operations/backup-restore.md
@@ -0,0 +1,80 @@
+# Recovering from a Scheduler Backup
+
+**Be sure to read the entire page before attempting to restore from a backup, as it may have
+unintended consequences.**
+
+## Summary
+
+The restoration procedure replaces the existing (possibly corrupted) Mesos replicated log with an
+earlier, backed up, version and requires all schedulers to be taken down temporarily while
+restoring. Once completed, the scheduler state resets to what it was when the backup was created.
+This means any jobs/tasks created or updated after the backup are unknown to the scheduler and will
+be killed shortly after the cluster restarts. All other tasks continue operating as normal.
+
+Usually, it is a bad idea to restore a backup that is not extremely recent (i.e. older than a few
+hours). This is because the scheduler will expect the cluster to look exactly as the backup does,
+so any tasks that have been rescheduled since the backup was taken will be killed.
+
+Instructions below have been verified in a [Vagrant environment](../../getting-started/vagrant/) and with minor
+syntax/path changes should be applicable to any Aurora cluster.
+
+Follow these steps to prepare the cluster for restoring from a backup:
+
+##  Preparation
+
+* Stop all scheduler instances.
+
+* Pick a backup to use for rehydrating the mesos-replicated log. Backups can be found in the
+directory given to the scheduler as the `-backup_dir` argument. Backups are stored in the format
+`scheduler-backup-<yyyy-MM-dd-HH-mm>`.
+
+* If running the Aurora Scheduler in HA mode, pick a single scheduler instance to rehydrate.
+
+* Locate the `recovery-tool` in your setup. If Aurora was installed using a Debian package
+generated by our `aurora-packaging` script, the recovery tool can be found
+in `/usr/share/aurora/bin/recovery-tool`.
+
+## Cleanup
+
+* Delete (or move) the Mesos replicated log path for each scheduler instance. The replicated log
+file path can be found by looking at the value given to the flag
+`-native_log_file_path` for each instance.
+
+* Initialize the Mesos replicated log files using the mesos-log tool:
+```
+sudo -u <USER> mesos-log initialize --path=<native_log_file_path>
+```
+Where `USER` is the user under which the scheduler instance will be run. For installations using
+Debian packages, the default user will be `aurora`. You may alternatively choose to specify
+a group as well by passing the `-g <GROUP>` option to `sudo`.
+Note that if the user under which the Aurora scheduler instance is run _does not_ have permissions
+to read this directory and the files it contains, the instance will fail to start.
+
+## Restore from backup
+
+* Run the `recovery-tool`. Wherever the flags match those used for the scheduler instance,
+use the same values:
+```
+$ recovery-tool -from BACKUP \
+-to LOG \
+-backup=<selected_backup_location> \
+-native_log_zk_group_path=<native_log_zk_group_path> \
+-native_log_file_path=<native_log_file_path> \
+-zk_endpoints=<zk_endpoints>
+```
+
+## Bring scheduler instances back online
+
+### If running in HA Mode
+
+* Start the rehydrated scheduler instance along with enough cleaned up instances to
+meet the `-native_log_quorum_size`. The mesos-replicated log algorithm will replenish
+the "blank" scheduler instances with the information from the rehydrated instance.
+
+* Start any remaining scheduler instances.
+
+### If running in singleton mode
+
+* Start the single scheduler instance.
+
+
diff --git a/source/documentation/0.22.0/operations/configuration.md b/source/documentation/0.22.0/operations/configuration.md
new file mode 100644
index 0000000..ac5f226
--- /dev/null
+++ b/source/documentation/0.22.0/operations/configuration.md
@@ -0,0 +1,380 @@
+# Scheduler Configuration
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+Examples are available under `examples/scheduler/`. For a list of available Aurora flags and their
+documentation, see [Scheduler Configuration Reference](../../reference/scheduler-configuration/).
+
+
+## A Note on Configuration
+Like Mesos, Aurora uses command-line flags for runtime configuration. As such the Aurora
+"configuration file" is typically a `scheduler.sh` shell script of the form.
+
+    #!/bin/bash
+    AURORA_HOME=/usr/local/aurora-scheduler
+
+    # Flags controlling the JVM.
+    JAVA_OPTS=(
+      -Xmx2g
+      -Xms2g
+      # GC tuning, etc.
+    )
+
+    # Flags controlling the scheduler.
+    AURORA_FLAGS=(
+      # Port for client RPCs and the web UI
+      -http_port=8081
+      # Log configuration, etc.
+    )
+
+    # Environment variables controlling libmesos
+    export JAVA_HOME=...
+    export GLOG_v=1
+    export LIBPROCESS_PORT=8083
+    export LIBPROCESS_IP=192.168.33.7
+
+    JAVA_OPTS="${JAVA_OPTS[*]}" exec "$AURORA_HOME/bin/aurora-scheduler" "${AURORA_FLAGS[@]}"
+
+That way Aurora's current flags are visible in `ps` and in the `/vars` admin endpoint.
+
+
+## JVM Configuration
+
+JVM settings are dependent on your environment and cluster size. They might require
+custom tuning. As a starting point, we recommend:
+
+* Ensure the initial (`-Xms`) and maximum (`-Xmx`) heap sizes are identical to prevent heap resizing
+  at runtime.
+* Either `-XX:+UseConcMarkSweepGC` or `-XX:+UseG1GC -XX:+UseStringDeduplication` are
+  sane defaults for the garbage collector.
+* `-Djava.net.preferIPv4Stack=true` makes sense in most cases as well.
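+
+Taken together, the `JAVA_OPTS` block from the `scheduler.sh` example above might start out as
+follows; the 2 GiB heap is illustrative only and should be sized for your cluster:
+
+    JAVA_OPTS=(
+      -Xms2g
+      -Xmx2g
+      -XX:+UseG1GC
+      -XX:+UseStringDeduplication
+      -Djava.net.preferIPv4Stack=true
+    )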
+
+
+## Network Configuration
+
+By default, Aurora binds to all interfaces and auto-discovers its hostname. To reduce ambiguity,
+it helps to hardcode these settings:
+
+    -http_port=8081
+    -ip=192.168.33.7
+    -hostname="aurora1.us-east1.example.org"
+
+Two environment variables control the IP and port used for communication with the Mesos master
+and for the replicated log used by Aurora:
+
+    export LIBPROCESS_PORT=8083
+    export LIBPROCESS_IP=192.168.33.7
+
+It is important that those can be reached from all Mesos master and Aurora scheduler instances.
+
+
+## Replicated Log Configuration
+
+Aurora schedulers use ZooKeeper to discover log replicas and elect a leader. Only one scheduler is
+leader at a given time - the other schedulers follow log writes and prepare to take over as leader
+but do not communicate with the Mesos master. Either 3 or 5 schedulers are recommended in a
+production deployment depending on failure tolerance and they must have persistent storage.
+
+Below is a summary of scheduler storage configuration flags that either don't have default values
+or require attention before deploying in a production environment.
+
+### `-native_log_quorum_size`
+Defines the Mesos replicated log quorum size. In a cluster with `N` schedulers, the flag
+`-native_log_quorum_size` should be set to `floor(N/2) + 1`. So in a cluster with 1 scheduler
+it should be set to `1`, in a cluster with 3 it should be set to `2`, and in a cluster of 5 it
+should be set to `3`.
+
+  Number of schedulers (N) | ```-native_log_quorum_size``` setting (```floor(N/2) + 1```)
+  ------------------------ | -------------------------------------------------------------
+  1                        | 1
+  3                        | 2
+  5                        | 3
+  7                        | 4
+
+*Incorrectly setting this flag will cause data corruption to occur!*
+
+### `-native_log_file_path`
+Location of the Mesos replicated log files. For optimal and consistent performance, consider
+allocating a dedicated disk (preferably SSD) for the replicated log. Ensure that this disk is not
+used by anything else (e.g. no process logging) and in particular that it is a real disk
+and not just a partition.
+
+Even when a dedicated disk is used, switching the Linux kernel I/O scheduler from `CFQ` to `deadline`
+can furthermore help with storage performance in Aurora ([see this ticket for details](https://issues.apache.org/jira/browse/AURORA-1211)).
+
+### `-native_log_zk_group_path`
+ZooKeeper path used for Mesos replicated log quorum discovery.
+
+See [code](https://github.com/apache/aurora/blob/rel/0.22.0/src/main/java/org/apache/aurora/scheduler/log/mesos/MesosLogStreamModule.java) for
+other available Mesos replicated log configuration options and default values.
+
+### Changing the Quorum Size
+Special care needs to be taken when changing the size of the Aurora scheduler quorum.
+Since Aurora uses a Mesos replicated log, similar steps need to be followed as when
+[changing the Mesos quorum size](http://mesos.apache.org/documentation/latest/operational-guide).
+
+As a preparation, increase `-native_log_quorum_size` on each existing scheduler and restart them.
+When updating from 3 to 5 schedulers, the quorum size would grow from 2 to 3.
+
+When starting the new schedulers, use the `-native_log_quorum_size` set to the new value. Failing to
+first increase the quorum size on running schedulers can in some cases result in corruption
+or truncating of the replicated log used by Aurora. In that case, see the documentation on
+[recovering from backup](../backup-restore/).
+
+
+## Backup Configuration
+
+Configuration options for the Aurora scheduler backup manager.
+
+* `-backup_interval`: The interval on which the scheduler writes local storage backups.
+   The default is every hour.
+* `-backup_dir`: Directory to write backups to. As stated above, this should not be co-located on
+   the same disk as the replicated log.
+* `-max_saved_backups`: Maximum number of backups to retain before deleting the oldest backup(s).
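+
+For example, a scheduler keeping roughly two days of hourly backups might be started with flags
+along these lines (the directory path and retention count are illustrative only):
+
+    -backup_interval=1hrs
+    -backup_dir=/var/lib/aurora/backups
+    -max_saved_backups=48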
+
+
+## Resource Isolation
+
+For proper CPU, memory, and disk isolation as mentioned in our [end-user documentation](../../features/resource-isolation/),
+we recommend adding the following isolators to the `--isolation` flag of the Mesos agent:
+
+* `cgroups/cpu`
+* `cgroups/mem`
+* `disk/du`
+
+In addition, we recommend setting the following [agent flags](http://mesos.apache.org/documentation/latest/configuration/):
+
+* `--cgroups_limit_swap` to enable memory limits on both memory and swap instead of just memory.
+  Alternatively, you could disable swap on your agent hosts.
+* `--cgroups_enable_cfs` to enable hard limits on CPU resources via the CFS bandwidth limiting
+  feature.
+* `--enforce_container_disk_quota` to enable disk quota enforcement for containers.
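+
+Combined, an agent command line could include flags like the following sketch (adapt the isolator
+list to whatever else your agents need):
+
+    --isolation=cgroups/cpu,cgroups/mem,disk/du
+    --cgroups_limit_swap
+    --cgroups_enable_cfs
+    --enforce_container_disk_quota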
+
+To enable the optional GPU support in Mesos, please see the GPU related flags in the
+[Mesos configuration](http://mesos.apache.org/documentation/latest/configuration/).
+To enable the corresponding feature in Aurora, you have to start the scheduler with the
+flag
+
+    -allow_gpu_resource=true
+
+If you want to use revocable resources, first follow the
+[Mesos oversubscription documentation](http://mesos.apache.org/documentation/latest/oversubscription/)
+and then set this Aurora scheduler flag to allow receiving revocable Mesos offers:
+
+    -receive_revocable_resources=true
+
+Both CPUs and RAM are supported as revocable resources. The former is enabled by default,
+the latter needs to be enabled via:
+
+    -enable_revocable_ram=true
+
+Unless you want to use the [default](https://github.com/apache/aurora/blob/rel/0.22.0/src/main/resources/org/apache/aurora/scheduler/tiers.json)
+tier configuration, you will also have to specify a file path:
+
+    -tier_config=path/to/tiers/config.json
+
+
+## Multi-Framework Setup
+
+Aurora holds onto Mesos offers in order to provide efficient scheduling and
+[preemption](../../features/multitenancy/#preemption). This is problematic in multi-framework
+environments as Aurora might starve other frameworks.
+
+With a downside of increased scheduling latency, Aurora can be configured to be more cooperative:
+
+* Lowering `-min_offer_hold_time` (e.g. to `1mins`) can ensure unused offers are returned back to
+  Mesos more frequently.
+* Increasing `-offer_filter_duration` (e.g. to `30secs`) will instruct Mesos
+  not to re-offer rejected resources for the given duration.
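+
+Using the example values above, such a cooperative configuration adds the following scheduler flags:
+
+    -min_offer_hold_time=1mins
+    -offer_filter_duration=30secs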
+
+Setting a [minimum amount of resources](http://mesos.apache.org/documentation/latest/quota/) for
+each Mesos role can furthermore help to ensure no framework is starved entirely.
+
+
+## Containers
+
+Both the Mesos and Docker containerizers require configuration of the Mesos agent.
+
+### Mesos Containerizer
+
+The minimal agent configuration requires enabling Docker and Appc image support for the Mesos
+containerizer:
+
+    --containerizers=mesos
+    --image_providers=appc,docker
+    --isolation=filesystem/linux,docker/runtime  # as an addition to your other isolators
+
+Further details can be found in the corresponding [Mesos documentation](http://mesos.apache.org/documentation/latest/container-image/).
+
+### Docker Containerizer
+
+The [Docker containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/)
+requires that the Docker engine is installed on each agent host. In addition, it must be enabled on the
+Mesos agents by launching them with the option:
+
+    --containerizers=mesos,docker
+
+If you would like to run a container with a read-only filesystem, it may also be necessary to use
+the scheduler flag `-thermos_home_in_sandbox` in order to set HOME to the sandbox
+before the executor runs. This will make sure that the executor/runner PEX extractions happens
+inside of the sandbox instead of the container filesystem root.
+
+If you would like to supply your own parameters to `docker run` when launching jobs in docker
+containers, you may use the following flags:
+
+    -allow_docker_parameters
+    -default_docker_parameters
+
+`-allow_docker_parameters` controls whether or not users may pass their own configuration parameters
+through the job configuration files. If set to `false` (the default), the scheduler will reject
+jobs with custom parameters. *NOTE*: this setting should be used with caution as it allows any job
+owner to specify any parameters they wish, including those that may introduce security concerns
+(`privileged=true`, for example).
+
+`-default_docker_parameters` allows a cluster operator to specify a universal set of parameters that
+should be used for every container that does not have parameters explicitly configured at the job
+level. The argument accepts a multimap format:
+
+    -default_docker_parameters="read-only=true,tmpfs=/tmp,tmpfs=/run"
+
+### Common Options
+
+The following Aurora options work for both containerizers.
+
+The scheduler flag `-global_container_mounts` allows mounting paths from the host (i.e. the agent machine)
+into all containers on that host. The format is a comma-separated list of `host_path:container_path[:mode]`
+tuples. For example, `-global_container_mounts=/opt/secret_keys_dir:/mnt/secret_keys_dir:ro` mounts
+`/opt/secret_keys_dir` from the agents into all launched containers. Valid modes are `ro` and `rw`.
+
+
+## Thermos Process Logs
+
+### Log destination
+By default, Thermos will write process stdout/stderr to log files in the sandbox. Process object
+configuration allows specifying alternate log file destinations like streamed stdout/stderr or
+suppression of all log output. Default behavior can be configured for the entire cluster with the
+following flag (through the `-thermos_executor_flags` argument to the Aurora scheduler):
+
+    --runner-logger-destination=both
+
+`both` configuration will send logs to files and stream to parent stdout/stderr outputs.
+
+See [Configuration Reference](../../reference/configuration/#logger) for all destination options.
+
+### Log rotation
+By default, Thermos will not rotate the stdout/stderr logs from child processes and they will grow
+without bound. An individual user may change this behavior via configuration on the Process object,
+but it may also be desirable to change the default configuration for the entire cluster.
+In order to enable rotation by default, the following flags can be applied to Thermos (through the
+`-thermos_executor_flags` argument to the Aurora scheduler):
+
+    --runner-logger-mode=rotate
+    --runner-rotate-log-size-mb=100
+    --runner-rotate-log-backups=10
+
+In the above example, each instance of the Thermos runner will rotate stderr/stdout logs once they
+reach 100 MiB in size and keep a maximum of 10 backups. If a user has provided a custom setting for
+their process, it will override these default settings.
+
+
+## Thermos Executor Wrapper
+
+If you need to do computation before starting the Thermos executor (for example, setting a different
+`--announcer-hostname` parameter for every executor), then the Thermos executor should be invoked
+inside a wrapper script. In such a case, the aurora scheduler should be started with
+`-thermos_executor_path` pointing to the wrapper script and `-thermos_executor_resources` set to a
+comma separated string of all the resources that should be copied into the sandbox (including the
+original Thermos executor). Ensure the wrapper script does not access resources outside of the
+sandbox, as when the script is run from within a Docker container those resources may not exist.
+
+For example, to wrap the executor inside a simple wrapper, the scheduler would be started like this:
+`-thermos_executor_path=/path/to/wrapper.sh -thermos_executor_resources=/usr/share/aurora/bin/thermos_executor.pex`
+
+## Custom Executors
+
+The scheduler can be configured to utilize a custom executor by specifying the `-custom_executor_config` flag.
+The flag must be set to the path of a valid executor configuration file.
+
+For more information on this feature please see the custom executors [documentation](../../features/custom-executors/).
+
+## A note on increasing executor overhead
+
+Increasing executor overhead on an existing cluster, whether for custom executors or for Thermos,
+will result in degraded preemption performance until all tasks that began life with the previous,
+lower-overhead executor configuration are preempted/restarted.
+
+## Controlling MTTA via Update Affinity
+
+When there is high resource contention in your cluster, you may experience noticeably elevated job update
+times, as well as high task churn across the cluster. This is due to Aurora's first-fit scheduling
+algorithm. To alleviate this, you can enable update affinity where the Scheduler will make a best-effort
+attempt to reuse the same agent for the updated task (so long as the resources for the job are not being
+increased).
+
+To enable this in the Scheduler, you can set the following options:
+
+    -enable_update_affinity=true
+    -update_affinity_reservation_hold_time=3mins
+
+You will need to tune the hold time to match the behavior you see in your cluster. If you have extremely
+high update throughput, you might have to extend it as processing updates could easily add significant
+delays between scheduling attempts. You may also have to tune scheduling parameters to achieve the
+throughput you need in your cluster. Some relevant settings (with defaults) are:
+
+    -max_schedule_attempts_per_sec=40
+    -initial_schedule_penalty=1secs
+    -max_schedule_penalty=1mins
+    -scheduling_max_batch_size=3
+    -max_tasks_per_schedule_attempt=5
+
+There are metrics exposed by the Scheduler which can provide guidance on where the bottleneck is.
+Example metrics to look at:
+
+    - schedule_attempts_blocks (if this number is greater than 0, then task throughput is hitting
+                                limits controlled by -max_schedule_attempts_per_sec)
+    - scheduled_task_penalty_* (metrics around scheduling penalties for tasks, if the numbers here are high
+                                then you could have high contention for resources)
+
+Most likely you'll hit the limit on the number of update instances that can be processed per minute
+before you run into any other limits. So if your total work done per minute starts to exceed 2k instances,
+you may need to extend the update_affinity_reservation_hold_time.
+
+## Cluster Maintenance
+
+Aurora performs maintenance-related task drains. How often the scheduler polls for maintenance
+work can be controlled via the following scheduler option:
+
+    -host_maintenance_polling_interval=1min
+
+## Enforcing SLA limitations
+
+Since tasks can specify their own `SLAPolicy`, the cluster needs to limit these SLA requirements.
+Too aggressive a requirement can permanently block any type of maintenance work
+(e.g. OS/kernel/security upgrades) on a host and hold it hostage.
+
+An operator can control the limits for SLA requirements via these scheduler configuration options:
+
+    -max_sla_duration_secs=2hrs
+    -min_required_instances_for_sla_check=20
+
+_Note: These limits only apply for `CountSlaPolicy` and `PercentageSlaPolicy`._
+
+### Limiting Coordinator SLA
+
+With `CoordinatorSlaPolicy` the SLA calculation is off-loaded to an external HTTP service. Some
+relevant scheduler configuration options are,
+
+    -sla_coordinator_timeout=1min
+    -max_parallel_coordinated_maintenance=10
+
+Handing off the SLA calculation to an external service can potentially block maintenance
+on hosts for an indefinite amount of time (either due to a mis-configured coordinator or due to
+a legitimately degraded service). In those situations the following metrics will be helpful to
+identify the offending tasks.
+
+    sla_coordinator_user_errors_*     (counter tracking number of times the coordinator for the task
+                                       returned a bad response.)
+    sla_coordinator_errors_*          (counter tracking number of times the scheduler was not able
+                                       to communicate with the coordinator of the task.)
+    sla_coordinator_lock_starvation_* (counter tracking number of times the scheduler was not able to
+                                       get the lock for the coordinator of the task.)
+
diff --git a/source/documentation/0.22.0/operations/installation.md b/source/documentation/0.22.0/operations/installation.md
new file mode 100644
index 0000000..12db004
--- /dev/null
+++ b/source/documentation/0.22.0/operations/installation.md
@@ -0,0 +1,256 @@
+# Installing Aurora
+
+Source and binary distributions can be found on our
+[downloads](https://aurora.apache.org/downloads/) page.  Installing from binary packages is
+recommended for most.
+
+- [Installing the scheduler](#installing-the-scheduler)
+- [Installing worker components](#installing-worker-components)
+- [Installing the client](#installing-the-client)
+- [Installing Mesos](#installing-mesos)
+- [Troubleshooting](#troubleshooting)
+
+If our binary packages don't suit you, our package build toolchain makes it easy to build your
+own packages. See the [instructions](https://github.com/apache/aurora-packaging) to learn how.
+
+
+## Machine profiles
+
+Given that many of these components communicate over the network, there are numerous ways you could
+assemble them to create an Aurora cluster.  The simplest way is to think in terms of three machine
+profiles:
+
+### Coordinator
+**Components**: ZooKeeper, Aurora scheduler, Mesos master
+
+A small number of machines (typically 3 or 5) responsible for cluster orchestration.  In most cases
+it is fine to co-locate these components in anything but very large clusters (> 1000 machines).
+Beyond that point, operators will likely want to manage these services on separate machines.
+In particular, you will want to use separate ZooKeeper ensembles for leader election and
+service discovery. Otherwise a service discovery error or outage can take down the entire cluster.
+
+In practice, 5 coordinators have been shown to reliably manage clusters with tens of thousands of
+machines.
+
+### Worker
+**Components**: Aurora executor, Aurora observer, Mesos agent
+
+The bulk of the cluster, where services will actually run.
+
+### Client
+**Components**: Aurora client, Aurora admin client
+
+Any machines that users submit jobs from.
+
+
+## Installing the scheduler
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        sudo start mesos-master
+
+2. Install ZooKeeper
+
+        sudo apt-get install -y zookeeperd
+
+3. Install the Aurora scheduler
+
+        sudo add-apt-repository -y ppa:openjdk-r/ppa
+        sudo apt-get update
+        sudo apt-get install -y openjdk-8-jre-headless wget
+
+        sudo update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-scheduler_0.17.0_amd64.deb
+        sudo dpkg -i aurora-scheduler_0.17.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-master
+
+2. Install ZooKeeper
+
+        sudo rpm -Uvh https://archive.cloudera.com/cdh4/one-click-install/redhat/6/x86_64/cloudera-cdh-4-0.x86_64.rpm
+        sudo yum install -y java-1.8.0-openjdk-headless zookeeper-server
+
+        sudo service zookeeper-server init
+        sudo systemctl start zookeeper-server
+
+3. Install the Aurora scheduler
+
+        sudo yum install -y wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-scheduler-0.17.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-scheduler-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Finalizing
+By default, the scheduler will start in an uninitialized mode.  This is because external
+coordination is necessary to be certain operator error does not result in a quorum of schedulers
+starting up and believing their databases are empty when in fact they should be re-joining a
+cluster.
+
+Because of this, a fresh install of the scheduler will need intervention to start up.  First,
+stop the scheduler service.
+Ubuntu: `sudo stop aurora-scheduler`
+CentOS: `sudo systemctl stop aurora`
+
+Now initialize the database:
+
+    sudo -u aurora mkdir -p /var/lib/aurora/scheduler/db
+    sudo -u aurora mesos-log initialize --path=/var/lib/aurora/scheduler/db
+
+Now you can start the scheduler back up.
+Ubuntu: `sudo start aurora-scheduler`
+CentOS: `sudo systemctl start aurora`
+
+
+## Installing worker components
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo apt-get install -y python2.7 wget
+
+        # NOTE: This appears to be a missing dependency of the mesos deb package and is needed
+        # for the python mesos native bindings.
+        sudo apt-get -y install libcurl4-nss-dev
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-executor_0.17.0_amd64.deb
+        sudo dpkg -i aurora-executor_0.17.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo yum install -y python2 wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-executor-0.17.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-executor-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Worker Configuration
+The executor typically does not require configuration.  Command line arguments can
+be passed to the executor via the `-thermos_executor_flags` argument on the scheduler.
+
+The observer needs to be configured to look at the correct mesos directory in order to find task
+sandboxes. You should first find the Mesos working directory by looking for the Mesos agent
+`--work_dir` flag. You should see something like:
+
+        ps -eocmd | grep "mesos-slave" | grep -v grep | tr ' ' '\n' | grep "\--work_dir"
+        --work_dir=/var/lib/mesos
+
+If the flag is not set, you can view the default value like so:
+
+        mesos-slave --help
+        Usage: mesos-slave [options]
+
+          ...
+          --work_dir=VALUE      Directory path to place framework work directories
+                                (default: /tmp/mesos)
+          ...
+
+The value you find for `--work_dir`, `/var/lib/mesos` in this example, should match the Aurora
+observer value for `--mesos-root`.  You can look for that setting in a similar way on a worker
+node by grepping for `thermos_observer` and `--mesos-root`.  If the flag is not set, you can view
+the default value like so:
+
+        thermos_observer -h
+        Options:
+          ...
+          --mesos-root=MESOS_ROOT
+                                The mesos root directory to search for Thermos
+                                executor sandboxes [default: /var/lib/mesos]
+          ...
+
+In this case the default is `/var/lib/mesos` and we have a match. If there is no match, you can
+either adjust the Mesos agent (mesos-slave) start script(s) and restart the agent(s) or else adjust the
+Aurora observer start scripts and restart the observers.  To adjust the Aurora observer:
+
+#### Ubuntu Trusty
+
+    sudo sh -c 'echo "MESOS_ROOT=/tmp/mesos" >> /etc/default/thermos'
+
+#### CentOS 7
+
+Make an edit to add the `--mesos-root` flag resulting in something like:
+
+    grep -A5 OBSERVER_ARGS /etc/sysconfig/thermos
+    OBSERVER_ARGS=(
+      --port=1338
+      --mesos-root=/tmp/mesos
+      --log_to_disk=NONE
+      --log_to_stderr=google:INFO
+    )
+
+
+## Installing the client
+### Ubuntu Trusty
+
+    sudo apt-get install -y python2.7 wget
+
+    wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-tools_0.17.0_amd64.deb
+    sudo dpkg -i aurora-tools_0.17.0_amd64.deb
+
+### CentOS 7
+
+    sudo yum install -y python2 wget
+
+    wget -c https://apache.bintray.com/aurora/centos-7/aurora-tools-0.17.0-1.el7.centos.aurora.x86_64.rpm
+    sudo yum install -y aurora-tools-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Mac OS X
+
+    brew upgrade
+    brew install aurora-cli
+
+### Client Configuration
+Client configuration lives in a json file that describes the clusters available and how to reach
+them.  By default this file is at `/etc/aurora/clusters.json`.
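+
+As a minimal sketch (placeholder values; see the
+[Client Cluster Configuration](../../reference/client-cluster-configuration/) reference for all
+available properties), an entry for a single ZooKeeper-discovered cluster might look like:
+
+    [{
+      "name": "example",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler"
+    }]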
+
+Jobs may be submitted to the scheduler using the client, and are described with
+[job configurations](../../reference/configuration/) expressed in `.aurora` files.  Typically you will
+maintain a single job configuration file to describe one or more deployment environments (e.g.
+dev, test, prod) for a production job.
+
+
+## Installing Mesos
+Mesos uses a single package for the Mesos master and agent.  As a result, the package dependencies
+are identical for both.
+
+### Mesos on Ubuntu Trusty
+
+    sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF
+    DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]')
+    CODENAME=$(lsb_release -cs)
+
+    echo "deb http://repos.mesosphere.io/${DISTRO} ${CODENAME} main" | \
+      sudo tee /etc/apt/sources.list.d/mesosphere.list
+    sudo apt-get -y update
+
+    # Use `apt-cache showpkg mesos | grep [version]` to find the exact version.
+    sudo apt-get -y install mesos=1.1.0-2.0.107.ubuntu1404
+
+### Mesos on CentOS 7
+
+    sudo rpm -Uvh https://repos.mesosphere.io/el/7/noarch/RPMS/mesosphere-el-repo-7-1.noarch.rpm
+    sudo yum -y install mesos-1.1.0
+
+
+## Troubleshooting
+
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions in our [Troubleshooting guide](../troubleshooting/) to help get you moving.
diff --git a/source/documentation/0.22.0/operations/monitoring.md b/source/documentation/0.22.0/operations/monitoring.md
new file mode 100644
index 0000000..2ceda62
--- /dev/null
+++ b/source/documentation/0.22.0/operations/monitoring.md
@@ -0,0 +1,181 @@
+# Monitoring your Aurora cluster
+
+Before you start running important services in your Aurora cluster, it's important to set up
+monitoring and alerting of Aurora itself.  Most of your monitoring can be against the scheduler,
+since it will give you a global view of what's going on.
+
+## Reading stats
+The scheduler exposes a *lot* of instrumentation data via its HTTP interface. You can get a quick
+peek at the first few of these in our vagrant image:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars | head'
+    async_tasks_completed 1004
+    attribute_store_fetch_all_events 15
+    attribute_store_fetch_all_events_per_sec 0.0
+    attribute_store_fetch_all_nanos_per_event 0.0
+    attribute_store_fetch_all_nanos_total 3048285
+    attribute_store_fetch_all_nanos_total_per_sec 0.0
+    attribute_store_fetch_one_events 3391
+    attribute_store_fetch_one_events_per_sec 0.0
+    attribute_store_fetch_one_nanos_per_event 0.0
+    attribute_store_fetch_one_nanos_total 454690753
+
+These values are served as `Content-Type: text/plain`, with each line containing a space-separated metric
+name and value. Values may be integers, doubles, or strings (note: strings are static, others
+may be dynamic).
+
+If your monitoring infrastructure prefers JSON, the scheduler exports that as well:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars.json | python -mjson.tool | head'
+    {
+        "async_tasks_completed": 1009,
+        "attribute_store_fetch_all_events": 15,
+        "attribute_store_fetch_all_events_per_sec": 0.0,
+        "attribute_store_fetch_all_nanos_per_event": 0.0,
+        "attribute_store_fetch_all_nanos_total": 3048285,
+        "attribute_store_fetch_all_nanos_total_per_sec": 0.0,
+        "attribute_store_fetch_one_events": 3409,
+        "attribute_store_fetch_one_events_per_sec": 0.0,
+        "attribute_store_fetch_one_nanos_per_event": 0.0,
+
+This will be the same data as above, served with `Content-Type: application/json`.
+
+## Viewing live stat samples on the scheduler
+The scheduler uses the Twitter commons stats library, which keeps an internal time-series database
+of exported variables - nearly everything in `/vars` is available for instant graphing.  This is
+useful for debugging, but is not a replacement for an external monitoring system.
+
+You can view these graphs on a scheduler at `/graphview`.  It supports some composition and
+aggregation of values, which can be invaluable when triaging a problem.  For example, if you have
+the scheduler running in vagrant, check out these links:
+[simple graph](http://192.168.33.7:8081/graphview?query=jvm_uptime_secs)
+[complex composition](http://192.168.33.7:8081/graphview?query=rate\(scheduler_log_native_append_nanos_total\)%2Frate\(scheduler_log_native_append_events\)%2F1e6)
+
+### Counters and gauges
+Among numeric stats, there are two fundamental types of stats exported: _counters_ and _gauges_.
+Counters are guaranteed to be monotonically-increasing for the lifetime of a process, while gauges
+may decrease in value.  Aurora uses counters to represent things like the number of times an event
+has occurred, and gauges to capture things like the current length of a queue.  Counters are a
+natural fit for accurate composition into [rate ratios](http://en.wikipedia.org/wiki/Rate_ratio)
+(useful for sample-resistant latency calculation), while gauges are not.
+
+# Alerting
+
+## Quickstart
+If you are looking for just bare-minimum alerting to get something in place quickly, set up alerting
+on `framework_registered` and `task_store_LOST`. These will give you a decent picture of overall
+health.
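+
+As a trivial sketch of polling just these two stats from the `/vars` endpoint shown above
+(assuming the scheduler is reachable on `localhost:8081`):
+
+    curl -s localhost:8081/vars | grep -E '^(framework_registered|task_store_LOST) '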
+
+## A note on thresholds
+One of the most difficult things in monitoring is choosing alert thresholds. With many of these
+stats, there is no value we can offer as a threshold that will be guaranteed to work for you. It
+will depend on the size of your cluster, number of jobs, churn of tasks in the cluster, etc. We
+recommend you start with a strict value after viewing a small amount of collected data, and then
+adjust thresholds as you see fit. Feel free to ask us if you would like to validate that your alerts
+and thresholds make sense.
+
+## Important stats
+
+### `jvm_uptime_secs`
+Type: integer counter
+
+The number of seconds the JVM process has been running. Comes from
+[RuntimeMXBean#getUptime()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/RuntimeMXBean.html#getUptime\(\))
+
+Detecting resets (decreasing values) on this stat will tell you that the scheduler is failing to
+stay alive.
+
+Look at the scheduler logs to identify the reason the scheduler is exiting.
+
+### `system_load_avg`
+Type: double gauge
+
+The current load average of the system for the last minute. Comes from
+[OperatingSystemMXBean#getSystemLoadAverage()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/OperatingSystemMXBean.html?is-external=true#getSystemLoadAverage\(\)).
+
+A high sustained value suggests that the scheduler machine may be over-utilized.
+
+Use standard unix tools like `top` and `ps` to track down the offending process(es).
+
+### `process_cpu_cores_utilized`
+Type: double gauge
+
+The current number of CPU cores in use by the JVM process. This should not exceed the number of
+logical CPU cores on the machine. Derived from
+[OperatingSystemMXBean#getProcessCpuTime()](http://docs.oracle.com/javase/7/docs/jre/api/management/extension/com/sun/management/OperatingSystemMXBean.html)
+
+A high sustained value indicates that the scheduler is overworked. Due to current internal design
+limitations, if this value is sustained at `1`, there is a good chance the scheduler is under water.
+
+There are two main inputs that tend to drive this figure: task scheduling attempts and status
+updates from Mesos.  You may see activity in the scheduler logs to give an indication of where
+time is being spent.  Beyond that, it really takes good familiarity with the code to effectively
+triage this.  We suggest engaging with an Aurora developer.
+
+### `task_store_LOST`
+Type: integer gauge
+
+The number of tasks stored in the scheduler that are in the `LOST` state, and have been rescheduled.
+
+If this value is increasing at a high rate, it is a sign of trouble.
+
+There are many sources of `LOST` tasks in Mesos: the scheduler, master, agent, and executor can all
+trigger this.  The first step is to look in the scheduler logs for `LOST` to identify where the
+state changes are originating.
+
+### `scheduler_resource_offers`
+Type: integer counter
+
+The number of resource offers that the scheduler has received.
+
+For a healthy scheduler, this value must be increasing over time.
+
+Assuming the scheduler is up and otherwise healthy, you will want to check if the master thinks it
+is sending offers. You should also look at the master's web interface to see if it has a large
+number of outstanding offers that it is waiting to be returned.
+
+### `framework_registered`
+Type: binary integer counter
+
+Will be `1` for the leading scheduler that is registered with the Mesos master, `0` for passive
+schedulers.
+
+A sustained period without a `1` (or where `sum() != 1`) warrants investigation.
+
+If there is no leading scheduler, look in the scheduler and master logs for why.  If there are
+multiple schedulers claiming leadership, this suggests a split brain and warrants filing a critical
+bug.
+
+### `rate(scheduler_log_native_append_nanos_total)/rate(scheduler_log_native_append_events)`
+Type: rate ratio of integer counters
+
+This composes two counters to compute a windowed figure for the latency of replicated log writes.
+
+A hike in this value suggests disk bandwidth contention.
+
+Look in scheduler logs for any reported oddness with saving to the replicated log. Also use
+standard tools like `vmstat` and `iotop` to identify whether the disk has become slow or
+over-utilized. We suggest using a dedicated disk for the replicated log to mitigate this.
+
+### `timed_out_tasks`
+Type: integer counter
+
+Tracks the number of times the scheduler has given up while waiting
+(for `-transient_task_state_timeout`) to hear back about a task that is in a transient state
+(e.g. `ASSIGNED`, `KILLING`), and has moved to `LOST` before rescheduling.
+
+This value is currently known to increase occasionally when the scheduler fails over
+([AURORA-740](https://issues.apache.org/jira/browse/AURORA-740)). However, any large spike in this
+value warrants investigation.
+
+The scheduler will log when it times out a task. You should trace the task ID of the timed out
+task into the master, agent, and/or executors to determine where the message was dropped.
+
+### `http_500_responses_events`
+Type: integer counter
+
+The total number of HTTP 500 status responses sent by the scheduler. Includes API and asset serving.
+
+An increase warrants investigation.
+
+Look in scheduler logs to identify why the scheduler returned a 500; there should be a stack trace.
diff --git a/source/documentation/0.22.0/operations/security.md b/source/documentation/0.22.0/operations/security.md
new file mode 100644
index 0000000..1fdf10f
--- /dev/null
+++ b/source/documentation/0.22.0/operations/security.md
@@ -0,0 +1,362 @@
+Securing your Aurora Cluster
+============================
+
+Aurora integrates with [Apache Shiro](http://shiro.apache.org/) to provide security
+controls for its API. In addition to providing some useful features out of the box, Shiro
+also allows Aurora cluster administrators to adapt the security system to their organization’s
+existing infrastructure. The announcer in the Aurora thermos executor also supports security
+controls for talking to ZooKeeper.
+
+
+- [Enabling Security](#enabling-security)
+- [Authentication](#authentication)
+	- [HTTP Basic Authentication](#http-basic-authentication)
+		- [Server Configuration](#server-configuration)
+		- [Client Configuration](#client-configuration)
+	- [HTTP SPNEGO Authentication (Kerberos)](#http-spnego-authentication-kerberos)
+		- [Server Configuration](#server-configuration-1)
+		- [Client Configuration](#client-configuration-1)
+- [Authorization](#authorization)
+	- [Using an INI file to define security controls](#using-an-ini-file-to-define-security-controls)
+		- [Caveats](#caveats)
+- [Implementing a Custom Realm](#implementing-a-custom-realm)
+	- [Packaging a realm module](#packaging-a-realm-module)
+- [Announcer Authentication](#announcer-authentication)
+    - [ZooKeeper authentication configuration](#zookeeper-authentication-configuration)
+    - [Executor settings](#executor-settings)
+- [Scheduler HTTPS](#scheduler-https)
+- [Known Issues](#known-issues)
+
+# Enabling Security
+
+There are two major components of security:
+[authentication and authorization](http://en.wikipedia.org/wiki/Authentication#Authorization).  A
+cluster administrator may choose the approach used for each, and may also implement custom
+mechanisms for either.  Later sections describe the options available. To enable authentication
+for the announcer, see [Announcer Authentication](#announcer-authentication).
+
+
+# Authentication
+
+The scheduler must be configured with instructions for how to process authentication
+credentials at a minimum.  There are currently two built-in authentication schemes -
+[HTTP Basic Authentication](http://en.wikipedia.org/wiki/Basic_access_authentication), and
+[SPNEGO](http://en.wikipedia.org/wiki/SPNEGO) (Kerberos).
+
+## HTTP Basic Authentication
+
+Basic Authentication is a very quick way to add *some* security.  It is supported
+by all major browsers and HTTP client libraries with minimal work.  However,
+before relying on Basic Authentication you should be aware of the [security
+considerations](http://tools.ietf.org/html/rfc2617#section-4).
+
+### Server Configuration
+
+At a minimum you need to set the following command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_realm_modules=INI_AUTHNZ
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+[users]
+sally = apple, admin
+
+[roles]
+admin = *
+```
+
+The details of the security.ini file are explained below. Note that this file contains plaintext,
+unhashed passwords.
+
+### Client Configuration
+
+To configure the client for HTTP Basic authentication, add an entry to ~/.netrc with your credentials
+
+```
+% cat ~/.netrc
+# ...
+
+machine aurora.example.com
+login sally
+password apple
+
+# ...
+```
+
+No changes are required to `clusters.json`.
+
+## HTTP SPNEGO Authentication (Kerberos)
+
+### Server Configuration
+At a minimum you need to set the following command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=NEGOTIATE
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+% cat path/to/security.ini
+[users]
+sally = _, admin
+
+[roles]
+admin = *
+```
+
+What's going on here? First, Aurora must be configured to request Kerberos credentials when presented with an
+unauthenticated request. This is achieved by setting
+
+```
+-http_authentication_mechanism=NEGOTIATE
+```
+
+Next, a Realm module must be configured to **authenticate** the current request using the Kerberos
+credentials that were requested. Aurora ships with a realm module that can do this
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN[,...]
+```
+
+The Kerberos5Realm requires a keytab file and a server principal name. The principal name will usually
+be in the form `HTTP/aurora.example.com@EXAMPLE.COM`.
+
+```
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+```
+
+The Kerberos5 realm module is authentication-only. For scheduler security to work you must also
+enable a realm module that provides an Authorizer implementation. For example, to do this using the
+IniShiroRealmModule:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+```
+
+You can then configure authorization using a security.ini file as described below
+(the password field is ignored). You must configure the realm module with the path to this file:
+
+```
+-shiro_ini_path=path/to/security.ini
+```
+
+### Client Configuration
+To use Kerberos on the client-side you must build Kerberos-enabled client binaries. Do this with
+
+```
+./pants binary src/main/python/apache/aurora/kerberos:kaurora
+./pants binary src/main/python/apache/aurora/kerberos:kaurora_admin
+```
+
+You must also configure each cluster where you've enabled Kerberos on the scheduler
+to use Kerberos authentication. Do this by setting `auth_mechanism` to `KERBEROS`
+in `clusters.json`.
+
+```
+% cat ~/.aurora/clusters.json
+{
+    "devcluser": {
+        "auth_mechanism": "KERBEROS",
+        ...
+    },
+    ...
+}
+```
+
+# Authorization
+Given a means to authenticate the entity a client claims they are, we need to define what privileges they have.
+
+## Using an INI file to define security controls
+
+The simplest security configuration for Aurora is an INI file on the scheduler.  For small
+clusters, or clusters where the users and access controls change relatively infrequently, this is
+likely the preferred approach.  However you may want to avoid this approach if access permissions
+are rapidly changing, or if your access control information already exists in another system.
+
+You can enable INI-based configuration with following scheduler command line arguments:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_ini_path=path/to/security.ini
+```
+
+*note* As the argument name reveals, this is using Shiro’s
+[IniRealm](http://shiro.apache.org/configuration.html#Configuration-INIConfiguration) behind
+the scenes.
+
+The INI file will contain two sections - users and roles.  Here’s an example for what might
+be in security.ini:
+
+```
+[users]
+sally = apple, admin
+jim = 123456, accounting
+becky = letmein, webapp
+larry = 654321,accounting
+steve = password
+
+[roles]
+admin = *
+accounting = thrift.AuroraAdmin:setQuota
+webapp = thrift.AuroraSchedulerManager:*:webapp
+```
+
+The users section defines user credentials and the role(s) each user is a member of.  These lines
+are of the format `<user> = <password>[, <role>...]`.  As you probably noticed, the passwords are
+in plaintext and as a result read access to this file should be restricted.
+
+In this configuration, each user has different privileges for actions in the cluster because
+of the roles they are a part of:
+
+* admin is granted all privileges
+* accounting may adjust the amount of resource quota for any role
+* webapp represents a collection of jobs that make up a service, and its members may create and modify any jobs owned by it
+
+### Caveats
+You might find documentation on the Internet suggesting there are additional sections in `shiro.ini`,
+like `[main]` and `[urls]`. These are not supported by Aurora as it uses a different mechanism to configure
+those parts of Shiro. Think of Aurora's `security.ini` as a subset with only `[users]` and `[roles]` sections.
+
+## Implementing Delegated Authorization
+
+It is possible to leverage Shiro's `runAs` feature by implementing a custom Servlet Filter that provides
+the capability and passing its fully qualified class name to the command line argument
+`-shiro_after_auth_filter`. The filter is registered in the same filter chain as the Shiro auth filters
+and is placed after the Shiro auth filters in the filter chain. This ensures that the Filter is invoked
+after the Shiro filters have had a chance to authenticate the request.
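+
+For example, if such a filter were packaged as a hypothetical `com.example.DelegatedAuthFilter`
+class available on the scheduler classpath, it could be wired in with:
+
+```
+-shiro_after_auth_filter=com.example.DelegatedAuthFilter
+```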
+
+# Implementing a Custom Realm
+
+Since Aurora’s security is backed by [Apache Shiro](https://shiro.apache.org), you can implement a
+custom [Realm](http://shiro.apache.org/realm.html) to define organization-specific security behavior.
+
+In addition to using Shiro's standard APIs to implement a Realm you can link against Aurora to
+access the type-safe Permissions Aurora uses. See the Javadoc for `org.apache.aurora.scheduler.spi`
+for more information.
+
+## Packaging a realm module
+Package your custom Realm(s) with a Guice module that exposes a `Set<Realm>` multibinding.
+
+```java
+package com.example;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.multibindings.Multibinder;
+import org.apache.shiro.realm.Realm;
+
+public class MyRealmModule extends AbstractModule {
+  @Override
+  public void configure() {
+    Realm myRealm = new MyRealm();
+
+    Multibinder.newSetBinder(binder(), Realm.class).addBinding().toInstance(myRealm);
+  }
+
+  static class MyRealm implements Realm {
+    // Realm implementation.
+  }
+}
+```
+
+To use your module in the scheduler, include it as a realm module based on its fully-qualified
+class name:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ,com.example.MyRealmModule
+```
+
+
+# Announcer Authentication
+The Thermos executor can be configured to authenticate with ZooKeeper and include
+an [ACL](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl)
+on the nodes it creates, which will specify
+the privileges of clients to perform different actions on these nodes.  This
+feature is enabled by specifying an ACL configuration file to the executor with the
+`--announcer-zookeeper-auth-config` command line argument.
+
+When this feature is _not_ enabled, nodes created by the executor will have 'world/all' permission
+(`ZOO_OPEN_ACL_UNSAFE`).  In most production environments, operators should specify an ACL and
+limit access.
+
+## ZooKeeper Authentication Configuration
+The configuration file must be formatted as JSON with the following schema:
+
+```json
+{
+  "auth": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>"
+    }
+  ],
+  "acl": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>",
+      "permissions": {
+        "read": <bool>,
+        "write": <bool>,
+        "create": <bool>,
+        "delete": <bool>,
+        "admin": <bool>
+      }
+    }
+  ]
+}
+```
+
+The `scheme`
+defines the encoding of the credential field.  Note that these fields are passed directly to
+ZooKeeper (except in the case of _digest_ scheme, where the executor will hash and encode
+the credential appropriately before passing it to ZooKeeper). In addition to `acl`, a list of
+authentication credentials must be provided in `auth` to use for the connection.
+
+All properties of the `permissions` object will default to False if not provided.
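+
+As a concrete sketch using the `digest` scheme (the credentials are placeholders, in the
+`user:password` form expected by ZooKeeper digest authentication):
+
+```json
+{
+  "auth": [
+    {
+      "scheme": "digest",
+      "credential": "aurora:secret"
+    }
+  ],
+  "acl": [
+    {
+      "scheme": "digest",
+      "credential": "aurora:secret",
+      "permissions": {
+        "read": true,
+        "write": true,
+        "create": true,
+        "delete": true,
+        "admin": true
+      }
+    }
+  ]
+}
+```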
+
+## Executor settings
+To enable the executor to authenticate against ZK, `--announcer-zookeeper-auth-config` should be
+set to the configuration file.
+
+
+# Scheduler HTTPS
+
+The Aurora scheduler does not provide native HTTPS support ([AURORA-343](https://issues.apache.org/jira/browse/AURORA-343)).
+It is therefore recommended to deploy it behind an HTTPS capable reverse proxy such as nginx or Apache2.
+
+A simple setup is to launch both the reverse proxy and the Aurora scheduler on the same port, but
+bind the reverse proxy to the public IP of the host and the scheduler to localhost:
+
+    -ip=127.0.0.1
+    -http_port=8081
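+
+A deliberately minimal nginx sketch for such a setup (the public IP, server name, and certificate
+paths are placeholders, and your reverse proxy of choice may differ):
+
+    server {
+      listen 203.0.113.10:8081 ssl;
+      server_name aurora.example.com;
+
+      ssl_certificate     /etc/nginx/ssl/aurora.example.com.crt;
+      ssl_certificate_key /etc/nginx/ssl/aurora.example.com.key;
+
+      location / {
+        proxy_pass http://127.0.0.1:8081;
+      }
+    }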
+
+If your clients connect to the scheduler via [`proxy_url`](../../reference/scheduler-configuration/),
+you can update it to `https`. If you use the ZooKeeper based discovery instead, the scheduler
+needs to be launched via
+
+    -serverset_endpoint_name=https
+
+in order to announce its HTTPS support within ZooKeeper.
+
+
+# Known Issues
+
+While the APIs and SPIs we ship with are stable as of 0.8.0, we are aware of several incremental
+improvements. Please follow, vote, or send patches.
+
+Relevant tickets:
+* [AURORA-1248](https://issues.apache.org/jira/browse/AURORA-1248): Client retries 4xx errors
+* [AURORA-1279](https://issues.apache.org/jira/browse/AURORA-1279): Remove kerberos-specific build targets
+* [AURORA-1291](https://issues.apache.org/jira/browse/AURORA-1291): Consider defining a JSON format in place of INI
+* [AURORA-1179](https://issues.apache.org/jira/browse/AURORA-1179): Support hashed passwords in security.ini
+* [AURORA-1295](https://issues.apache.org/jira/browse/AURORA-1295): Support security for the ReadOnlyScheduler service
diff --git a/source/documentation/0.22.0/operations/storage.md b/source/documentation/0.22.0/operations/storage.md
new file mode 100644
index 0000000..fc3a1c3
--- /dev/null
+++ b/source/documentation/0.22.0/operations/storage.md
@@ -0,0 +1,96 @@
+# Aurora Scheduler Storage
+
+- [Overview](#overview)
+- [Storage Semantics](#storage-semantics)
+  - [Reads, writes, modifications](#reads-writes-modifications)
+    - [Read lifecycle](#read-lifecycle)
+    - [Write lifecycle](#write-lifecycle)
+  - [Atomicity, consistency and isolation](#atomicity-consistency-and-isolation)
+  - [Population on restart](#population-on-restart)
+
+
+## Overview
+
+The Aurora scheduler maintains data that needs to be persisted to survive failovers and restarts.
+For example:
+
+* Task configurations and scheduled task instances
+* Job update configurations and update progress
+* Production resource quotas
+* Mesos resource offer host attributes
+
+Aurora solves its persistence needs by leveraging the
+[Mesos implementation of a Paxos replicated log](http://mesos.apache.org/documentation/latest/replicated-log-internals/)
+[[1]](https://ramcloud.stanford.edu/~ongaro/userstudy/paxos.pdf)
+[[2]](http://en.wikipedia.org/wiki/State_machine_replication) with a key-value
+[LevelDB](https://github.com/google/leveldb) storage as persistence media.
+
+Conceptually, it can be represented by the following major components:
+
+* Volatile storage: in-memory cache of all available data. Implemented via in-memory
+[H2 Database](http://www.h2database.com/html/main.html) and accessed via
+[MyBatis](http://mybatis.github.io/mybatis-3/).
+* Log manager: interface between Aurora storage and Mesos replicated log. The default schema format
+is [thrift](https://github.com/apache/thrift). Data is stored in serialized binary form.
+* Snapshot manager: all data is periodically persisted in Mesos replicated log in a single snapshot.
+This helps establish periodic recovery checkpoints and speeds up volatile storage recovery on
+restart.
+* Backup manager: as a precaution, snapshots are periodically written out into backup files.
+This solves a [disaster recovery problem](../backup-restore/)
+in case of a complete loss or corruption of Mesos log files.
+
+![Storage hierarchy](../images/storage_hierarchy.png)
+
+
+## Storage Semantics
+
+This section covers implementation details of the Aurora storage system. Understanding them can sometimes be useful
+when investigating performance issues.
+
+### Reads, writes, modifications
+
+All services in Aurora access data via a set of predefined store interfaces (aka stores) logically
+grouped by the type of data they serve. Every interface defines a specific set of operations allowed
+on the data thus abstracting out the storage access and the actual persistence implementation. The
+latter is especially important in view of a general immutability of persisted data. With the Mesos
+replicated log as the underlying persistence solution, data can be read and written easily but not
+modified. All modifications are simulated by saving new versions of modified objects. This feature
+and general performance considerations justify the existence of the volatile in-memory store.
+
+#### Read lifecycle
+
+There are two types of reads available in Aurora: consistent and weakly-consistent. The difference
+is explained [below](#atomicity-consistency-and-isolation).
+
+All reads are served from the volatile storage, making them generally cheap operations from a
+performance standpoint. The majority of the volatile stores are represented by the
+in-memory H2 database. This allows for rich schema definitions, queries and relationships that
+key-value storage is unable to match.
+
+#### Write lifecycle
+
+Writes are more involved operations since, in addition to updating the volatile store, data has to be
+appended to the replicated log. Data is not available for reads until it has been fully acknowledged by
+both the replicated log and the volatile storage.
+
+### Atomicity, consistency and isolation
+
+Aurora uses [write-ahead logging](http://en.wikipedia.org/wiki/Write-ahead_logging) to ensure
+consistency between replicated and volatile storage. In Aurora, data is first written into the
+replicated log and only then updated in the volatile store.
+
+Aurora storage uses read-write locks to serialize data mutations and provide consistent view of the
+available data. The available `Storage` interface exposes 3 major types of operations:
+* `consistentRead` - access is locked using a reader's lock and provides a consistent view on read
+* `weaklyConsistentRead` - access is lock-less. Delivers best contention performance but may result
+in stale reads
+* `write` - access is fully serialized by using writer's lock. Operation success requires both
+volatile and replicated writes to succeed.
+
+The consistency of the volatile store is enforced via H2 transactional isolation.
+
+### Population on restart
+
+Any time a scheduler restarts, it restores its volatile state from the most recent position recorded
+in the replicated log by restoring the snapshot and replaying individual log entries on top to fully
+recover the state up to the last write.
diff --git a/source/documentation/0.22.0/operations/troubleshooting.md b/source/documentation/0.22.0/operations/troubleshooting.md
new file mode 100644
index 0000000..a7d4ef2
--- /dev/null
+++ b/source/documentation/0.22.0/operations/troubleshooting.md
@@ -0,0 +1,106 @@
+# Troubleshooting
+
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions here to help get you moving.
+
+## Replicated log not initialized
+
+### Symptoms
+- Scheduler RPCs and web interface claim `Storage is not READY`
+- Scheduler log repeatedly prints messages like
+
+  ```
+  I1016 16:12:27.234133 26081 replica.cpp:638] Replica in EMPTY status
+  received a broadcasted recover request
+  I1016 16:12:27.234256 26084 recover.cpp:188] Received a recover response
+  from a replica in EMPTY status
+  ```
+
+### Solution
+When you create a new cluster, you need to inform a quorum of schedulers that they are safe to
+consider their database to be empty by [initializing](../installation/#finalizing) the
+replicated log. This is done to prevent the scheduler from modifying the cluster state in the event
+of multiple simultaneous disk failures or, more likely, misconfiguration of the replicated log path.
+
+
+## No distinct leader elected
+
+### Symptoms
+Either no scheduler or multiple schedulers believe themselves to be leading.
+
+### Solution
+Verify the [network configuration](../configuration/#network-configuration) of the Aurora
+scheduler is correct:
+
+* The `LIBPROCESS_IP:LIBPROCESS_PORT` endpoints must be reachable from all coordinator nodes running
+  a scheduler or a Mesos master.
+* Hostname lookups have to resolve to public IPs rather than local ones that cannot be reached
+  from another node.
+
+In addition, double-check the [quota settings](../configuration/#replicated-log-configuration) of the
+replicated log.
+
+
+## Scheduler not registered
+
+### Symptoms
+Scheduler log contains
+
+    Framework has not been registered within the tolerated delay.
+
+### Solution
+Double-check that the scheduler is configured correctly to reach the Mesos master. If you are registering
+the master in ZooKeeper, make sure the command line argument to the master:
+
+    --zk=zk://$ZK_HOST:2181/mesos/master
+
+is the same as the one on the scheduler:
+
+    -mesos_master_address=zk://$ZK_HOST:2181/mesos/master
+
+
+## Scheduler not running
+
+### Symptoms
+The scheduler process commits suicide regularly. This happens under error conditions, but
+also on purpose at regular intervals.
+
+### Solution
+Aurora is meant to be run under supervision. You have to configure a supervisor like
+[Monit](http://mmonit.com/monit/), [supervisord](http://supervisord.org/), or systemd to run the
+scheduler and restart it whenever it fails or exits on purpose.
+
+Aurora supports an active health checking protocol on its admin HTTP interface - if a `GET /health`
+times out or returns anything other than `200 OK` the scheduler process is unhealthy and should be
+restarted.
+
+For example, monit can be configured with
+
+    if failed port 8081 send "GET /health HTTP/1.0\r\n" expect "OK\n" with timeout 2 seconds for 10 cycles then restart
+
+assuming you set `-http_port=8081`.
+
+
+## Executor crashing or hanging
+
+### Symptoms
+Launched task instances never transition to `STARTING` or `RUNNING` but immediately transition
+to `FAILED` or `LOST`.
+
+### Solution
+The executor might be failing due to unknown internal errors such as a missing native dependency
+of the Mesos executor library. Open the Mesos UI and navigate to the failing
+task in question. Inspect the various log files in order to learn about what is going on.
+
+
+## Observer does not discover tasks
+
+### Symptoms
+The observer UI does not list any tasks. When navigating from the scheduler UI to the state of
+a particular task instance the observer returns `Error: 404 Not Found`.
+
+### Solution
+The observer is refreshing its internal state every couple of seconds. If waiting a few seconds
+does not resolve the issue, check that the `--mesos-root` setting of the observer and the
+`--work_dir` option of the Mesos agent are in sync. For details, see our
+[Install instructions](../installation/#worker-configuration).
diff --git a/source/documentation/0.22.0/operations/upgrades.md b/source/documentation/0.22.0/operations/upgrades.md
new file mode 100644
index 0000000..635faf5
--- /dev/null
+++ b/source/documentation/0.22.0/operations/upgrades.md
@@ -0,0 +1,41 @@
+# Upgrading Aurora
+
+Aurora can be updated from one version to the next without any downtime or restarts of running
+jobs. The same holds true for Mesos.
+
+Generally speaking, Mesos and Aurora strive for a +1/-1 version compatibility, i.e. all components
+are meant to be forward and backwards compatible for at least one version. This implies it
+does not really matter in which order updates are carried out.
+
+Exceptions to this rule are documented in the [Aurora release-notes](../../../RELEASE-NOTES/)
+and the [Mesos upgrade instructions](https://mesos.apache.org/documentation/latest/upgrades/).
+
+
+## Instructions
+
+To upgrade Aurora, follow these steps:
+
+1. Update the first scheduler instance by updating its software and restarting its process.
+2. Wait until the scheduler is up and its [Replicated Log](../configuration/#replicated-log-configuration)
+   caught up with the other schedulers in the cluster. The log has caught up if `log/recovered` has
+   the value `1`. You can check the metric via `curl LIBPROCESS_IP:LIBPROCESS_PORT/metrics/snapshot`,
+   where ip and port refer to the [libmesos configuration](../configuration/#network-configuration)
+   settings of the scheduler instance.
+3. Proceed with the next scheduler until all instances are updated.
+4. Update the Aurora executor deployed to the compute nodes of your cluster. Jobs will continue
+   running with the old version of the executor, and will only be launched by the new one once
+   they are eventually restarted due to natural cluster churn.
+5. Distribute the new Aurora client to your users.
+
+
+## Best Practices
+
+Even though not absolutely mandatory, we advise adhering to the following rules:
+
+* Never skip any major or minor releases when updating. If you have to catch up several releases you
+  have to deploy all intermediary versions. Skipping bugfix releases is acceptable though.
+* Verify all updates on a test cluster before touching your production deployments.
+* To minimize the number of failovers during updates, update the currently leading scheduler
+  instance last.
+* Update the Aurora executor on a subset of compute nodes as a canary before deploying the change to
+  the whole fleet.
diff --git a/source/documentation/0.22.0/reference/client-cluster-configuration.md b/source/documentation/0.22.0/reference/client-cluster-configuration.md
new file mode 100644
index 0000000..ff7a3ac
--- /dev/null
+++ b/source/documentation/0.22.0/reference/client-cluster-configuration.md
@@ -0,0 +1,99 @@
+# Client Cluster Configuration
+
+A cluster configuration file is used by the Aurora client to describe the Aurora clusters with
+which it can communicate. Ultimately this allows client users to reference clusters with short names
+like us-east and eu.
+
+A cluster configuration is formatted as JSON.  The simplest cluster configuration is one that
+communicates with a single (non-leader-elected) scheduler.  For example:
+
+    [{
+      "name": "example",
+      "scheduler_uri": "http://localhost:55555",
+    }]
+
+
+A configuration for a leader-elected scheduler would contain something like:
+
+    [{
+      "name": "example",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler"
+    }]
+
+
+The following properties may be set:
+
+  **Property**             | **Type** | **Description**
+  :------------------------| :------- | :--------------
+   **name**                | String   | Cluster name (Required)
+   **slave_root**          | String   | Path to Mesos agent work dir (Required)
+   **slave_run_directory** | String   | Name of Mesos agent run dir (Required)
+   **zk**                  | String   | Hostname of ZooKeeper instance used to resolve Aurora schedulers.
+   **zk_port**             | Integer  | Port of ZooKeeper instance used to locate Aurora schedulers (Default: 2181)
+   **scheduler_zk_path**   | String   | ZooKeeper path under which scheduler instances are registered.
+   **scheduler_uri**       | String   | URI of Aurora scheduler instance.
+   **proxy_url**           | String   | Used by the client to format URLs for display.
+   **auth_mechanism**      | String   | The authentication mechanism to use when communicating with the scheduler. (Default: UNAUTHENTICATED)
+   **docker_registry**     | String   | Used by the client to resolve docker tags.
+
+
+## Details
+
+### `name`
+
+The name of the Aurora cluster represented by this entry. This name will be the `cluster` portion of
+any job keys identifying jobs running within the cluster.
+
+### `slave_root`
+
+The path on the Mesos agents where executing tasks can be found. It is used in combination with the
+`slave_run_directory` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This value should match the value passed to `mesos-slave`
+as `-work_dir`.
+
+### `slave_run_directory`
+
+The name of the directory where the task run can be found. This is used in combination with the
+`slave_root` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This should almost always be set to `latest`.
+
+### `zk`
+
+The hostname of the ZooKeeper instance used to resolve the Aurora scheduler. Aurora uses ZooKeeper
+to elect a leader. The client will connect to this ZooKeeper instance to determine the current
+leader. This host should match the host passed to the scheduler as `-zk_endpoints`.
+
+### `zk_port`
+
+The port on which the ZooKeeper instance is running. If not set this will default to the standard
+ZooKeeper port of 2181. This port should match the port in the host passed to the scheduler as
+`-zk_endpoints`.
+
+### `scheduler_zk_path`
+
+The path on the ZooKeeper instance under which the Aurora serverset is registered. This value should
+match the value passed to the scheduler as `-serverset_path`.
+
+### `scheduler_uri`
+
+The URI of the scheduler. This would be used in place of the ZooKeeper related configuration above
+in circumstances where direct communication with a single scheduler is needed (e.g. testing
+environments). It is strongly advised to **never** use this property for production deploys.
+
+### `proxy_url`
+
+If `proxy_url` is set, its value will be used as the base URL instead of the hostname of the leading
+scheduler. In that scenario the value for `proxy_url` would be, for example, the
+URL of your VIP in a load balancer or a round-robin DNS name.
+
+### `auth_mechanism`
+
+The identifier of an authentication mechanism that the client should use when communicating with the
+scheduler. Support for values other than `UNAUTHENTICATED` requires a matching scheduler-side
+[security configuration](../../operations/security/).
+
+### `docker_registry`
+
+The URI of the Docker Registry that will be used by the Aurora client to resolve docker tags to concrete
+image ids, when using the docker binding helper, like `{{docker.image[name][tag]}}`.
diff --git a/source/documentation/0.22.0/reference/client-commands.md b/source/documentation/0.22.0/reference/client-commands.md
new file mode 100644
index 0000000..49b45ca
--- /dev/null
+++ b/source/documentation/0.22.0/reference/client-commands.md
@@ -0,0 +1,339 @@
+Aurora Client Commands
+======================
+
+- [Introduction](#introduction)
+- [Cluster Configuration](#cluster-configuration)
+- [Job Keys](#job-keys)
+- [Modifying Aurora Client Commands](#modifying-aurora-client-commands)
+- [Regular Jobs](#regular-jobs)
+    - [Creating and Running a Job](#creating-and-running-a-job)
+    - [Running a Command On a Running Job](#running-a-command-on-a-running-job)
+    - [Killing a Job](#killing-a-job)
+    - [Adding Instances](#adding-instances)
+    - [Updating a Job](#updating-a-job)
+        - [Coordinated job updates](#coordinated-job-updates)
+    - [Renaming a Job](#renaming-a-job)
+    - [Restarting Jobs](#restarting-jobs)
+- [Cron Jobs](#cron-jobs)
+- [Comparing Jobs](#comparing-jobs)
+- [Viewing/Examining Jobs](#viewingexamining-jobs)
+    - [Listing Jobs](#listing-jobs)
+    - [Inspecting a Job](#inspecting-a-job)
+    - [Versions](#versions)
+    - [Checking Your Quota](#checking-your-quota)
+    - [Finding a Job on Web UI](#finding-a-job-on-web-ui)
+    - [Getting Job Status](#getting-job-status)
+    - [Opening the Web UI](#opening-the-web-ui)
+    - [SSHing to a Specific Task Machine](#sshing-to-a-specific-task-machine)
+    - [SCPing with Specific Task Machines](#scping-with-specific-task-machines)
+    - [Templating Command Arguments](#templating-command-arguments)
+
+Introduction
+------------
+
+Once you have written an `.aurora` configuration file that describes
+your Job and its parameters and functionality, you interact with Aurora
+using Aurora Client commands. This document describes all of these commands
+and how and when to use them. All Aurora Client commands start with
+`aurora`, followed by the name of the specific command and its
+arguments.
+
+*Job keys* are a very common argument to Aurora commands, as well as the
+gateway to useful information about a Job. Before using Aurora, you
+should read the next section which describes them in detail. The section
+after that briefly describes how you can modify the behavior of certain
+Aurora Client commands, linking to a detailed document about how to do
+that.
+
+This is followed by the Regular Jobs section, which describes the basic
+Client commands for creating, running, and manipulating Aurora Jobs.
+After that are sections on Comparing Jobs and Viewing/Examining Jobs. In
+other words, various commands for getting information and metadata about
+Aurora Jobs.
+
+Cluster Configuration
+---------------------
+
+The client must be able to find a configuration file that specifies available clusters. This file
+declares shorthand names for clusters, which are in turn referenced by job configuration files
+and client commands.
+
+The client will load at most two configuration files, making both of their defined clusters
+available. The first is intended to be a system-installed cluster, using the path specified in
+the environment variable `AURORA_CONFIG_ROOT`, defaulting to `/etc/aurora/clusters.json` if the
+environment variable is not set. The second is a user-installed file, located at
+`~/.aurora/clusters.json`.
+
+For more details on cluster configuration see the
+[Client Cluster Configuration](../client-cluster-configuration/) documentation.
+
+Job Keys
+--------
+
+A job key is a unique system-wide identifier for an Aurora-managed
+Job, for example `cluster1/web-team/test/experiment204`. It is a 4-tuple
+consisting of, in order, *cluster*, *role*, *environment*, and
+*jobname*, separated by /s. Cluster is the name of an Aurora
+cluster. Role is the Unix service account under which the Job
+runs. Environment is a namespace component like `devel`, `test`,
+`prod`, or `stagingN.` Jobname is the Job's name.
+
+The combination of all four values uniquely specifies the Job. If any
+one value is different from that of another job key, the two job keys
+refer to different Jobs. For example, job key
+`cluster1/tyg/prod/workhorse` is different from
+`cluster1/tyg/prod/workcamel` is different from
+`cluster2/tyg/prod/workhorse` is different from
+`cluster2/foo/prod/workhorse` is different from
+`cluster1/tyg/test/workhorse.`
+
+Role names are user accounts existing on the agent machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
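+
+Most client commands described below take a job key directly. For example, using the sample job key from above to ask for status (the `job status` command itself is covered later in this document):
+
+    aurora job status cluster1/web-team/test/experiment204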
+
+Modifying Aurora Client Commands
+--------------------------------
+
+For certain Aurora Client commands, you can define hook methods that run
+before an action taken during the command's execution, after that action
+finishes successfully, or after it fails.
+Basically, a hook is code that lets you extend the
+command's actions. The hook executes on the client side, specifically on
+the machine executing Aurora commands.
+
+Hooks can be associated with these Aurora Client commands:
+
+  - `job create`
+  - `job kill`
+  - `job restart`
+
+The process for writing and activating them is complex enough
+that we explain it in a devoted document, [Hooks for Aurora Client API](../client-hooks/).
+
+Regular Jobs
+------------
+
+This section covers Aurora commands related to running, killing,
+renaming, updating, and restarting a basic Aurora Job.
+
+### Creating and Running a Job
+
+    aurora job create <job key> <configuration file>
+
+Creates and then runs a Job with the specified job key based on a `.aurora` configuration file.
+The configuration file may also contain and activate hook definitions.
+
+### Running a Command On a Running Job
+
+    aurora task run CLUSTER/ROLE/ENV/NAME[/INSTANCES] <cmd>
+
+Runs a shell command on all machines currently hosting shards of a
+single Job.
+
+`run` supports the same command line wildcards used to populate a Job's
+commands; i.e. anything in the `{{mesos.*}}` and `{{thermos.*}}`
+namespaces.
+
+### Killing a Job
+
+    aurora job killall CLUSTER/ROLE/ENV/NAME
+
+Kills all Tasks associated with the specified Job, blocking until all
+are terminated. Defaults to killing all instances in the Job.
+
+The `<configuration file>` argument for `kill` is optional. Use it only
+if it contains hook definitions and activations that affect the
+kill command.
+
+### Adding Instances
+
+    aurora job add CLUSTER/ROLE/ENV/NAME/INSTANCE <count>
+
+Adds `<count>` instances to the existing job. The configuration of the new instances is derived from
+an active job instance pointed to by the `/INSTANCE` part of the job specification. This command is
+a simpler way to scale out an existing job when an instance with desired task configuration
+already exists. Use `aurora update start` to add instances with a new (updated) configuration.
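+
+For example, to add two more instances modeled on instance 0 of a hypothetical `hello` job:
+
+    aurora job add devcluster/www-data/prod/hello/0 2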
+
+### Updating a Job
+
+You can manage job updates using the `aurora update` command.  Please see
+[the Job Update documentation](../../features/job-updates/) for more details.
+
+
+### Renaming a Job
+
+Renaming is a tricky operation as downstream clients must be informed of
+the new name. A conservative approach
+to renaming suitable for production services is:
+
+1.  Modify the Aurora configuration file to change the role,
+    environment, and/or name as appropriate to the standardized naming
+    scheme.
+2.  Check that only these naming components have changed
+    with `aurora diff`.
+
+        aurora job diff CLUSTER/ROLE/ENV/NAME <job_configuration>
+
+3.  Create the (identical) job at the new key. You may need to request a
+    temporary quota increase.
+
+        aurora job create CLUSTER/ROLE/ENV/NEW_NAME <job_configuration>
+
+4.  Migrate all clients over to the new job key. Update all links and
+    dashboards. Ensure that both job keys run identical versions of the
+    code while in this state.
+5.  After verifying that all clients have successfully moved over, kill
+    the old job.
+
+        aurora job killall CLUSTER/ROLE/ENV/NAME
+
+6.  If you received a temporary quota increase, be sure to let the
+    powers that be know you no longer need the additional capacity.
+
+### Restarting Jobs
+
+`restart` restarts all shards of the Job identified by the given job key:
+
+    aurora job restart CLUSTER/ROLE/ENV/NAME[/INSTANCES]
+
+Restarts are controlled on the client side, so aborting
+the `job restart` command halts the restart operation.
+
+**Note**: `job restart` only applies its command line arguments and neither
+uses nor is affected by `update.config`. Restarting
+does ***not*** involve a configuration change. To update the
+configuration, use `update.config`.
+
+The `--config` argument for restart is optional. Use it only
+if it contains hook definitions and activations that affect the
+`job restart` command.
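+
+For example, to restart only a subset of instances of a hypothetical job, using the `[/INSTANCES]` range form shown above:
+
+    # Restart instances 0 through 4 only
+    aurora job restart devcluster/www-data/prod/hello/0-4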
+
+Cron Jobs
+---------
+
+You can manage cron jobs using the `aurora cron` command.  Please see
+[the Cron Jobs Feature](../../features/cron-jobs/) for more details.
+
+Comparing Jobs
+--------------
+
+    aurora job diff CLUSTER/ROLE/ENV/NAME <job configuration>
+
+Compares a job configuration against a running job. By default the diff
+is determined using `diff`, though you may choose an alternate
+diff program by specifying the `DIFF_VIEWER` environment variable.
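+
+For example, to view the comparison in an alternate diff tool (the job key, file, and viewer here are illustrative):
+
+    DIFF_VIEWER=vimdiff aurora job diff devcluster/www-data/prod/hello hello_world.aurora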
+
+Viewing/Examining Jobs
+----------------------
+
+Above we discussed creating, killing, and updating Jobs. Here we discuss
+how to view and examine Jobs.
+
+### Listing Jobs
+
+    aurora config list <job configuration>
+
+Lists all Jobs registered with the Aurora scheduler in the named cluster for the named role.
+
+### Inspecting a Job
+
+    aurora job inspect CLUSTER/ROLE/ENV/NAME <job configuration>
+
+`inspect` verifies that its specified job can be parsed from a
+configuration file, and displays the parsed configuration.
+
+### Checking Your Quota
+
+    aurora quota get CLUSTER/ROLE
+
+Prints the production quota allocated to the role in the given
+cluster. Only non-[dedicated](../../features/constraints/#dedicated-attribute)
+[production](../configuration/#job-objects) jobs consume quota.
+
+### Finding a Job on Web UI
+
+When you create a job, part of the output response contains a URL that goes
+to the job's scheduler UI page. For example:
+
+    vagrant@precise64:~$ aurora job create devcluster/www-data/prod/hello /vagrant/examples/jobs/hello_world.aurora
+    INFO] Creating job hello
+    INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+    INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+You can go to the scheduler UI page for this job via `http://precise64:8081/scheduler/www-data/prod/hello`.
+You can go to the overall scheduler UI page by going to the part of that URL that ends at `scheduler`: `http://precise64:8081/scheduler`.
+
+Once you click through to a role page, you see Jobs arranged
+separately by pending jobs, active jobs and finished jobs.
+Jobs are arranged by role, typically a service account for
+production jobs and user accounts for test or development jobs.
+
+### Getting Job Status
+
+    aurora job status <job_key>
+
+Returns the status of recent tasks associated with the Job specified by
+`job_key` in its cluster. Typically this includes
+a mix of active tasks (running or assigned) and inactive tasks
+(successful, failed, and lost).
+
+### Opening the Web UI
+
+Use the Job's web UI scheduler URL or the `aurora status` command to find out on which
+machines individual tasks are scheduled. You can open the web UI via the
+`job open` command when invoked from your machine:
+
+    aurora job open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+If only the cluster is specified, it goes directly to that cluster's
+scheduler main page. If the role is specified, it goes to the top-level
+role page. If the full job key is specified, it goes directly to the job
+page where you can inspect individual tasks.
+
+### SSHing to a Specific Task Machine
+
+    aurora task ssh <job_key> <shard number>
+
+You can have the Aurora client ssh directly to the machine that has been
+assigned a particular Job/shard number. This may be useful for quickly
+diagnosing issues such as performance issues or abnormal behavior on a
+particular machine.
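+
+For example, to open a shell in the sandbox of instance 0 of a hypothetical job:
+
+    aurora task ssh devcluster/www-data/prod/hello 0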
+
+### SCPing with Specific Task Machines
+
+    aurora task scp [<cluster>/<role>/<env>/<job_name>/<instance_id>]:source [<cluster>/<role>/<env>/<job_name>/<instance_id>]:dest
+
+You can have the Aurora client copy file(s)/folder(s) to, from, and between
+individual tasks. The sandbox folder serves as the relative root and is the
+same folder you see when you browse `chroot` from the Scheduler task UI. You
+can also use absolute paths (like for `/tmp`), but tilde expansion is not
+supported. Currently, this command is only fully supported for Mesos
+containers. Users may use this to copy files from Docker containers but they
+cannot copy files to them.
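+
+For example, to copy a file out of instance 0's sandbox into the current local directory (the job key and file name are illustrative):
+
+    aurora task scp devcluster/www-data/prod/hello/0:app.log .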
+
+### Templating Command Arguments
+
+    aurora task run [-e] [-t THREADS] <job_key> -- <<command-line>>
+
+Given a job specification, run the supplied command on all hosts and
+return the output. You may use the standard Mustache templating rules:
+
+- `{{thermos.ports[name]}}` substitutes the specific named port of the
+  task assigned to this machine
+- `{{mesos.instance}}` substitutes the shard id of the job's task
+  assigned to this machine
+- `{{thermos.task_id}}` substitutes the task id of the job's task
+  assigned to this machine
+
+For example, the following type of pattern can be a powerful diagnostic
+tool:
+
+    aurora task run -t5 cluster1/tyg/devel/seizure -- \
+      'curl -s -m1 localhost:{{thermos.ports[http]}}/vars | grep uptime'
+
+By default, the command runs in the Task's sandbox. The `-e` option can
+run the command in the executor's sandbox. This is mostly useful for
+Aurora administrators.
+
+You can parallelize the runs by using the `-t` option.
diff --git a/source/documentation/0.22.0/reference/client-hooks.md b/source/documentation/0.22.0/reference/client-hooks.md
new file mode 100644
index 0000000..ee64193
--- /dev/null
+++ b/source/documentation/0.22.0/reference/client-hooks.md
@@ -0,0 +1,228 @@
+# Hooks for Aurora Client API
+
+You can execute hook methods around Aurora API Client methods when they are called by the Aurora Command Line commands.
+
+Explaining how hooks work is a bit tricky because of some indirection about what they apply to. Basically, a hook is code that executes when a particular Aurora Client API method runs, letting you extend the method's actions. The hook executes on the client side, specifically on the machine executing Aurora commands.
+
+The catch is that hooks are associated with Aurora Client API methods, which users don't directly call. Instead, users call Aurora Command Line commands, which call Client API methods during their execution. Since which hooks run depends on which Client API methods get called, you will need to know which Command Line commands call which API methods. Later on, there is a table showing the various associations.
+
+**Terminology Note**: From now on, "method(s)" refer to Client API methods, and "command(s)" refer to Command Line commands.
+
+- [Hook Types](#hook-types)
+- [Execution Order](#execution-order)
+- [Hookable Methods](#hookable-methods)
+- [Activating and Using Hooks](#activating-and-using-hooks)
+- [.aurora Config File Settings](#aurora-config-file-settings)
+- [Command Line](#command-line)
+- [Hooks Protocol](#hooks-protocol)
+  - [pre_ Methods](#pre_-methods)
+  - [err_ Methods](#err_-methods)
+  - [post_ Methods](#post_-methods)
+- [Generic Hooks](#generic-hooks)
+- [Hooks Process Checklist](#hooks-process-checklist)
+
+
+## Hook Types
+
+Hooks have three basic types, differing by when they run with respect to their associated method.
+
+`pre_<method_name>`: When its associated method is called, the `pre_` hook executes first, then the called method. If the `pre_` hook fails, the method never runs. Later code that expected the method to succeed may be affected by this, which may result in the Aurora client terminating.
+
+Note that a `pre_` hook can error-trap internally so it does not
+return `False`. Designers/contributors of new `pre_` hooks should
+consider whether or not to error-trap them. You can error trap at the
+highest level very generally and always pass the `pre_` hook by
+returning `True`. For example:
+
+    def pre_create(...):
+      do_something()  # if do_something fails with an exception, the create_job is not attempted!
+      return True
+
+    # However...
+    def pre_create(...):
+      try:
+        do_something()  # may cause exception
+      except Exception:  # generic error trap will catch it
+        pass  # and ignore the exception
+      return True  # create_job will run in any case!
+
+`post_<method_name>`: A `post_` hook executes after its associated method successfully finishes running. If it fails, the already executed method is unaffected. A `post_` hook's error is trapped, and any later operations are unaffected.
+
+`err_<method_name>`: Executes only when its associated method returns a status other than OK or throws an exception. If an `err_` hook fails, the already executed method is unaffected. An `err_` hook's error is trapped, and any later operations are unaffected.
+
+## Execution Order
+
+A command with `pre_`, `post_`, and `err_` hooks defined and activated for its called method executes in the following order when the method successfully executes:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and successfully finishes
+6. `post_` method hook runs
+7. Command code executes
+8. Command execution ends
+
+The following is what happens when, for the same command and hooks, the method associated with the command suffers an error and does not successfully finish executing:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and fails
+6. `err_` method hook runs
+7. Command Code executes (if `err_` method does not end the command execution)
+8. Command execution ends
+
+Note that the `post_` and `err_` hooks for the same method can never both run for a single execution of that method.
+
+## Hookable Methods
+
+You can associate `pre_`, `post_`, and `err_` hooks with the following methods. Since you do not directly interact with the methods, but rather the Aurora Command Line commands that call them, for each method we also list the command(s) that can call the method. Note that a different method or methods may be called by a command depending on how the command's other code executes. Similarly, multiple commands can call the same method. We also list the methods' argument signatures, which are used by their associated hooks. <a name="Chart"></a>
+
+  Aurora Client API Method | Client API Method Argument Signature | Aurora Command Line Command
+  -------------------------| ------------------------------------- | ---------------------------
+  ```create_job``` | ```self```, ```config``` | ```job create```, ```runtask```
+  ```restart``` | ```self```, ```job_key```, ```shards```, ```update_config```, ```health_check_interval_seconds``` | ```job restart```
+  ```kill_job``` | ```self```, ```job_key```, ```shards=None``` |  ```job kill```
+  ```start_cronjob``` | ```self```, ```job_key``` | ```cron start```
+  ```start_job_update``` | ```self```, ```config```, ```instances=None``` | ```update start```
+
+Some specific examples:
+
+* `pre_create_job` executes when a `create_job` method is called, and before the `create_job` method itself executes.
+
+* `post_cancel_update` executes after a `cancel_update` method has successfully finished running.
+
+* `err_kill_job` executes when the `kill_job` method is called, but doesn't successfully finish running.
+
+## Activating and Using Hooks
+
+By default, hooks are inactive. If you do not want to use hooks, you do not need to make any changes to your code. If you do want to use hooks, you will need to alter your `.aurora` config file to activate them both for the configuration as a whole as well as for individual `Job`s. And, of course, you will need to define in your config file what happens when a particular hook executes.
+
+## .aurora Config File Settings
+
+You can define a top-level `hooks` variable in any `.aurora` config file. `hooks` is a list of all objects that define hooks used by `Job`s defined in that config file. If you do not want to define any hooks for a configuration, `hooks` is optional.
+
+    hooks = [Object_with_defined_hooks1, Object_with_defined_hooks2]
+
+Be careful when assembling a config file using `include` on multiple smaller config files. If there are multiple files that assign a value to `hooks`, only the last assignment made will stick. For example, if `x.aurora` has `hooks = [a, b, c]` and `y.aurora` has `hooks = [d, e, f]` and `z.aurora` has, in this order, `include x.aurora` and `include y.aurora`, the `hooks` value will be `[d, e, f]`.
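+
+A minimal sketch of that situation (the file names and the exact `include` call form are illustrative):
+
+    # x.aurora defines:  hooks = [a, b, c]
+    # y.aurora defines:  hooks = [d, e, f]
+
+    # z.aurora
+    include('x.aurora')   # hooks is [a, b, c] at this point
+    include('y.aurora')   # last assignment wins: hooks is now [d, e, f]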
+
+Also, for any `Job` that you want to use hooks with, its `Job` definition in the `.aurora` config file must set an `enable_hooks` flag to `True` (it defaults to `False`). By default, hooks are disabled and you must enable them for `Job`s of your choice.
+
+To summarize, to use hooks for a particular job, you must both activate hooks for your config file as a whole, and for that job. Activating hooks only for individual jobs won't work, nor will only activating hooks for your config file as a whole. You must also specify the hooks' defining object in the `hooks` variable.
+
+Recall that `.aurora` config files are written in Pystachio. So the following turns on hooks for production jobs at cluster1 and cluster2, but leaves them off for similar jobs with a defined user role. Of course, you also need to list the objects that define the hooks in your config file's `hooks` variable.
+
+    import getpass
+
+    jobs = [
+            Job(enable_hooks = True, cluster = c, env = 'prod') for c in ('cluster1', 'cluster2')
+           ]
+    jobs.extend(
+       Job(cluster = c, env = 'prod', role = getpass.getuser()) for c in ('cluster1', 'cluster2'))
+       # Hooks disabled for these jobs
+
+## Command Line
+
+All Aurora Command Line commands now accept an `.aurora` config file as an optional parameter (some, of course, accept it as a required parameter). Whenever a command has a `.aurora` file parameter, any hooks specified and activated in the `.aurora` file can be used. For example:
+
+    aurora job restart cluster1/role/env/app myapp.aurora
+
+The command activates any hooks specified and activated in `myapp.aurora`. For the `restart` command, that is the only thing the `myapp.aurora` parameter does. So, if the command was the following, since there is no `.aurora` config file to specify any hooks, no hooks on the `restart` command can run.
+
+    aurora job restart cluster1/role/env/app
+
+## Hooks Protocol
+
+Any object defined in the `.aurora` config file can define hook methods. You should define your hook methods within a class, and then use the class name as a value in the `hooks` list in your config file.
+
+Note that you can define other methods in the class that its hook methods can call; all the logic of a hook does not have to be in its definition.
+
+The following example defines a class containing a `pre_kill_job` hook definition that calls another method defined in the class.
+
+    # Defines a method pre_kill_job
+    class KillConfirmer(object):
+      def confirm(self, msg):
+        return raw_input(msg).lower() == 'yes'
+
+      def pre_kill_job(self, job_key, shards=None):
+        shards = ('shards %s' % shards) if shards is not None else 'all shards'
+        return self.confirm('Are you sure you want to kill %s (%s)? (yes/no): '
+                            % (job_key, shards))
+
+### pre_ Methods
+
+`pre_` methods have the signature:
+
+    pre_<API method name>(self, <associated method's signature>)
+
+`pre_` methods have the same signature as their associated method, with the addition of `self` as the first parameter. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `pre_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+If this method returns False, the API command call aborts.
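+
+As a sketch of that `*`/`**` form (the class name and print statement are hypothetical; the hook name and return convention follow the chart above):
+
+    class CreateAuditor(object):
+      # Positional arguments land in `args`, keyword arguments in `kw`.
+      def pre_create_job(self, *args, **kw):
+        print('create_job is about to run with %d positional argument(s)' % len(args))
+        return True  # returning False would abort the create_job call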
+
+### err_ Methods
+
+`err_` methods have the signature:
+
+    err_<API method name>(self, exc, <associated method's signature>)
+
+`err_` methods have the same signature as their associated method, with the addition of a first parameter `self` and a second parameter `exc`. `exc` is either a result with responseCode other than `ResponseCode.OK` or an `Exception`. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `err`_ methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+`err_` method return codes are ignored.
+
+### post_ Methods
+
+`post_` methods have the signature:
+
+    post_<API method name>(self, result, <associated method signature>)
+
+`post_` method parameters are `self`, then `result`, followed by the same parameter signature as their associated method. `result` is the result of the associated method call. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `post_` methods, you can use the `*` and `**` syntax to designate that all unspecified arguments are passed in a list to the `*`ed parameter and all unspecified named arguments with values are passed as name/value pairs to the `**`ed parameter.
+
+`post_` method return codes are ignored.
+
+## Generic Hooks
+
+There are seven Aurora API Methods which any of the three hook types can attach to. Thus, there are 21 possible hook/method combinations for a single `.aurora` config file. Say that you define `pre_` and `post_` hooks for the `restart` method. That leaves 19 undefined hook/method combinations; `err_restart` and the 3 `pre_`, `post_`, and `err_` hooks for each of the other 6 hookable methods. You can define what happens when any of these otherwise undefined 19 hooks execute via a generic hook, whose signature is:
+
+    generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw)
+
+where:
+
+* `hook_config` is a named tuple of `config` (the Pystachio `config` object) and `job_key`.
+
+* `event` is one of `pre`, `err`, or `post`, indicating which type of hook the generic hook is standing in for. For example, assume no specific hooks were defined for the `restart` API command. If `generic_hook` is defined and activated, and `restart` is called, `generic_hook` will effectively run as `pre_restart`, `post_restart`, and `err_restart`. You can use a selection statement on this value so that `generic_hook` will act differently based on whether it is standing in for a `pre_`, `post_`, or `err_` hook.
+
+* `method_name` is the Client API method name whose execution is causing this execution of the `generic_hook`.
+
+* `*args`, `**kw` are the API method arguments and keyword arguments respectively.
+* `result_or_err` is a tri-state parameter taking one of these three values:
+  1. `None` for `pre_` hooks
+  2. `result` for `post_` hooks
+  3. `exc` for `err_` hooks
+
+Example:
+
+    # Overrides the standard do-nothing generic_hook by adding a log writing operation.
+    from twitter.common import log
+
+    class Logger(object):
+      '''Adds to the log every time a hookable API method is called'''
+      def generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw):
+        log.info('%s: %s_%s of %s'
+                 % (self.__class__.__name__, event, method_name, hook_config.job_key))
+
+## Hooks Process Checklist
+
+1. In your `.aurora` config file, add a `hooks` variable. Note that you may want to define a `.aurora` file only for hook definitions and then include this file in multiple other config files that you want to use the same hooks.
+
+        hooks = []
+
+2. In the `hooks` variable, list all objects that define hooks used by `Job`s defined in this config:
+
+        hooks = [Object_hook_definer1, Object_hook_definer2]
+
+3. For each job that uses hooks in this config file, add `enable_hooks = True` to the `Job` definition. Note that this is necessary even if you only want to use the generic hook.
+
+4. Write your `pre_`, `post_`, and `err_` hook definitions as part of an object definition in your `.aurora` config file.
+
+5. If desired, write your `generic_hook` definition as part of an object definition in your `.aurora` config file. Remember, the object must be listed as a member of `hooks`.
+
+6. If your Aurora command line command does not otherwise take an `.aurora` config file argument, add the appropriate `.aurora` file as an argument in order to define and activate the configuration's hooks.
diff --git a/source/documentation/0.22.0/reference/configuration-best-practices.md b/source/documentation/0.22.0/reference/configuration-best-practices.md
new file mode 100644
index 0000000..9ce7fcb
--- /dev/null
+++ b/source/documentation/0.22.0/reference/configuration-best-practices.md
@@ -0,0 +1,187 @@
+Aurora Configuration Best Practices
+===================================
+
+Use As Few .aurora Files As Possible
+------------------------------------
+
+When creating your `.aurora` configuration, try to keep all versions of
+a particular job within the same `.aurora` file. For example, if you
+have separate jobs for `cluster1`, `cluster1` staging, `cluster1`
+testing, and `cluster2`, keep them as close together as possible.
+
+Constructs shared across multiple jobs owned by your team (e.g.
+team-level defaults or structural templates) can be split into separate
+`.aurora` files and included via the `include` directive.
+
+
+Avoid Boilerplate
+------------------
+
+If you see repetition or find yourself copy and pasting any parts of
+your configuration, it's likely an opportunity for templating. Take the
+example below:
+
+`redundant.aurora` contains:
+
+    download = Process(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'rm -rf Python-2.7.3 && tar xjf Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    build = Process(
+      name = 'build',
+      cmdline = 'pushd Python-2.7.3 && ./configure && make && popd',
+      max_failures = 1)
+
+    email = Process(
+      name = 'email',
+      cmdline = 'echo Success | mail feynman@tmc.com',
+      max_failures = 5,
+      min_duration = 1)
+
+    build_python = Task(
+      name = 'build_python',
+      processes = [download, unpack, build, email],
+      constraints = [Constraint(order = ['download', 'unpack', 'build', 'email'])])
+
+As you'll notice, there's a lot of repetition in the `Process`
+definitions. For example, almost every process sets a `max_failures`
+limit to 5 and a `min_duration` to 1. This is an opportunity for factoring
+into a common process template.
+
+Furthermore, the Python version is repeated everywhere. This can be
+bound via structural templating as described in the [Advanced Binding](../configuration-templating/#AdvancedBinding)
+section.
+
+`less_redundant.aurora` contains:
+
+    class Python(Struct):
+      version = Required(String)
+      base = Default(String, 'Python-{{version}}')
+      package = Default(String, '{{base}}.tar.bz2')
+
+    ReliableProcess = Process(
+      max_failures = 5,
+      min_duration = 1)
+
+    download = ReliableProcess(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/{{python.version}}/{{python.package}}')
+
+    unpack = ReliableProcess(
+      name = 'unpack',
+      cmdline = 'rm -rf {{python.base}} && tar xjf {{python.package}}')
+
+    build = ReliableProcess(
+      name = 'build',
+      cmdline = 'pushd {{python.base}} && ./configure && make && popd',
+      max_failures = 1)
+
+    email = ReliableProcess(
+      name = 'email',
+      cmdline = 'echo Success | mail {{role}}@foocorp.com')
+
+    build_python = SequentialTask(
+      name = 'build_python',
+      processes = [download, unpack, build, email]).bind(python = Python(version = "2.7.3"))
+
+
+Thermos Uses bash, But Thermos Is Not bash
+-------------------------------------------
+
+#### Bad
+
+Many tiny Processes make for harder-to-manage configurations.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'rcp user@my_machine:my_application .'
+    )
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+#### Good
+
+Each `cmdline` runs in a bash subshell, so you have the full power of
+bash. Chaining commands with `&&` or `||` is almost always the right
+thing to do.
+
+Also for Tasks that are simply a list of processes that run one after
+another, consider using the `SequentialTask` helper which applies a
+linear ordering constraint for you.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'rcp user@my_machine:my_application . && unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+
+Rarely Use Functions In Your Configurations
+-------------------------------------------
+
+90% of the time you define a function in a `.aurora` file, you're
+probably Doing It Wrong(TM).
+
+#### Bad
+
+    def get_my_task(name, user, cpu, ram, disk):
+      return Task(
+        name = name,
+        user = user,
+        processes = [STAGE_PROCESS, RUN_PROCESS],
+        constraints = order(STAGE_PROCESS, RUN_PROCESS),
+        resources = Resources(cpu = cpu, ram = ram, disk = disk)
+      )
+
+    task_one = get_my_task('task_one', 'feynman', 1.0, 32*MB, 1*GB)
+    task_two = get_my_task('task_two', 'feynman', 2.0, 64*MB, 1*GB)
+
+#### Good
+
+This one is more idiomatic. Forced keyword arguments prevent accidents,
+e.g. constructing a task with "32*MB" when you mean 32 MB of RAM and not
+disk. Less proliferation of task-construction techniques means an
+easier-to-read, quicker-to-understand, and more composable
+configuration.
+
+    TASK_TEMPLATE = SequentialTask(
+      user = 'wickman',
+      processes = [STAGE_PROCESS, RUN_PROCESS],
+    )
+
+    task_one = TASK_TEMPLATE(
+      name = 'task_one',
+      resources = Resources(cpu = 1.0, ram = 32*MB, disk = 1*GB)
+    )
+
+    task_two = TASK_TEMPLATE(
+      name = 'task_two',
+      resources = Resources(cpu = 2.0, ram = 64*MB, disk = 1*GB)
+    )
diff --git a/source/documentation/0.22.0/reference/configuration-templating.md b/source/documentation/0.22.0/reference/configuration-templating.md
new file mode 100644
index 0000000..16d5c9d
--- /dev/null
+++ b/source/documentation/0.22.0/reference/configuration-templating.md
@@ -0,0 +1,306 @@
+Aurora Configuration Templating
+===============================
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a templating library called
+*Pystachio*, a powerful tool for configuration specification and reuse.
+
+[Aurora Configuration Reference](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the *Aurora+Thermos Configuration
+Reference* without `import` statements - the Aurora config loader
+injects them automatically. Other than that the `.aurora` format
+works like any other Python script.
+
+
+Templating 1: Binding in Pystachio
+----------------------------------
+
+Pystachio uses the visually distinctive {{}} to indicate template
+variables. These are often called "mustache variables" after the
+similarly appearing variables in the Mustache templating system and
+because the curly braces resemble mustaches.
+
+If you are familiar with the Mustache system, templates in Pystachio
+have significant differences. They have no nesting, joining, or
+inheritance semantics. On the other hand, when evaluated, templates
+are evaluated iteratively, so this affords some level of indirection.
+
+Let's start with the simplest template; text with one
+variable, in this case `name`:
+
+    Hello {{name}}
+
+If we evaluate this as is, we'd get back:
+
+    Hello
+
+If a template variable doesn't have a value, when evaluated it's
+replaced with nothing. If we add a binding to give it a value:
+
+    { "name" : "Tom" }
+
+We'd get back:
+
+    Hello Tom
+
+Every Pystachio object has an associated `.bind` method that can bind
+values to {{}} variables. Bindings are not immediately evaluated.
+Instead, they are evaluated only when the interpolated value of the
+object is necessary, e.g. for performing equality or serializing a
+message over the wire.
+
+Objects with and without mustache templated variables behave
+differently:
+
+    >>> Float(1.5)
+    Float(1.5)
+
+    >>> Float('{{x}}.5')
+    Float({{x}}.5)
+
+    >>> Float('{{x}}.5').bind(x = 1)
+    Float(1.5)
+
+    >>> Float('{{x}}.5').bind(x = 1) == Float(1.5)
+    True
+
+    >>> contextual_object = String('{{metavar{{number}}}}').bind(
+    ... metavar1 = "first", metavar2 = "second")
+
+    >>> contextual_object
+    String({{metavar{{number}}}})
+
+    >>> contextual_object.bind(number = 1)
+    String(first)
+
+    >>> contextual_object.bind(number = 2)
+    String(second)
+
+You usually bind simple key to value pairs, but you can also bind three
+other objects: lists, dictionaries, and structurals. These will be
+described in detail later.
+
+
+### Structurals in Pystachio / Aurora
+
+Most Aurora/Thermos users don't ever (knowingly) interact with `String`,
+`Float`, or `Integer` Pystachio objects directly. Instead they interact
+with derived structural (`Struct`) objects that are collections of
+fundamental and structural objects. The structural object components are
+called *attributes*. Aurora's most used structural objects are `Job`,
+`Task`, and `Process`:
+
+    class Process(Struct):
+      cmdline = Required(String)
+      name = Required(String)
+      max_failures = Default(Integer, 1)
+      daemon = Default(Boolean, False)
+      ephemeral = Default(Boolean, False)
+      min_duration = Default(Integer, 5)
+      final = Default(Boolean, False)
+
+Construct default objects by following the object's type with (). If you
+want an attribute to have a value different from its default, include
+the attribute name and value inside the parentheses.
+
+    >>> Process()
+    Process(daemon=False, max_failures=1, ephemeral=False,
+      min_duration=5, final=False)
+
+Attribute values can be template variables, which then receive specific
+values when creating the object.
+
+    >>> Process(cmdline = 'echo {{message}}')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo {{message}}, final=False)
+
+    >>> Process(cmdline = 'echo {{message}}').bind(message = 'hello world')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+A powerful binding property is that all of an object's children inherit its
+bindings:
+
+    >>> List(Process)([
+    ... Process(name = '{{prefix}}_one'),
+    ... Process(name = '{{prefix}}_two')
+    ... ]).bind(prefix = 'hello')
+    ProcessList(
+      Process(daemon=False, name=hello_one, max_failures=1, ephemeral=False, min_duration=5, final=False),
+      Process(daemon=False, name=hello_two, max_failures=1, ephemeral=False, min_duration=5, final=False)
+      )
+
+Remember that an Aurora Job contains Tasks which contain Processes. A
+Job level binding is inherited by its Tasks and all their Processes.
+Similarly a Task level binding is available to that Task and its
+Processes but is *not* visible at the Job level (inheritance is a
+one-way street.)
+
+#### Mustaches Within Structurals
+
+When you define a `Struct` schema, one powerful, but confusing, feature
+is that all of that structure's attributes are Mustache variables within
+the enclosing scope *once they have been populated*.
+
+For example, when `Process` is defined above, all its attributes such as
+{{`name`}}, {{`cmdline`}}, {{`max_failures`}} etc., are all immediately
+defined as Mustache variables, implicitly bound into the `Process`, and
+are inherited by all child objects once they are defined.
+
+Thus, you can do the following:
+
+    >>> Process(name = "installer", cmdline = "echo {{name}} is running")
+    Process(daemon=False, name=installer, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo installer is running, final=False)
+
+WARNING: This binding only takes place in one direction. For example,
+the following does NOT work and does not set the `Process` `name`
+attribute's value.
+
+    >>> Process().bind(name = "installer")
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5, final=False)
+
+The following is also not possible and results in an infinite loop that
+attempts to resolve `Process.name`.
+
+    >>> Process(name = '{{name}}').bind(name = 'installer')
+
+Do not confuse Structural attributes with bound Mustache variables.
+Attributes are implicitly converted to Mustache variables but not vice
+versa.
+
+### Templating 2: Structurals Are Factories
+
+#### A Second Way of Templating
+
+A second templating method is both as powerful as the aforementioned and
+often confused with it. This method is due to automatic conversion of
+Struct attributes to Mustache variables as described above.
+
+Suppose you create a Process object:
+
+    >>> p = Process(name = "process_one", cmdline = "echo hello world")
+
+    >>> p
+    Process(daemon=False, name=process_one, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+This `Process` object, "`p`", can be used wherever a `Process` object is
+needed. It can also be reused by changing the value(s) of its
+attribute(s). Here we change its `name` attribute from `process_one` to
+`process_two`.
+
+    >>> p(name = "process_two")
+    Process(daemon=False, name=process_two, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+Template creation is a common use for this technique:
+
+    >>> Daemon = Process(daemon = True)
+    >>> logrotate = Daemon(name = 'logrotate', cmdline = './logrotate conf/logrotate.conf')
+    >>> mysql = Daemon(name = 'mysql', cmdline = 'bin/mysqld --safe-mode')
+
+### Advanced Binding
+
+As described above, `.bind()` binds simple strings or numbers to
+Mustache variables. In addition to Structural types formed by combining
+atomic types, Pystachio has two container types; `List` and `Map` which
+can also be bound via `.bind()`.
+
+#### Bind Syntax
+
+The `bind()` function can take Python dictionaries or `kwargs`
+interchangeably (when "`kwargs`" is in a function definition, `kwargs`
+receives a Python dictionary containing all keyword arguments after the
+formal parameter list).
+
+    >>> String('{{foo}}').bind(foo = 'bar') == String('{{foo}}').bind({'foo': 'bar'})
+    True
+
+Bindings done "closer" to the object in question take precedence:
+
+    >>> p = Process(name = '{{context}}_process')
+    >>> t = Task().bind(context = 'global')
+    >>> t(processes = [p, p.bind(context = 'local')])
+    Task(processes=ProcessList(
+      Process(daemon=False, name=global_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5),
+      Process(daemon=False, name=local_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5)
+    ))
+
+#### Binding Complex Objects
+
+##### Lists
+
+    >>> fibonacci = List(Integer)([1, 1, 2, 3, 5, 8, 13])
+    >>> String('{{fib[4]}}').bind(fib = fibonacci)
+    String(5)
+
+##### Maps
+
+    >>> first_names = Map(String, String)({'Kent': 'Clark', 'Wayne': 'Bruce', 'Prince': 'Diana'})
+    >>> String('{{first[Kent]}}').bind(first = first_names)
+    String(Clark)
+
+##### Structurals
+
+    >>> String('{{p.cmdline}}').bind(p = Process(cmdline = "echo hello world"))
+    String(echo hello world)
+
+### Structural Binding
+
+Use structural templates when binding more than two or three individual
+values at the Job or Task level. For fewer than two or three, standard
+key to string binding is sufficient.
+
+Structural binding is a very powerful pattern and is most useful in
+Aurora/Thermos for doing Structural configuration. For example, you can
+define a job profile. The following profile uses `HDFS`, the Hadoop
+Distributed File System, to designate a file's location. `HDFS` does
+not come with Aurora, so you'll need to either install it separately
+or change the way the dataset is designated.
+
+    class Profile(Struct):
+      version = Required(String)
+      environment = Required(String)
+      dataset = Default(String, 'hdfs://home/aurora/data/{{environment}}')
+
+    PRODUCTION = Profile(version = 'live', environment = 'prod')
+    DEVEL = Profile(version = 'latest',
+                    environment = 'devel',
+                    dataset = 'hdfs://home/aurora/data/test')
+    TEST = Profile(version = 'latest', environment = 'test')
+
+    JOB_TEMPLATE = Job(
+      name = 'application',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = '{{profile.environment}}',
+      task = SequentialTask(
+        name = 'task',
+        resources = Resources(cpu = 2, ram = 4*GB, disk = 8*GB),
+        processes = [
+          Process(name = 'main',
+                  cmdline = 'java -jar application.jar -hdfsPath {{profile.dataset}}')
+        ]
+      )
+    )
+
+    jobs = [
+      JOB_TEMPLATE(instances = 100).bind(profile = PRODUCTION),
+      JOB_TEMPLATE.bind(profile = DEVEL),
+      JOB_TEMPLATE.bind(profile = TEST),
+     ]
+
+In this case, a custom structural "Profile" is created to self-document
+the configuration to some degree. This also allows some schema
+"type-checking", and for default self-substitution, e.g. in
+`Profile.dataset` above.
+
+So rather than a `.bind()` with a half-dozen substituted variables, you
+can bind a single object that has sensible defaults stored in a single
+place.
diff --git a/source/documentation/0.22.0/reference/configuration-tutorial.md b/source/documentation/0.22.0/reference/configuration-tutorial.md
new file mode 100644
index 0000000..79705de
--- /dev/null
+++ b/source/documentation/0.22.0/reference/configuration-tutorial.md
@@ -0,0 +1,533 @@
+Aurora Configuration Tutorial
+=============================
+
+How to write Aurora configuration files, including feature descriptions
+and best practices. When writing a configuration file, make use of
+`aurora job inspect`. It takes the same job key and configuration file
+arguments as `aurora job create` or `aurora update start`. It first ensures the
+configuration parses, then outputs it in human-readable form.
+
+You should read this after going through the general [Aurora Tutorial](../../getting-started/tutorial/).
+
+- [The Basics](#the-basics)
+	- [Use Bottom-To-Top Object Ordering](#use-bottom-to-top-object-ordering)
+- [An Example Configuration File](#an-example-configuration-file)
+- [Defining Process Objects](#defining-process-objects)
+- [Getting Your Code Into The Sandbox](#getting-your-code-into-the-sandbox)
+- [Defining Task Objects](#defining-task-objects)
+	- [SequentialTask: Running Processes in Parallel or Sequentially](#sequentialtask-running-processes-in-parallel-or-sequentially)
+	- [SimpleTask](#simpletask)
+	- [Combining tasks](#combining-tasks)
+- [Defining Job Objects](#defining-job-objects)
+- [The jobs List](#the-jobs-list)
+- [Basic Examples](#basic-examples)
+
+
+The Basics
+----------
+
+To run a job on Aurora, you must specify a configuration file that tells
+Aurora what it needs to know to schedule the job, what Mesos needs to
+run the tasks the job is made up of, and what Thermos needs to run the
+processes that make up the tasks. This file must have
+a `.aurora` suffix.
+
+A configuration file defines a collection of objects, along with parameter
+values for their attributes. An Aurora configuration file contains the
+following three types of objects:
+
+- Job
+- Task
+- Process
+
+A configuration also specifies a list of `Job` objects assigned
+to the variable `jobs`.
+
+- jobs (list of defined Jobs to run)
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a type-checked dictionary
+templating library called *Pystachio*, a powerful tool for
+configuration specification and reuse. Pystachio objects are tailored
+via {{}} surrounded templates.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the [*Aurora Configuration
+Reference*](../configuration/), without `import` statements - the
+Aurora config loader injects them automatically. Other than that, an `.aurora`
+file works like any other Python script.
+
+[*Aurora Configuration Reference*](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+### Use Bottom-To-Top Object Ordering
+
+A well-structured configuration starts with structural templates (if
+any). Structural templates encapsulate in their attributes all the
+differences between Jobs in the configuration that are not directly
+manipulated at the `Job` level, but typically at the `Process` or `Task`
+level. For example, if certain processes are invoked with slightly
+different settings or input.
+
+After structural templates, define, in order, `Process`es, `Task`s, and
+`Job`s.
+
+Structural template names should be *UpperCamelCased* and their
+instantiations are typically *UPPER\_SNAKE\_CASED*. `Process`, `Task`,
+and `Job` names are typically *lower\_snake\_cased*. Indentation is typically 2
+spaces.
+
+An Example Configuration File
+-----------------------------
+
+The following is a typical configuration file. Don't worry if there are
+parts you don't understand yet, but you may want to refer back to this
+as you read about its individual parts. Note that names surrounded by
+curly braces {{}} are template variables, which the system replaces with
+bound values for the variables.
+
+    # --- templates here ---
+	class Profile(Struct):
+	  package_version = Default(String, 'live')
+	  java_binary = Default(String, '/usr/lib/jvm/java-1.7.0-openjdk/bin/java')
+	  extra_jvm_options = Default(String, '')
+	  parent_environment = Default(String, 'prod')
+	  parent_serverset = Default(String,
+                                 '/foocorp/service/bird/{{parent_environment}}/bird')
+
+	# --- processes here ---
+	main = Process(
+	  name = 'application',
+	  cmdline = '{{profile.java_binary}} -server -Xmx1792m '
+	            '{{profile.extra_jvm_options}} '
+	            '-jar application.jar '
+	            '-upstreamService {{profile.parent_serverset}}'
+	)
+
+	# --- tasks ---
+	base_task = SequentialTask(
+	  name = 'application',
+	  processes = [
+	    Process(
+	      name = 'fetch',
+	      cmdline = 'curl -O '
+	                'https://packages.foocorp.com/{{profile.package_version}}/application.jar'),
+	  ]
+	)
+
+        # not always necessary but often useful to have separate task
+        # resource classes
+        staging_task = base_task(resources =
+                         Resources(cpu = 1.0,
+                                   ram = 2048*MB,
+                                   disk = 1*GB))
+	production_task = base_task(resources =
+                            Resources(cpu = 4.0,
+                                      ram = 2560*MB,
+                                      disk = 10*GB))
+
+	# --- job template ---
+	job_template = Job(
+	  name = 'application',
+	  role = 'myteam',
+	  contact = 'myteam-team@foocorp.com',
+	  instances = 20,
+	  service = True,
+	  task = production_task
+	)
+
+	# -- profile instantiations (if any) ---
+	PRODUCTION = Profile()
+	STAGING = Profile(
+	  extra_jvm_options = '-Xloggc:gc.log',
+	  parent_environment = 'staging'
+	)
+
+	# -- job instantiations --
+	jobs = [
+          job_template(cluster = 'cluster1', environment = 'prod')
+	               .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster2', environment = 'prod')
+	                .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster1',
+                        environment = 'staging',
+			service = False,
+			task = staging_task,
+			instances = 2)
+			.bind(profile = STAGING),
+	]
+
+## Defining Process Objects
+
+Processes are handled by the Thermos system. A process is a single
+executable step run as a part of an Aurora task, which consists of a
+bash-executable statement.
+
+The key (and required) `Process` attributes are:
+
+-   `name`: Any string which is a valid Unix filename (no slashes,
+    NULLs, or leading periods). The `name` value must be unique relative
+    to other Processes in a `Task`.
+-   `cmdline`: A command line run in a bash subshell, so you can use
+    bash scripts. Nothing is supplied for command-line arguments,
+    so `$*` is unspecified.
+
+Many tiny processes make managing configurations more difficult. For
+example, the following is a bad way to define processes.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip'
+    )
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+Since `cmdline` runs in a bash subshell, you can chain commands
+with `&&` or `||`.
+
+When defining a `Task` that is just a list of Processes run in a
+particular order, use `SequentialTask`, as described in the [*Defining*
+`Task` *Objects*](#defining-task-objects) section. The following simplifies and combines the
+above multiple `Process` definitions into just two.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip && '
+                'unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+`Process` also has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#process-objects).
+
+
+## Getting Your Code Into The Sandbox
+
+When using Aurora, you need to get your executable code into its "sandbox", specifically
+the Task sandbox where the code executes for the Processes that make up that Task.
+
+Each Task has a sandbox created when the Task starts and garbage
+collected when it finishes. All of a Task's processes run in its
+sandbox, so processes can share state by using a shared current
+working directory.
+
+Typically, you save this code somewhere. You then need to define a Process
+in your `.aurora` configuration file that fetches the code from that somewhere
+to where the agent can see it. For a public cloud, that can be anywhere public on
+the Internet, such as S3. For private cloud internal storage, you need to put it
+on an accessible HDFS cluster or similar storage.
+
+The template for this Process is:
+
+    <name> = Process(
+      name = '<name>',
+      cmdline = '<command to copy and extract code archive into current working directory>'
+    )
+
+Note: Be sure the extracted code archive has an executable.
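+
+A concrete sketch of that template, assuming a hypothetical HTTP location for the code archive:
+
+    fetch_app = Process(
+      name = 'fetch_app',
+      # URL and archive name are illustrative
+      cmdline = 'curl -O https://artifacts.example.com/myapp/app.tar.gz && tar xzf app.tar.gz'
+    )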
+
+## Getting Environment Variables Into The Sandbox
+
+Every time a process is forked, the Thermos executor checks for the existence of a
+`.thermos_profile` file; if that file exists, it is sourced.
+You can use this mechanism to pass environment variables into the sandbox.
+
+An example for this Process is:
+
+    setup_env = Process(
+        name = 'setup',
+        cmdline = (
+            'cat <<EOF > .thermos_profile\n'
+            'export RESULT=hello\n'
+            'EOF\n'
+        )
+    )
+
+    read_env = Process(
+      name = 'read',
+      cmdline = 'echo $RESULT'
+    )
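+
+Since `.thermos_profile` must be written before `read_env` is forked, one way to order the two processes is the `SequentialTask` helper described below (resource values here are illustrative):
+
+    env_demo_task = SequentialTask(
+      name = 'env_demo',
+      processes = [setup_env, read_env],
+      resources = Resources(cpu = 0.25, ram = 16*MB, disk = 16*MB)
+    )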
+
+## Defining Task Objects
+
+Tasks are handled by Mesos. A task is a collection of processes that
+runs in a shared sandbox. It's the fundamental unit Aurora uses to
+schedule the datacenter; essentially what Aurora does is find places
+in the cluster to run tasks.
+
+The key (and required) parts of a Task are:
+
+-   `name`: A string giving the Task's name. By default, if a Task is
+    not given a name, it inherits the first name in its Process list.
+
+-   `processes`: An unordered list of Process objects bound to the Task.
+    The value of the optional `constraints` attribute affects the
+    contents as a whole. Currently, the only constraint, `order`, determines if
+    the processes run in parallel or sequentially.
+
+-   `resources`: A `Resources` object defining the Task's resource
+    footprint. A `Resources` object has three attributes:
+    -   `cpu`: A Float, the fractional number of cores the Task requires.
+    -   `ram`: An Integer, RAM bytes the Task requires.
+    -   `disk`: An Integer, disk bytes the Task requires.
+
+A basic Task definition looks like:
+
+    Task(
+        name="hello_world",
+        processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+        resources=Resources(cpu = 1.0,
+                            ram = 1*GB,
+                            disk = 1*GB))
+
+A Task has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#task-object)
+
+
+### SequentialTask: Running Processes in Parallel or Sequentially
+
+By default, a Task with several Processes runs them in parallel. There
+are two ways to run Processes sequentially:
+
+-   Include an `order` constraint in the Task definition's `constraints`
+    attribute whose arguments specify the processes' run order:
+
+        Task( ... processes=[process1, process2, process3],
+	          constraints = order(process1, process2, process3), ...)
+
+-   Use `SequentialTask` instead of `Task`; it automatically runs
+    processes in the order specified in the `processes` attribute. No
+    `constraint` parameter is needed:
+
+        SequentialTask( ... processes=[process1, process2, process3] ...)
+
+### SimpleTask
+
+For quickly creating simple tasks, use the `SimpleTask` helper. It
+creates a basic task from a provided name and command line using a
+default set of resources. For example, in a `.aurora` configuration
+file:
+
+    SimpleTask(name="hello_world", command="echo hello world")
+
+is equivalent to
+
+    Task(name="hello_world",
+         processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+         resources=Resources(cpu = 1.0,
+                             ram = 1*GB,
+                             disk = 1*GB))
+
+The simplest idiomatic Job configuration thus becomes:
+
+    import os
+    hello_world_job = Job(
+      task=SimpleTask(name="hello_world", command="echo hello world"),
+      role=os.getenv('USER'),
+      cluster="cluster1")
+
+When written to `hello_world.aurora`, you invoke it with a simple
+`aurora job create cluster1/$USER/test/hello_world hello_world.aurora`.
+
+### Combining tasks
+
+`Tasks.concat` (synonym: `concat_tasks`) and
+`Tasks.combine` (synonym: `combine_tasks`) merge multiple Task definitions
+into a single Task. It may be easier to define complex Jobs
+as smaller constituent Tasks. But since a Job only includes a single
+Task, the subtasks must be combined before using them in a Job.
+Smaller Tasks can also be reused between Jobs, instead of having to
+repeat their definition for multiple Jobs.
+
+With both methods, the merged Task takes the first Task's name. The
+difference between the two is the result Task's process ordering.
+
+-   `Tasks.combine` runs its subtasks' processes in no particular order.
+    The new Task's resource consumption is the sum of all its subtasks'
+    consumption.
+
+-   `Tasks.concat` runs its subtasks in the order supplied, with each
+    subtask's processes run serially between tasks. It is analogous to
+    the `order` constraint helper, except at the Task level instead of
+    the Process level. The new Task's resource consumption is the
+    maximum value specified by any subtask for each Resource attribute
+    (cpu, ram and disk).
+
+For example, given the following:
+
+    setup_task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper],
+      # It is important to note that {{Tasks.concat}} has
+      # no effect on the ordering of the processes within a task;
+      # hence the necessity of the {{order}} statement below
+      # (otherwise, the order in which {{download_interpreter}}
+      # and {{update_zookeeper}} run will be non-deterministic)
+      constraints=order(download_interpreter, update_zookeeper),
+      ...
+    )
+
+    run_task = SequentialTask(
+      ...
+      processes=[download_application, start_application],
+      ...
+    )
+
+    combined_task = Tasks.concat(setup_task, run_task)
+
+The `Tasks.concat` command merges the two Tasks into a single Task and
+ensures all processes in `setup_task` run before the processes
+in `run_task`. Conceptually, the task is reduced to:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper,
+                        download_application, start_application),
+      ...
+    )
+
+In the case of `Tasks.combine`, the two schedules run in parallel:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper) +
+                        order(download_application, start_application),
+      ...
+    )
+
+In the latter case, each of the two sequences may operate in parallel.
+Of course, this may not be the intended behavior (for example, if
+the `start_application` Process implicitly relies
+upon `download_interpreter`). Make sure you understand the difference
+between using one or the other.
+
+## Defining Job Objects
+
+A job is a group of identical tasks that Aurora can run in a Mesos cluster.
+
+A `Job` object is defined by the values of several attributes, some
+required and some optional. The required attributes are:
+
+-   `task`: Task object to bind to this job. Note that a Job can
+    only take a single Task.
+
+-   `role`: Job's role account; in other words, the user account to run
+    the job as on a Mesos cluster machine. A common value is
+    `os.getenv('USER')`, which uses a Python call to get the user who
+    submits the job request. The other common value is the service
+    account that runs the job, e.g. `www-data`.
+
+-   `environment`: Job's environment, typical values
+    are `devel`, `test`, or `prod`.
+
+-   `cluster`: Aurora cluster to schedule the job in, defined in
+    `/etc/aurora/clusters.json` or `~/.clusters.json`. You can specify
+    jobs where the only difference is the `cluster`, then at run time
+    only run the Job whose job key includes your desired cluster's name.
+
+You usually see a `name` parameter. By default, `name` inherits its
+value from the Job's associated Task object, but you can override this
+default. For these four parameters, a Job definition might look like:
+
+    foo_job = Job( name = 'foo', cluster = 'cluster1',
+              role = os.getenv('USER'), environment = 'prod',
+              task = foo_task)
+
+In addition to the required attributes, there are several optional
+attributes. Details can be found in the [Aurora Configuration Reference](../configuration/#job-objects).
+
+
+## The jobs List
+
+At the end of your `.aurora` file, you need to specify a list of the
+file's defined Jobs. For example, the following exports the jobs `job1`,
+`job2`, and `job3`.
+
+    jobs = [job1, job2, job3]
+
+This allows the aurora client to invoke commands on those jobs, such as
+starting, updating, or killing them.
+
+
+
+Basic Examples
+==============
+
+These are provided to give a basic understanding of simple Aurora jobs.
+
+### hello_world.aurora
+
+Put the following in a file named `hello_world.aurora`, substituting your own values
+where needed, such as the `cluster` name.
+
+    import os
+    hello_world_process = Process(name = 'hello_world', cmdline = 'echo hello world')
+
+    hello_world_task = Task(
+      resources = Resources(cpu = 0.1, ram = 16 * MB, disk = 16 * MB),
+      processes = [hello_world_process])
+
+    hello_world_job = Job(
+      cluster = 'cluster1',
+      role = os.getenv('USER'),
+      task = hello_world_task)
+
+    jobs = [hello_world_job]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world hello_world.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world
+
+### Environment Tailoring
+
+Put the following in a file named `hello_world_productionized.aurora`, substituting your own values
+where needed, such as the `cluster` name.
+
+    include('hello_world.aurora')
+
+    production_resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 2 * GB)
+    staging_resources = Resources(cpu = 0.1, ram = 32 * MB, disk = 512 * MB)
+    hello_world_template = hello_world(
+        name = "hello_world-{{cluster}}"
+        task = hello_world(resources=production_resources))
+
+    jobs = [
+      # production jobs
+      hello_world_template(cluster = 'cluster1', instances = 25),
+      hello_world_template(cluster = 'cluster2', instances = 15),
+
+      # staging jobs
+      hello_world_template(
+        cluster = 'local',
+        instances = 1,
+        task = hello_world(resources=staging_resources)),
+    ]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world-cluster1 hello_world_productionized.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world-cluster1
diff --git a/source/documentation/0.22.0/reference/configuration.md b/source/documentation/0.22.0/reference/configuration.md
new file mode 100644
index 0000000..88f50ae
--- /dev/null
+++ b/source/documentation/0.22.0/reference/configuration.md
@@ -0,0 +1,737 @@
+Aurora Configuration Reference
+==============================
+
+Don't know where to start? The Aurora configuration schema is very
+powerful, and configurations can become quite complex for advanced use
+cases.
+
+For examples of simple configurations to get something up and running
+quickly, check out the [Tutorial](../../getting-started/tutorial/). When you feel comfortable with the basics, move
+on to the [Configuration Tutorial](../configuration-tutorial/) for more in-depth coverage of
+configuration design.
+
+- [Process Schema](#process-schema)
+    - [Process Objects](#process-objects)
+- [Task Schema](#task-schema)
+    - [Task Object](#task-object)
+    - [Constraint Object](#constraint-object)
+    - [Resource Object](#resource-object)
+- [Job Schema](#job-schema)
+    - [Job Objects](#job-objects)
+    - [UpdateConfig Objects](#updateconfig-objects)
+    - [HealthCheckConfig Objects](#healthcheckconfig-objects)
+    - [Announcer Objects](#announcer-objects)
+    - [Container Objects](#container)
+    - [LifecycleConfig Objects](#lifecycleconfig-objects)
+    - [SlaPolicy Objects](#slapolicy-objects)
+- [Specifying Scheduling Constraints](#specifying-scheduling-constraints)
+- [Template Namespaces](#template-namespaces)
+    - [mesos Namespace](#mesos-namespace)
+    - [thermos Namespace](#thermos-namespace)
+
+
+Process Schema
+==============
+
+Process objects consist of required `name` and `cmdline` attributes. You can customize Process
+behavior with its optional attributes. Remember, Processes are handled by Thermos.
+
+### Process Objects
+
+  **Attribute Name**  | **Type**    | **Description**
+  ------------------- | :---------: | ---------------------------------
+   **name**           | String      | Process name (Required)
+   **cmdline**        | String      | Command line (Required)
+   **max_failures**   | Integer     | Maximum process failures (Default: 1)
+   **daemon**         | Boolean     | When True, this is a daemon process. (Default: False)
+   **ephemeral**      | Boolean     | When True, this is an ephemeral process. (Default: False)
+   **min_duration**   | Integer     | Minimum duration between process restarts in seconds. (Default: 5)
+   **final**          | Boolean     | When True, this process is a finalizing one that should run last. (Default: False)
+   **logger**         | Logger      | Struct defining the log behavior for the process. (Default: Empty)
+
+#### name
+
+The name is any valid UNIX filename string (specifically no
+slashes, NULLs or leading periods). Within a Task object, each Process name
+must be unique.
+
+#### cmdline
+
+The command line run by the process. The command line is invoked in a bash
+subshell, so it can involve full-blown bash scripts. However, nothing is
+supplied for command-line arguments so `$*` is unspecified.
+
+#### max_failures
+
+The maximum number of failures (non-zero exit statuses) this process can
+have before being marked permanently failed and not retried. If a
+process permanently fails, Thermos looks at the failure limit of the task
+containing the process (usually 1) to determine if the task has
+failed as well.
+
+Setting `max_failures` to 0 makes the process retry
+indefinitely until it achieves a successful (zero) exit status.
+It retries at most once every `min_duration` seconds to prevent
+an effective denial of service attack on the coordinating Thermos scheduler.
+
+#### daemon
+
+By default, Thermos processes are non-daemon. If `daemon` is set to True, a
+successful (zero) exit status does not prevent future process runs.
+Instead, the process is reinvoked after `min_duration` seconds.
+However, the maximum failure limit still applies. A combination of
+`daemon=True` and `max_failures=0` causes a process to retry
+indefinitely regardless of exit status. This should be avoided
+for very short-lived processes because of the accumulation of
+checkpointed state for each process run. When running in Mesos
+specifically, `max_failures` is capped at 100.
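+
+As a minimal sketch (the process name and command line below are made up for
+illustration), a daemon process that re-runs a heartbeat command at least 30 seconds
+apart might look like:
+
+    heartbeat = Process(
+      name = 'heartbeat',
+      # Hypothetical command; re-runs even after successful exits.
+      cmdline = 'curl -s http://localhost:8080/ping',
+      daemon = True,
+      min_duration = 30)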
+
+#### ephemeral
+
+By default, Thermos processes are non-ephemeral. If `ephemeral` is set to
+True, the process' status is not used to determine if its containing task
+has completed. For example, consider a task with a non-ephemeral
+webserver process and an ephemeral logsaver process
+that periodically checkpoints its log files to a centralized data store.
+The task is considered finished once the webserver process has
+completed, regardless of the logsaver's current status.
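+
+A sketch of the webserver/logsaver pairing described above (both command lines are
+hypothetical placeholders):
+
+    webserver = Process(name = 'webserver', cmdline = './bin/run_server.sh')
+    logsaver = Process(
+      name = 'logsaver',
+      cmdline = './bin/checkpoint_logs.sh',
+      # The containing task may finish while this process is still running.
+      ephemeral = True,
+      daemon = True)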
+
+#### min_duration
+
+Processes may succeed or fail multiple times during a single task's
+duration. Each of these is called a *process run*. `min_duration` is
+the minimum number of seconds the scheduler waits before running the
+same process.
+
+#### final
+
+Processes can be grouped into two classes: ordinary processes and
+finalizing processes. By default, Thermos processes are ordinary. They
+run as long as the task is considered healthy (i.e., no failure
+limits have been reached.) But once all regular Thermos processes
+finish or the task reaches a certain failure threshold, it
+moves into a "finalization" stage and runs all finalizing
+processes. These are typically processes necessary for cleaning up the
+task, such as log checkpointers, or perhaps e-mail notifications that
+the task completed.
+
+Finalizing processes may not depend upon ordinary processes or
+vice-versa, however finalizing processes may depend upon other
+finalizing processes and otherwise run as a typical process
+schedule.
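+
+For example, a finalizing log-archiving process might be declared like this (the
+command line is a hypothetical placeholder):
+
+    log_archiver = Process(
+      name = 'log_archiver',
+      cmdline = './bin/archive_logs.sh',
+      # Runs during the finalization stage, after ordinary processes finish or fail.
+      final = True)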
+
+#### logger
+
+The default behavior of Thermos is to store stderr/stdout logs in files which grow unbounded.
+In the event that you have large log volume, you may want to configure Thermos to automatically
+rotate logs after they grow to a certain size, which can prevent your job from using more than its
+allocated disk space.
+
+Logger objects specify a `destination` for Process logs which is, by default, `file` - a pair of
+`stdout` and `stderr` files. It's also possible to specify `console` to send log output to
+the Process stdout and stderr streams, `none` to suppress log output, or `both` to send logs to
+both files and console streams.
+
+The default Logger `mode` is `standard` which lets the stdout and stderr streams grow without bound.
+
+  **Attribute Name**  | **Type**          | **Description**
+  ------------------- | :---------------: | ---------------------------------
+   **destination**    | LoggerDestination | Destination of logs. (Default: `file`)
+   **mode**           | LoggerMode        | Mode of the logger. (Default: `standard`)
+   **rotate**         | RotatePolicy      | An optional rotation policy. (Default: `Empty`)
+
+A RotatePolicy describes log rotation behavior when `mode` is set to `rotate`; it is ignored
+otherwise. If `rotate` is `Empty` or `RotatePolicy()` while `mode` is set to `rotate`, the
+defaults below are used.
+
+  **Attribute Name**  | **Type**     | **Description**
+  ------------------- | :----------: | ---------------------------------
+   **log_size**       | Integer      | Maximum size (in bytes) of an individual log file. (Default: 100 MiB)
+   **backups**        | Integer      | The maximum number of backups to retain. (Default: 5)
+
+An example process configuration is as follows:
+
+        process = Process(
+          name='process',
+          logger=Logger(
+            destination=LoggerDestination('both'),
+            mode=LoggerMode('rotate'),
+            rotate=RotatePolicy(log_size=5*MB, backups=5)
+          )
+        )
+
+Task Schema
+===========
+
+Tasks fundamentally consist of a `name` and a list of Process objects stored as the
+value of the `processes` attribute. Processes can be further constrained with
+`constraints`. By default, `name`'s value inherits from the first Process in the
+`processes` list, so for simple `Task` objects with one Process, `name`
+can be omitted. In Mesos, `resources` is also required.
+
+### Task Object
+
+   **param**               | **type**                         | **description**
+   ---------               | :---------:                      | ---------------
+   ```name```              | String                           | Task name (Required) (Default: ```processes[0].name```)
+   ```processes```         | List of ```Process``` objects    | List of ```Process``` objects bound to this task. (Required)
+   ```constraints```       | List of ```Constraint``` objects | List of ```Constraint``` objects constraining processes.
+   ```resources```         | ```Resource``` object            | Resource footprint. (Required)
+   ```max_failures```      | Integer                          | Maximum process failures before being considered failed (Default: 1)
+   ```max_concurrency```   | Integer                          | Maximum number of concurrent processes (Default: 0, unlimited concurrency.)
+   ```finalization_wait``` | Integer                          | Amount of time allocated for finalizing processes, in seconds. (Default: 30)
+
+#### name
+`name` is a string denoting the name of this task. It defaults to the name of the first Process in
+the list of Processes associated with the `processes` attribute.
+
+#### processes
+
+`processes` is an unordered list of `Process` objects. To constrain the order
+in which they run, use `constraints`.
+
+##### constraints
+
+A list of `Constraint` objects. Currently it supports only one type,
+the `order` constraint. `order` is a list of process names
+that should run in the order given. For example,
+
+        process = Process(cmdline = "echo hello {{name}}")
+        task = Task(name = "echoes",
+                    processes = [process(name = "jim"), process(name = "bob")],
+                    constraints = [Constraint(order = ["jim", "bob"])])
+
+Constraints can be supplied ad-hoc and in duplicate. Not all
+Processes need be constrained; however, Tasks with cycles are
+rejected by the Thermos scheduler.
+
+Use the `order` function as shorthand to generate `Constraint` lists.
+The following:
+
+        order(process1, process2)
+
+is shorthand for
+
+        [Constraint(order = [process1.name(), process2.name()])]
+
+The `order` function accepts Process name strings `('foo', 'bar')` or the processes
+themselves, e.g. `foo=Process(name='foo', ...)`, `bar=Process(name='bar', ...)`,
+`constraints=order(foo, bar)`.
+
+#### resources
+
+Takes a `Resource` object, which specifies the amounts of CPU, memory, and disk space resources
+to allocate to the Task.
+
+#### max_failures
+
+`max_failures` is the number of failed processes needed for the `Task` to be
+marked as failed.
+
+For example, assume a Task has two Processes and a `max_failures` value of `2`:
+
+        template = Process(max_failures=10)
+        task = Task(
+          name = "fail",
+          processes = [
+             template(name = "failing", cmdline = "exit 1"),
+             template(name = "succeeding", cmdline = "exit 0")
+          ],
+          max_failures=2)
+
+The `failing` Process could fail 10 times before being marked as permanently
+failed, and the `succeeding` Process could succeed on the first run. However,
+the task would succeed despite only allowing for two failed processes. To be more
+specific, there would be 10 failed process runs yet 1 failed process. Both processes
+would have to fail for the Task to fail.
+
+#### max_concurrency
+
+For Tasks with a number of expensive but otherwise independent
+processes, you may want to limit the amount of concurrency
+the Thermos scheduler provides rather than artificially constraining
+it via `order` constraints. For example, a test framework may
+generate a task with 100 test run processes, but wants to run it on
+a machine with only 4 cores. You can limit the amount of parallelism to
+4 by setting `max_concurrency=4` in your task configuration.
+
+For example, the following task spawns 180 Processes ("mappers")
+to compute individual elements of a 180 degree sine table, all dependent
+upon one final Process ("reducer") to tabulate the results:
+
+    def make_mapper(id):
+      return Process(
+        name = "mapper%03d" % id,
+        cmdline = "echo 'scale=50;s(%d*4*a(1)/180)' | bc -l > temp.sine_table.%03d" % (id, id))
+
+    def make_reducer():
+      return Process(name = "reducer",
+                     cmdline = "cat temp.* | nl > sine_table.txt && rm -f temp.*")
+
+    processes = map(make_mapper, range(180))
+
+    task = Task(
+      name = "mapreduce",
+      processes = processes + [make_reducer()],
+      constraints = [Constraint(order = [mapper.name(), 'reducer']) for mapper
+                     in processes],
+      max_concurrency = 8)
+
+#### finalization_wait
+
+Process execution is organized into three active stages: `ACTIVE`,
+`CLEANING`, and `FINALIZING`. The `ACTIVE` stage is when ordinary processes run.
+This stage lasts as long as Processes are running and the Task is healthy.
+The moment either all Processes have finished successfully or the Task has reached a
+maximum Process failure limit, it goes into `CLEANING` stage and sends
+SIGTERMs to all currently running Processes and their process trees.
+Once all Processes have terminated, the Task goes into `FINALIZING` stage
+and invokes the schedule of all Processes with the "final" attribute set to True.
+
+This whole process from the end of `ACTIVE` stage to the end of `FINALIZING`
+must happen within `finalization_wait` seconds. If it does not
+finish during that time, all remaining Processes are sent SIGKILLs
+(or if they depend upon uncompleted Processes, are
+never invoked.)
+
+When running on Aurora, the `finalization_wait` is capped at 60 seconds.
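+
+A sketch of raising the finalization window for a task with a slow cleanup step
+(`server` and `cleanup` are hypothetical processes, with `cleanup` marked `final=True`):
+
+    task = Task(
+      name = 'service_with_cleanup',
+      processes = [server, cleanup],
+      resources = Resources(cpu = 1.0, ram = 512*MB, disk = 1*GB),
+      # Allow up to 60 seconds for SIGTERM handling plus the finalizing processes.
+      finalization_wait = 60)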
+
+### Constraint Object
+
+Current constraint objects only support a single ordering constraint, `order`,
+which specifies that its processes run sequentially in the order given. By
+default, all processes run in parallel when bound to a `Task` without
+ordering constraints.
+
+   param | type           | description
+   ----- | :----:         | -----------
+   order | List of String | List of processes by name (String) that should be run serially.
+
+### Resource Object
+
+Specifies the amount of CPU, RAM, and disk resources the task needs. See the
+[Resource Isolation document](../../features/resource-isolation/) for suggested values and to understand how
+resources are allocated.
+
+  param      | type    | description
+  -----      | :----:  | -----------
+  ```cpu```  | Float   | Fractional number of cores required by the task.
+  ```ram```  | Integer | Bytes of RAM required by the task.
+  ```disk``` | Integer | Bytes of disk required by the task.
+  ```gpu```  | Integer | Number of GPU cores required by the task.
+
+
+Job Schema
+==========
+
+### Job Objects
+
+*Note: Specifying a ```Container``` object as the value of the ```container``` property is
+  deprecated in favor of setting its value directly to the appropriate ```Docker``` or ```Mesos```
+  container type*
+
+*Note: Specifying preemption behavior of tasks through `production` flag is deprecated in favor of
+  electing appropriate task tier via `tier` attribute.*
+
+   name | type | description
+   ------ | :-------: | -------
+  ```task``` | Task | The Task object to bind to this job. Required.
+  ```name``` | String | Job name. (Default: inherited from the task attribute's name)
+  ```role``` | String | Job role account. Required.
+  ```cluster``` | String | Cluster in which this job is scheduled. Required.
+  ```environment``` | String | Job environment, default ```devel```. By default must be one of ```prod```, ```devel```, ```test``` or ```staging<number>``` but it can be changed by the Cluster operator using the scheduler option `allowed_job_environments`.
+  ```contact``` | String | Best email address to reach the owner of the job. For production jobs, this is usually a team mailing list.
+  ```instances```| Integer | Number of instances (sometimes referred to as replicas or shards) of the task to create. (Default: 1)
+  ```cron_schedule``` | String | Cron schedule in cron format. May only be used with non-service jobs. See [Cron Jobs](../../features/cron-jobs/) for more information. Default: None (not a cron job.)
+  ```cron_collision_policy``` | String | Policy to use when a cron job is triggered while a previous run is still active. KILL\_EXISTING: kill the previous run and schedule the new run. CANCEL\_NEW: let the previous run continue and cancel the new run. (Default: KILL_EXISTING)
+  ```update_config``` | ```UpdateConfig``` object | Parameters for controlling the rate and policy of rolling updates.
+  ```constraints``` | dict | Scheduling constraints for the tasks. See the section on the [constraint specification language](#specifying-scheduling-constraints)
+  ```service``` | Boolean | If True, restart tasks regardless of success or failure. (Default: False)
+  ```max_task_failures``` | Integer | Maximum number of failures after which the task is considered to have failed (Default: 1) Set to -1 to allow for infinite failures
+  ```priority``` | Integer | Preemption priority to give the task (Default 0). Tasks with higher priorities may preempt tasks at lower priorities.
+  ```production``` | Boolean |  (Deprecated) Whether or not this is a production task that may [preempt](../../features/multitenancy/#preemption) other tasks (Default: False). Production job role must have the appropriate [quota](../../features/multitenancy/#preemption).
+  ```health_check_config``` | ```HealthCheckConfig``` object | Parameters for controlling a task's health checks. HTTP health check is only used if a  health port was assigned with a command line wildcard.
+  ```container``` | Choice of ```Container```, ```Docker``` or ```Mesos``` object | An optional container to run all processes inside of.
+  ```lifecycle``` | ```LifecycleConfig``` object | An optional task lifecycle configuration that dictates commands to be executed on startup/teardown.  HTTP lifecycle is enabled by default if the "health" port is requested.  See [LifecycleConfig Objects](#lifecycleconfig-objects) for more information.
+  ```tier``` | String | Task tier type. The default scheduler tier configuration allows for 3 tiers: `revocable`, `preemptible`, and `preferred`. If a tier is not elected, Aurora assigns the task to a tier based on its choice of `production` (that is `preferred` for production and `preemptible` for non-production jobs). See the section on [Configuration Tiers](../../features/multitenancy/#configuration-tiers) for more information.
+  ```announce``` | ```Announcer``` object | Optionally enable Zookeeper ServerSet announcements. See [Announcer Objects](#announcer-objects) for more information.
+  ```enable_hooks``` | Boolean | Whether to enable [Client Hooks](../client-hooks/) for this job. (Default: False)
+  ```partition_policy``` | ```PartitionPolicy``` object | An optional partition policy that allows job owners to define how to handle partitions for running tasks (in partition-aware Aurora clusters)
+  ```metadata``` | list of ```Metadata``` objects | list of ```Metadata``` objects for user's customized metadata information.
+  ```executor_config``` | ```ExecutorConfig``` object | Allows choosing an alternative executor defined in `custom_executor_config` to be used instead of Thermos. Tasks will be launched with Thermos as the executor by default. See [Custom Executors](../../features/custom-executors/) for more info.
+  ```sla_policy``` |  Choice of ```CountSlaPolicy```, ```PercentageSlaPolicy``` or ```CoordinatorSlaPolicy``` object | An optional SLA policy that allows job owners to describe the SLA requirements for the job. See [SlaPolicy Objects](#slapolicy-objects) for more information.
+
+
+### UpdateConfig Objects
+
+Parameters for controlling the rate and policy of rolling updates.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+| ```watch_secs```             | Integer  | Minimum number of seconds a shard must remain in ```RUNNING``` state before considered a success (Default: 45)
+| ```max_per_shard_failures``` | Integer  | Maximum number of restarts per shard during update. Increments total failure count when this limit is exceeded. (Default: 0)
+| ```max_total_failures```     | Integer  | Maximum number of shard failures to be tolerated in total during an update. Cannot be greater than or equal to the total number of tasks in a job. (Default: 0)
+| ```rollback_on_failure```    | boolean  | When False, prevents auto rollback of a failed update (Default: True)
+| ```wait_for_batch_completion```| boolean | When True, all threads from a given batch will be blocked from picking up new instances until the entire batch is updated. This essentially simulates the legacy sequential updater algorithm. (Default: False)
+| ```pulse_interval_secs```    | Integer  |  Indicates a [coordinated update](../../features/job-updates/#coordinated-job-updates). If no pulses are received within the provided interval the update will be blocked. Beta-updater only. Will fail on submission when used with client updater. (Default: None)
+| ```update_strategy```        | Choice of ```QueueUpdateStrategy```,  ```BatchUpdateStrategy```, or ```VariableBatchUpdateStrategy``` object | Indicate which update strategy to use for this update.
+| ```sla_aware```              | boolean  | When True, updates will only update an instance if it does not break the task's specified [SLA Requirements](../../features/sla-requirements/). (Default: None)
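+
+As an illustrative sketch (the values are arbitrary), an update policy attached to a Job
+might look like:
+
+    update_config = UpdateConfig(
+      batch_size = 5,             # update 5 instances at a time
+      watch_secs = 60,            # an instance must stay RUNNING for 60s to count as healthy
+      max_per_shard_failures = 2,
+      max_total_failures = 1)
+
+    job = Job(
+      ...
+      update_config = update_config)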
+
+### QueueUpdateStrategy Objects
+
+Update strategy which will keep the active updating instances at size ```batch_size``` throughout the update until there are no more instances left to update.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+
+### BatchUpdateStrategy Objects
+
+Update strategy which will wait until a maximum of ``batch_size`` number of instances are updated before continuing on to the next group until all instances are updated.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+| ```autopause_after_batch```             | Boolean  | Automatically pauses update after completing a batch. (Default: False)
+
+### VariableBatchUpdateStrategy Objects
+
+Similar to Batch Update strategy, this strategy will wait until all instances in a current group are
+updated before updating more instances. However, instead of maintaining a static group size, the
+size of each group may change as the update progresses. For example, an update which modifies a
+total of 10 instances may be done in batch sizes of 2, 3, and 5. If the number of instances to
+be updated are greater than the sum of the groups, the last group size will be used in
+perpetuity until all instances are updated. Following the previous example, if instead of 10
+instances 20 instances are modified, the update groups would become: 2, 3, 5, 5, 5.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_sizes```             | List(Integer)  | Maximum number of shards to be updated per iteration. As each iteration completes, the next iteration's group size may change. If there are still instances that need to be updated after all sizes are used, the last size will be reused for the remainder of the update.
+| ```autopause_after_batch```             | Boolean  | Automatically pauses update before starting a new batch. (Default: False)
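+
+A sketch of the 2, 3, 5 example above expressed as an `UpdateConfig` (the sizes are
+illustrative):
+
+    update_config = UpdateConfig(
+      update_strategy = VariableBatchUpdateStrategy(
+        # Update 2 instances first, then 3, then groups of 5 until done.
+        batch_sizes = [2, 3, 5],
+        autopause_after_batch = False))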
+
+#### Using the `sla_aware` option
+
+There are some nuances around the `sla_aware` option that users should be aware of:
+
+- SLA-aware updates work in tandem with maintenance. Draining a host that has an instance of the
+job being updated affects the SLA and thus will be taken into account when the update determines
+whether or not it is safe to update another instance.
+- SLA-aware updates will use the [SLAPolicy](../../features/sla-requirements/#custom-sla) of the
+*newest* configuration when determining whether or not it is safe to update an instance. For
+example, if the current configuration specifies a
+[PercentageSlaPolicy](../../features/sla-requirements/#percentageslapolicy-objects) that allows for
+5% of instances to be down and the updated configuration increases this value to 10%, the SLA
+calculation will be done using the 10% policy. Be mindful of this when doing an update that
+modifies the `SLAPolicy` since it may be possible to put the old configuration in a bad state
+that the new configuration would not be affected by. Additionally, if the update is rolled back,
+then the rollback will use the old `SLAPolicy` (or none if there was not one previously).
+- If using the [CoordinatorSlaPolicy](../../features/sla-requirements/#coordinatorslapolicy-objects),
+it is important to pay attention to the `batch_size` of the update. If you have a complex SLA
+requirement, then you may be limiting the throughput of your updates with an insufficient
+`batch_size`. For example, imagine you have a job with 9 instances that represent three
+replicated caches, and you can only update one instance per replica set: `[0 1 2]
+[3 4 5] [6 7 8]` (the number indicates the instance ID and the brackets represent replica
+sets). If your `batch_size` is 3, then you will slowly update one replica set at a time. If your
+`batch_size` is 9, then you can update all replica sets in parallel and thus speed up the update.
+- If an instance fails an SLA check for an update, then it will be rechecked starting at a delay
+from `sla_aware_kill_retry_min_delay` and exponentially increasing up to
+`sla_aware_kill_retry_max_delay`. These are cluster-operator set values.
+
+### HealthCheckConfig Objects
+
+Parameters for controlling a task's health checks via HTTP or a shell command.
+
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```health_checker```           | HealthCheckerConfig | Configure what kind of health check to use.
+| ```initial_interval_secs```    | Integer   | Initial grace period (during which health-check failures are ignored) while performing health checks. (Default: 15)
+| ```interval_secs```            | Integer   | Interval on which to check the task's health. (Default: 10)
+| ```max_consecutive_failures``` | Integer   | Maximum number of consecutive failures that will be tolerated before considering a task unhealthy (Default: 0)
+| ```min_consecutive_successes``` | Integer   | Minimum number of consecutive successful health checks required before considering a task healthy (Default: 1)
+| ```timeout_secs```             | Integer   | Health check timeout. (Default: 1)
+
+### HealthCheckerConfig Objects
+| param                          | type                | description
+| -------                        | :-------:           | --------
+| ```http```                     | HttpHealthChecker  | Configure health check to use HTTP. (Default)
+| ```shell```                    | ShellHealthChecker | Configure health check via a shell command.
+
+### HttpHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```endpoint```                 | String    | HTTP endpoint to check (Default: /health)
+| ```expected_response```        | String    | If not empty, fail the HTTP health check if the response differs. Case insensitive. (Default: ok)
+| ```expected_response_code```   | Integer   | If not zero, fail the HTTP health check if the response code differs. (Default: 0)
+
+### ShellHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```shell_command```            | String    | An alternative to HTTP health checking. Specifies a shell command that will be executed. Any non-zero exit status will be interpreted as a health check failure.
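+
+A sketch of a shell-based health check wired into a Job (the command line is a
+hypothetical placeholder and the intervals are illustrative):
+
+    health_check_config = HealthCheckConfig(
+      health_checker = HealthCheckerConfig(
+        shell = ShellHealthChecker(shell_command = './bin/is_healthy.sh')),
+      interval_secs = 10,
+      max_consecutive_failures = 3)
+
+    job = Job(
+      ...
+      health_check_config = health_check_config)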
+
+### PartitionPolicy Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```reschedule```               | Boolean   | Whether or not to reschedule when running tasks become partitioned (Default: True)
+| ```delay_secs```               | Integer   | How long to delay transitioning to LOST when running tasks are partitioned. (Default: 0)
+
+### Metadata Objects
+
+Describes a piece of user metadata in a key value pair
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```key```        | String          | Indicate which metadata the user provides
+  ```value```      | String          | Provide the metadata content for corresponding key
+
+### ExecutorConfig Objects
+
+Describes an Executor name and data to pass to the Mesos Task
+
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```name```               | String   | Name of the executor to use for this task. Must match the name of an executor in `custom_executor_config` or Thermos (`AuroraExecutor`). (Default: AuroraExecutor)
+| ```data```               | String   | Data blob to pass on to the executor. (Default: "")
+
+### Announcer Objects
+
+If the `announce` field in the Job configuration is set, each task will be
+registered in the ServerSet `/aurora/role/environment/jobname` in the
+zookeeper ensemble configured by the executor (which can optionally be overridden by specifying the
+`zk_path` parameter).  If no Announcer object is specified,
+no announcement will take place.  For more information about ServerSets, see the [Service Discovery](../../features/service-discovery/)
+documentation.
+
+By default, the hostname in the registered endpoints will be the `--hostname` parameter
+that is passed to the mesos agent. To override the hostname value, the executor can be started
+with `--announcer-hostname=<overridden_value>`. If you decide to use `--announcer-hostname` and if
+the overridden value needs to change for every executor, then the executor has to be started inside a wrapper, see [Executor Wrapper](../../operations/configuration/#thermos-executor-wrapper).
+
+For example, if you want the hostname in the endpoint to be an IP address instead of the hostname,
+the `--hostname` parameter to the mesos agent can be set to the machine IP or the executor can
+be started with `--announcer-hostname=<host_ip>` while wrapping the executor inside a script.
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```primary_port```             | String    | Which named port to register as the primary endpoint in the ServerSet (Default: `http`)
+| ```portmap```                  | dict      | A mapping of additional endpoints to be announced in the ServerSet (Default: `{ 'aurora': '{{primary_port}}' }`)
+| ```zk_path```                  | String    | Zookeeper serverset path override (executor must be started with the `--announcer-allow-custom-serverset-path` parameter)
+
+#### Port aliasing with the Announcer `portmap`
+
+The primary endpoint registered in the ServerSet is the one allocated to the port
+specified by the `primary_port` in the `Announcer` object, by default
+the `http` port.  This port can be referenced from anywhere within a configuration
+as `{{thermos.ports[http]}}`.
+
+Without the port map, each named port would be allocated a unique port number.
+The `portmap` allows two different named ports to be aliased together.  The default
+`portmap` aliases the `aurora` port (i.e. `{{thermos.ports[aurora]}}`) to
+the `http` port.  Even though the two ports can be referenced independently,
+only one port is allocated by Mesos.  Any port referenced in a `Process` object
+but which is not in the portmap will be allocated dynamically by Mesos and announced as well.
+
+It is possible to use the portmap to alias names to static port numbers, e.g.
+`{'http': 80, 'https': 443, 'aurora': 'http'}`.  In this case, referencing
+`{{thermos.ports[aurora]}}` would look up `{{thermos.ports[http]}}` then
+find a static port 80.  No port would be requested of or allocated by Mesos.
+
+Static ports should be used cautiously as Aurora does nothing to prevent two
+tasks with the same static port allocations from being co-scheduled.
+External constraints such as agent attributes should be used to enforce such
+guarantees should they be needed.
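+
+A sketch of an announced Job that keeps the default `aurora` alias and additionally pins
+`http` to a static port (keep the co-scheduling caveats above in mind; the values are
+illustrative):
+
+    job = Job(
+      ...
+      announce = Announcer(
+        primary_port = 'http',
+        portmap = {'http': 80, 'aurora': 'http'}))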
+
+
+### Container Objects
+
+Describes the container the job's processes will run inside. If not using Docker or the Mesos
+unified-container, the container can be omitted from your job config.
+
+  param          | type           | description
+  -----          | :----:         | -----------
+  ```mesos```    | Mesos          | A native Mesos container to use.
+  ```docker```   | Docker         | A Docker container to use (via Docker engine)
+
+### Mesos Object
+
+  param            | type                           | description
+  -----            | :----:                         | -----------
+  ```image```      | Choice(AppcImage, DockerImage) | An optional filesystem image to use within this container.
+  ```volumes```    | List(Volume)                   | An optional list of volume mounts for this container.
+
+### Volume Object
+
+  param                  | type     | description
+  -----                  | :----:   | -----------
+  ```container_path```   | String   | Mount point within the container.
+  ```host_path```        | String   | Path on the host to mount.
+  ```mode```             | Enum     | Mode of the mount, can be 'RW' or 'RO'.
+
+### AppcImage
+
+Describes an AppC filesystem image.
+
+  param          | type   | description
+  -----          | :----: | -----------
+  ```name```     | String | The name of the appc image.
+  ```image_id``` | String | The [image id](https://github.com/appc/spec/blob/master/spec/aci.md#image-id) of the appc image.
+
+### DockerImage
+
+Describes a Docker filesystem image.
+
+  param      | type   | description
+  -----      | :----: | -----------
+  ```name``` | String | The name of the docker image.
+  ```tag```  | String | The tag that identifies the docker image.
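+
+Putting the Mesos container pieces together, a sketch of a job running inside a Docker
+filesystem image with a read-only volume (the image name, tag, and paths are
+illustrative):
+
+    job = Job(
+      ...
+      container = Mesos(
+        image = DockerImage(name = 'my_image', tag = 'latest'),
+        volumes = [Volume(container_path = '/data',
+                          host_path = '/var/data',
+                          mode = 'RO')]))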
+
+
+### Docker Object
+
+*Note: In order to correctly execute processes inside a job, the Docker container must have Python 2.7 installed.*
+
+*Note: For a private docker registry, mesos mandates the docker credential file to be named `.dockercfg`, even though docker may create a credential file with a different name on various platforms. Also, the `.dockercfg` file needs to be copied into the sandbox using the `-thermos_executor_resources` flag, specified while starting Aurora.*
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```image```      | String          | The name of the docker image to execute.  If the image does not exist locally it will be pulled with ```docker pull```.
+  ```parameters``` | List(Parameter) | Additional parameters to pass to the Docker engine.
+
+### Docker Parameter Object
+
+Docker CLI parameters. This needs to be enabled by the scheduler `-allow_docker_parameters` option.
+See [Docker Command Line Reference](https://docs.docker.com/reference/commandline/run/) for valid parameters.
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```name```       | String          | The name of the docker parameter. E.g. volume
+  ```value```      | String          | The value of the parameter. E.g. /usr/local/bin:/usr/bin:rw
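+
+A sketch of a job using the Docker engine container type with an extra engine parameter
+(the image name and parameter are illustrative, and `-allow_docker_parameters` must be
+enabled on the scheduler):
+
+    job = Job(
+      ...
+      container = Docker(
+        image = 'my_image:latest',
+        parameters = [Parameter(name = 'memory-swap', value = '-1')]))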
+
+
+### LifecycleConfig Objects
+
+*Note: The only lifecycle configuration supported is the HTTP lifecycle via the HttpLifecycleConfig.*
+
+  param          | type                | description
+  -----          | :----:              | -----------
+  ```http```     | HttpLifecycleConfig | Configure the lifecycle manager to send lifecycle commands to the task via HTTP.
+
+### HttpLifecycleConfig Objects
+
+*Note: The combined `graceful_shutdown_wait_secs` and `shutdown_wait_secs` is implicitly upper bounded by the `--stop_timeout_in_secs` flag exposed by the executor (see options [here](https://github.com/apache/aurora/blob/master/src/main/python/apache/aurora/executor/bin/thermos_executor_main.py), default is 2 minutes). Therefore, if the user specifies values that add up to more than `--stop_timeout_in_secs`, the task will be killed earlier than the user anticipates (see the termination lifecycle [here](https://aurora.apache.org/documentation/latest/reference/task-lifecycle/#forceful-termination-killing-restarting)). Furthermore, `stop_timeout_in_secs` itself is implicitly upper bounded by two scheduler options: `transient_task_state_timeout` and `preemption_slot_hold_time` (see reference [here](http://aurora.apache.org/documentation/latest/reference/scheduler-configuration/)). If the `stop_timeout_in_secs` exceeds either of these scheduler options, tasks could be designated as LOST or tasks utilizing preemption could lose their desired slot respectively. Cluster operators should be aware of these timings should they change the defaults.*
+
+  param                             | type    | description
+  -----                             | :----:  | -----------
+  ```port```                        | String  | The named port to send POST commands. (Default: health)
+  ```graceful_shutdown_endpoint```  | String  | Endpoint to hit to indicate that a task should gracefully shutdown. (Default: /quitquitquit)
+  ```shutdown_endpoint```           | String  | Endpoint to hit to give a task its final warning before being killed. (Default: /abortabortabort)
+  ```graceful_shutdown_wait_secs``` | Integer | The amount of time (in seconds) to wait after hitting the ```graceful_shutdown_endpoint``` before proceeding with the [task termination lifecycle](https://aurora.apache.org/documentation/latest/reference/task-lifecycle/#forceful-termination-killing-restarting). (Default: 5)
+  ```shutdown_wait_secs```          | Integer | The amount of time (in seconds) to wait after hitting the ```shutdown_endpoint``` before proceeding with the [task termination lifecycle](https://aurora.apache.org/documentation/latest/reference/task-lifecycle/#forceful-termination-killing-restarting). (Default: 5)
+
+#### graceful\_shutdown\_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint to request that the task gracefully shut itself down.  This is a
+courtesy call before the `shutdown_endpoint` is invoked
+`graceful_shutdown_wait_secs` seconds later.
+
+#### shutdown_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint as a final warning before the task is shut down.  If the task
+does not shut down on its own after `shutdown_wait_secs` seconds, it will be
+forcefully killed.
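+
+A sketch of a Job that overrides the default shutdown wait times (the values are
+illustrative; remember that the combined waits are bounded by `--stop_timeout_in_secs`):
+
+    job = Job(
+      ...
+      lifecycle = LifecycleConfig(
+        http = HttpLifecycleConfig(
+          graceful_shutdown_endpoint = '/quitquitquit',
+          shutdown_endpoint = '/abortabortabort',
+          graceful_shutdown_wait_secs = 10,
+          shutdown_wait_secs = 10)))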
+
+
+### SlaPolicy Objects
+
+Configuration for specifying custom [SLA requirements](../../features/sla-requirements/) for a job. There are 3 supported SLA policies,
+namely [`CountSlaPolicy`](#countslapolicy-objects), [`PercentageSlaPolicy`](#percentageslapolicy-objects) and [`CoordinatorSlaPolicy`](#coordinatorslapolicy-objects).
+
+
+### CountSlaPolicy Objects
+
+  param                             | type    | description
+  -----                             | :----:  | -----------
+  ```count```                       | Integer | The number of active instances required every `duration_secs`.
+  ```duration_secs```               | Integer | Minimum time duration a task needs to be `RUNNING` to be treated as active.
+
+### PercentageSlaPolicy Objects
+
+  param                             | type    | description
+  -----                             | :----:  | -----------
+  ```percentage```                  | Float   | The percentage of active instances required every `duration_secs`.
+  ```duration_secs```               | Integer | Minimum time duration a task needs to be `RUNNING` to be treated as active.
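+
+For example, a sketch of a job that requires 95% of its instances to have been `RUNNING`
+for at least 30 minutes before SLA-affecting actions may proceed (the values are
+illustrative):
+
+    job = Job(
+      ...
+      sla_policy = PercentageSlaPolicy(
+        percentage = 95,
+        duration_secs = 1800))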
+
+### CoordinatorSlaPolicy Objects
+
+  param                             | type    | description
+  -----                             | :----:  | -----------
+  ```coordinator_url```             | String  | The URL to the [Coordinator](../../features/sla-requirements/#coordinator) service to be contacted before performing SLA affecting actions (job updates, host drains etc).
+  ```status_key```                  | String  | The field in the Coordinator response that indicates the SLA status for working on the task. (Default: `drain`)
+
+
+Specifying Scheduling Constraints
+=================================
+
+In the `Job` object there is a map `constraints` from String to String
+allowing the user to tailor the schedulability of tasks within the job.
+
+The constraint map's key value is the attribute name in which we
+constrain Tasks within our Job. The value is how we constrain them.
+There are two types of constraints: *limit constraints* and *value
+constraints*.
+
+| constraint    | description
+| ------------- | --------------
+| Limit         | A string that specifies a limit for a constraint. Starts with <code>'limit:</code> followed by an Integer and closing single quote, such as ```'limit:1'```.
+| Value         | A string that specifies a value for a constraint. To include a list of values, separate the values using commas. To negate the values of a constraint, start with a ```!```.
+
+Further details can be found in the [Scheduling Constraints](../../features/constraints/) feature
+description.
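+
+As a small illustration (the `host` and `rack` attribute names assume the cluster
+operator exposes such agent attributes), a limit constraint and a value constraint might
+be expressed as:
+
+    job = Job(
+      ...
+      constraints = {
+        'host': 'limit:1',      # at most one instance per host
+        'rack': 'rack1,rack2',  # only schedule on agents whose rack attribute matches
+      })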
+
+
+Template Namespaces
+===================
+
+Currently, a few Pystachio namespaces have special semantics. Using them
+in your configuration allows you to tailor application behavior
+through environment introspection or interact in special ways with the
+Aurora client or Aurora-provided services.
+
+### mesos Namespace
+
+The `mesos` namespace contains variables which relate to the `mesos` agent
+which launched the task. The `instance` variable can be used
+to distinguish between Task replicas.
+
+| variable name     | type       | description
+| --------------- | :--------: | -------------
+| ```instance```    | Integer    | The instance number of the created task. A job with 5 replicas has instance numbers 0, 1, 2, 3, and 4.
+| ```hostname``` | String | The instance hostname that the task was launched on.
+
+Please note, there is no uniqueness guarantee for `instance` in the presence of
+network partitions. If that is required, it should be baked in at the application
+level using a distributed coordination service such as Zookeeper.
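+
+For instance, a Process can interpolate these variables into its command line:
+
+    announce_instance = Process(
+      name = 'announce_instance',
+      # {{mesos.instance}} and {{mesos.hostname}} are filled in per task replica.
+      cmdline = 'echo "instance {{mesos.instance}} running on {{mesos.hostname}}"')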
+
+### thermos Namespace
+
+The `thermos` namespace contains variables that work directly on the
+Thermos platform in addition to Aurora. This namespace is fully
+compatible with Tasks invoked via the `thermos` CLI.
+
+| variable      | type                     | description                        |
+| :----------:  | ---------                | ------------                       |
+| ```ports```   | map of string to Integer | A map of names to port numbers     |
+| ```task_id``` | string                   | The task ID assigned to this task. |
+
+The `thermos.ports` namespace is automatically populated by Aurora when
+invoking tasks on Mesos. When running the `thermos` command directly,
+these ports must be explicitly mapped with the `-P` option.
+
+For example, if '{{`thermos.ports[http]`}}' is specified in a `Process`
+configuration, it is automatically extracted and auto-populated by
+Aurora, but must be specified with, for example, `thermos -P http:12345`
+to map `http` to port 12345 when running via the CLI.
diff --git a/source/documentation/0.22.0/reference/observer-configuration.md b/source/documentation/0.22.0/reference/observer-configuration.md
new file mode 100644
index 0000000..5b9ce2b
--- /dev/null
+++ b/source/documentation/0.22.0/reference/observer-configuration.md
@@ -0,0 +1,108 @@
+# Observer Configuration Reference
+
+The Aurora/Thermos observer can take a variety of configuration options through command-line arguments.
+A list of the available options can be seen by running `thermos_observer --long-help`.
+
+Please refer to the [Operator Configuration Guide](../../operations/configuration/) for details on how
+to properly set the most important options.
+
+```
+$ thermos_observer.pex --long-help
+Options:
+  -h, --help, --short-help
+                        show this help message and exit.
+  --long-help           show options from all registered modules, not just the
+                        __main__ module.
+  --mesos-root=MESOS_ROOT
+                        The mesos root directory to search for Thermos
+                        executor sandboxes [default: /var/lib/mesos]
+  --ip=IP               The IP address the observer will bind to. [default:
+                        0.0.0.0]
+  --port=PORT           The port on which the observer should listen.
+                        [default: 1338]
+  --polling_interval_secs=POLLING_INTERVAL_SECS
+                        The number of seconds between observer refresh
+                        attempts. [default: 5]
+  --disable_task_resource_collection
+                        Disable collection of CPU and memory statistics for
+                        each active task. Those can be expensive to collect if
+                        there are hundreds of active tasks per host. [default:
+                        False]
+  --task_process_collection_interval_secs=TASK_PROCESS_COLLECTION_INTERVAL_SECS
+                        The number of seconds between per task process
+                        resource collections. [default: 20]
+  --task_disk_collection_interval_secs=TASK_DISK_COLLECTION_INTERVAL_SECS
+                        The number of seconds between per task disk resource
+                        collections. [default: 60]
+  --enable_mesos_disk_collector
+                        Delegate per task disk usage collection to agent.
+                        Should be enabled in conjunction to disk isolation in
+                        Mesos-agent. This is not compatible with an
+                        authenticated agent API. [default: False]
+  --agent_api_url=AGENT_API_URL
+                        Mesos Agent API url. [default:
+                        http://localhost:5051/containers]
+  --executor_id_json_path=EXECUTOR_ID_JSON_PATH
+                        `jmespath` to executor_id key in agent response json
+                        object. [default: executor_id]
+  --disk_usage_json_path=DISK_USAGE_JSON_PATH
+                        `jmespath` to disk usage bytes value in agent response
+                        json object. [default: statistics.disk_used_bytes]
+
+  From module twitter.common.app:
+    --app_daemonize     Daemonize this application. [default: False]
+    --app_profile_output=FILENAME
+                        Dump the profiling output to a binary profiling
+                        format. [default: None]
+    --app_daemon_stderr=TWITTER_COMMON_APP_DAEMON_STDERR
+                        Direct this app's stderr to this file if daemonized.
+                        [default: /dev/null]
+    --app_debug         Print extra debugging information during application
+                        initialization. [default: False]
+    --app_rc_filename   Print the filename for the rc file and quit. [default:
+                        False]
+    --app_daemon_stdout=TWITTER_COMMON_APP_DAEMON_STDOUT
+                        Direct this app's stdout to this file if daemonized.
+                        [default: /dev/null]
+    --app_profiling     Run profiler on the code while it runs.  Note this can
+                        cause slowdowns. [default: False]
+    --app_ignore_rc_file
+                        Ignore default arguments from the rc file. [default:
+                        False]
+    --app_pidfile=TWITTER_COMMON_APP_PIDFILE
+                        The pidfile to use if --app_daemonize is specified.
+                        [default: None]
+
+  From module twitter.common.log.options:
+    --log_to_stdout=[scheme:]LEVEL
+                        OBSOLETE - legacy flag, use --log_to_stderr instead.
+                        [default: ERROR]
+    --log_to_stderr=[scheme:]LEVEL
+                        The level at which logging to stderr [default: ERROR].
+                        Takes either LEVEL or scheme:LEVEL, where LEVEL is one
+                        of ['INFO', 'NONE', 'WARN', 'ERROR', 'DEBUG', 'FATAL']
+                        and scheme is one of ['google', 'plain'].
+    --log_to_disk=[scheme:]LEVEL
+                        The level at which logging to disk [default: INFO].
+                        Takes either LEVEL or scheme:LEVEL, where LEVEL is one
+                        of ['INFO', 'NONE', 'WARN', 'ERROR', 'DEBUG', 'FATAL']
+                        and scheme is one of ['google', 'plain'].
+    --log_dir=DIR       The directory into which log files will be generated
+                        [default: /var/tmp].
+    --log_simple        Write a single log file rather than one log file per
+                        log level [default: False].
+    --log_to_scribe=[scheme:]LEVEL
+                        The level at which logging to scribe [default: NONE].
+                        Takes either LEVEL or scheme:LEVEL, where LEVEL is one
+                        of ['INFO', 'NONE', 'WARN', 'ERROR', 'DEBUG', 'FATAL']
+                        and scheme is one of ['google', 'plain'].
+    --scribe_category=CATEGORY
+                        The category used when logging to the scribe daemon.
+                        [default: python_default].
+    --scribe_buffer     Buffer messages when scribe is unavailable rather than
+                        dropping them. [default: False].
+    --scribe_host=HOST  The host running the scribe daemon. [default:
+                        localhost].
+    --scribe_port=PORT  The port used to connect to the scribe daemon.
+                        [default: 1463].
+```
diff --git a/source/documentation/0.22.0/reference/scheduler-configuration.md b/source/documentation/0.22.0/reference/scheduler-configuration.md
new file mode 100644
index 0000000..2d30133
--- /dev/null
+++ b/source/documentation/0.22.0/reference/scheduler-configuration.md
@@ -0,0 +1,476 @@
+# Scheduler Configuration Reference
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+A list of the available options can be seen by running `aurora-scheduler -help`.
+
+Please refer to the [Operator Configuration Guide](../../operations/configuration/) for details on how
+to properly set the most important options.
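+
+For illustration only, here is a sketch of an invocation that sets just the
+options marked with `*` in the listing below; every value shown is a placeholder
+for your own environment rather than a recommendation:
+
+```
+aurora-scheduler \
+  -cluster_name=example \
+  -backup_dir=/var/lib/aurora/backups \
+  -mesos_master_address=zk://zk1.example.com:2181/mesos \
+  -serverset_path=/aurora/scheduler \
+  -zk_endpoints=zk1.example.com:2181
+```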
+
+```
+Usage: org.apache.aurora.scheduler.app.SchedulerMain [options]
+  Options:
+    -allow_container_volumes
+      Allow passing in volumes in the job. Enabling this could pose a
+      privilege escalation threat.
+      Default: false
+    -allow_docker_parameters
+      Allow to pass docker container parameters in the job.
+      Default: false
+    -allow_gpu_resource
+      Allow jobs to request Mesos GPU resource.
+      Default: false
+    -allowed_container_types
+      Container types that are allowed to be used by jobs.
+      Default: [MESOS]
+    -allowed_job_environments
+      Regular expression describing the environments that are allowed to be
+      used by jobs.
+      Default: ^(prod|devel|test|staging\d*)$
+    -async_slot_stat_update_interval
+      Interval on which to try to update open slot stats.
+      Default: (1, mins)
+    -async_task_stat_update_interval
+      Interval on which to try to update resource consumption stats.
+      Default: (1, hrs)
+    -async_worker_threads
+      The number of worker threads to process async task operations with.
+      Default: 8
+  * -backup_dir
+      Directory to store backups under. Will be created if it does not exist.
+    -backup_interval
+      Minimum interval on which to write a storage backup.
+      Default: (1, hrs)
+  * -cluster_name
+      Name to identify the cluster being served.
+    -cron_scheduler_num_threads
+      Number of threads to use for the cron scheduler thread pool.
+      Default: 10
+    -cron_scheduling_max_batch_size
+      The maximum number of triggered cron jobs that can be processed in a
+      batch.
+      Default: 10
+    -cron_start_initial_backoff
+      Initial backoff delay while waiting for a previous cron run to be
+      killed.
+      Default: (5, secs)
+    -cron_start_max_backoff
+      Max backoff delay while waiting for a previous cron run to be killed.
+      Default: (1, mins)
+    -cron_timezone
+      TimeZone to use for cron predictions.
+      Default: GMT
+    -custom_executor_config
+      Path to custom executor settings configuration file.
+    -default_docker_parameters
+      Default docker parameters for any job that does not explicitly declare
+      parameters.
+      Default: []
+    -dlog_max_entry_size
+      Specifies the maximum entry size to append to the log. Larger entries
+      will be split across entry Frames.
+      Default: (512, KB)
+    -dlog_snapshot_interval
+      Specifies the frequency at which snapshots of local storage are taken
+      and written to the log.
+      Default: (1, hrs)
+    -enable_cors_for
+      List of domains for which CORS support should be enabled.
+    -enable_mesos_fetcher
+      Allow jobs to pass URIs to the Mesos Fetcher. Note that enabling this
+      feature could pose a privilege escalation threat.
+      Default: false
+    -enable_preemptor
+      Enable the preemptor and preemption
+      Default: true
+    -enable_revocable_cpus
+      Treat CPUs as a revocable resource.
+      Default: true
+    -enable_revocable_ram
+      Treat RAM as a revocable resource.
+      Default: false
+    -enable_update_affinity
+      Enable best-effort affinity of task updates.
+      Default: false
+    -executor_user
+      User to start the executor. Defaults to "root". Set this to an
+      unprivileged user if the mesos master was started with
+      "--no-root_submissions". If set to anything other than "root", the
+      executor will ignore the "role" setting for jobs since it can't use
+      setuid() anymore. This means that all your jobs will run under the
+      specified user and the user has to exist on the Mesos agents.
+      Default: root
+    -first_schedule_delay
+      Initial amount of time to wait before first attempting to schedule a
+      PENDING task.
+      Default: (1, ms)
+    -flapping_task_threshold
+      A task that repeatedly runs for less than this time is considered to be
+      flapping.
+      Default: (5, mins)
+    -framework_announce_principal
+      When 'framework_authentication_file' flag is set, the FrameworkInfo
+      registered with the mesos master will also contain the principal. This
+      is necessary if you intend to use mesos authorization via mesos ACLs.
+      The default will change in a future release. Changing this value is
+      backwards incompatible. For details, see MESOS-703.
+      Default: false
+    -framework_authentication_file
+      Properties file which contains framework credentials to authenticate
+      with Mesosmaster. Must contain the properties
+      'aurora_authentication_principal' and 'aurora_authentication_secret'.
+    -framework_failover_timeout
+      Time after which a framework is considered deleted.  SHOULD BE VERY
+      HIGH.
+      Default: (21, days)
+    -framework_name
+      Name used to register the Aurora framework with Mesos.
+      Default: Aurora
+    -global_container_mounts
+      A comma separated list of mount points (in host:container form) to mount
+      into all (non-mesos) containers.
+      Default: []
+    -history_max_per_job_threshold
+      Maximum number of terminated tasks to retain in a job history.
+      Default: 100
+    -history_min_retention_threshold
+      Minimum guaranteed time for task history retention before any pruning is
+      attempted.
+      Default: (1, hrs)
+    -history_prune_threshold
+      Time after which the scheduler will prune terminated task history.
+      Default: (2, days)
+    -hold_offers_forever
+      Hold resource offers indefinitely, disabling automatic offer decline
+      settings.
+      Default: false
+    -host_maintenance_polling_interval
+      Interval between polling for pending host maintenance requests.
+      Default: (1, mins)
+    -hostname
+      The hostname to advertise in ZooKeeper instead of the locally-resolved
+      hostname.
+    -http_authentication_mechanism
+      HTTP Authentication mechanism to use.
+      Default: NONE
+      Possible Values: [NONE, BASIC, NEGOTIATE]
+    -http_port
+      The port to start an HTTP server on.  Default value will choose a random
+      port.
+      Default: 0
+    -initial_flapping_task_delay
+      Initial amount of time to wait before attempting to schedule a flapping
+      task.
+      Default: (30, secs)
+    -initial_schedule_penalty
+      Initial amount of time to wait before attempting to schedule a task that
+      has failed to schedule.
+      Default: (1, secs)
+    -initial_task_kill_retry_interval
+      When killing a task, retry after this delay if mesos has not responded,
+      backing off up to transient_task_state_timeout
+      Default: (15, secs)
+    -ip
+      The ip address to listen. If not set, the scheduler will listen on all
+      interfaces.
+    -job_update_history_per_job_threshold
+      Maximum number of completed job updates to retain in a job update
+      history.
+      Default: 10
+    -job_update_history_pruning_interval
+      Job update history pruning interval.
+      Default: (15, mins)
+    -job_update_history_pruning_threshold
+      Time after which the scheduler will prune completed job update history.
+      Default: (30, days)
+    -kerberos_debug
+      Produce additional Kerberos debugging output.
+      Default: false
+    -kerberos_server_keytab
+      Path to the server keytab.
+    -kerberos_server_principal
+      Kerberos server principal to use, usually of the form
+      HTTP/aurora.example.com@EXAMPLE.COM
+    -max_flapping_task_delay
+      Maximum delay between attempts to schedule a flapping task.
+      Default: (5, mins)
+    -max_leading_duration
+      After leading for this duration, the scheduler should commit suicide.
+      Default: (1, days)
+    -max_parallel_coordinated_maintenance
+      Maximum number of coordinators that can be contacted in parallel.
+      Default: 10
+    -max_registration_delay
+      Max allowable delay to allow the driver to register before aborting
+      Default: (1, mins)
+    -max_reschedule_task_delay_on_startup
+      Upper bound of random delay for pending task rescheduling on scheduler
+      startup.
+      Default: (30, secs)
+    -max_saved_backups
+      Maximum number of backups to retain before deleting the oldest backups.
+      Default: 48
+    -max_schedule_attempts_per_sec
+      Maximum number of scheduling attempts to make per second.
+      Default: 40.0
+    -max_schedule_penalty
+      Maximum delay between attempts to schedule a PENDING tasks.
+      Default: (1, mins)
+    -max_sla_duration_secs
+      Maximum duration window for which SLA requirements are to be
+      satisfied.This does not apply to jobs that have a CoordinatorSlaPolicy.
+      Default: (2, hrs)
+    -max_status_update_batch_size
+      The maximum number of status updates that can be processed in a batch.
+      Default: 1000
+    -max_task_event_batch_size
+      The maximum number of task state change events that can be processed in
+      a batch.
+      Default: 300
+    -max_tasks_per_job
+      Maximum number of allowed tasks in a single job.
+      Default: 4000
+    -max_tasks_per_schedule_attempt
+      The maximum number of tasks to pick in a single scheduling attempt.
+      Default: 5
+    -max_update_instance_failures
+      Upper limit on the number of failures allowed during a job update. This
+      helps cap potentially unbounded entries into storage.
+      Default: 20000
+    -mesos_driver
+      Which Mesos Driver to use
+      Default: SCHEDULER_DRIVER
+      Possible Values: [SCHEDULER_DRIVER, V0_DRIVER, V1_DRIVER]
+  * -mesos_master_address
+      Address for the mesos master, can be a socket address or zookeeper path.
+    -mesos_role
+      The Mesos role this framework will register as. The default is to left
+      this empty, and the framework will register without any role and only
+      receive unreserved resources in offer.
+    -min_offer_hold_time
+      Minimum amount of time to hold a resource offer before declining.
+      Default: (5, mins)
+    -min_required_instances_for_sla_check
+      Minimum number of instances required for a job to be eligible for SLA
+      check. This does not apply to jobs that have a CoordinatorSlaPolicy.
+      Default: 20
+    -native_log_election_retries
+      The maximum number of attempts to obtain a new log writer.
+      Default: 20
+    -native_log_election_timeout
+      The timeout for a single attempt to obtain a new log writer.
+      Default: (15, secs)
+    -native_log_file_path
+      Path to a file to store the native log data in.  If the parent directory
+      doesnot exist it will be created.
+    -native_log_quorum_size
+      The size of the quorum required for all log mutations.
+      Default: 1
+    -native_log_read_timeout
+      The timeout for doing log reads.
+      Default: (5, secs)
+    -native_log_write_timeout
+      The timeout for doing log appends and truncations.
+      Default: (3, secs)
+    -native_log_zk_group_path
+      A zookeeper node for use by the native log to track the master
+      coordinator.
+    -offer_filter_duration
+      Duration after which we expect Mesos to re-offer unused resources. A
+      short duration improves scheduling performance in smaller clusters, but
+      might lead to resource starvation for other frameworks if you run many
+      frameworks in your cluster.
+      Default: (5, secs)
+    -offer_hold_jitter_window
+      Maximum amount of random jitter to add to the offer hold time window.
+      Default: (1, mins)
+    -offer_order
+      Iteration order for offers, to influence task scheduling. Multiple
+      orderings will be compounded together. E.g. CPU,MEMORY,RANDOM would sort
+      first by cpus offered, then memory and finally would randomize any equal
+      offers.
+      Default: [RANDOM]
+    -offer_reservation_duration
+      Time to reserve a agent's offers while trying to satisfy a task
+      preempting another.
+      Default: (3, mins)
+    -offer_set_module
+      Custom Guice module to provide a custom OfferSet.
+      Default: class org.apache.aurora.scheduler.offers.OfferManagerModule$OfferSetModule
+    -offer_static_ban_cache_max_size
+      The number of offers to hold in the static ban cache. If no value is
+      specified, the cache will grow indefinitely. However, entries will
+      expire within 'min_offer_hold_time' + 'offer_hold_jitter_window' of
+      being written.
+      Default: 9223372036854775807
+    -partition_aware
+      Enable paritition-aware status updates.
+      Default: false
+    -populate_discovery_info
+      If true, Aurora populates DiscoveryInfo field of Mesos TaskInfo.
+      Default: false
+    -preemption_delay
+      Time interval after which a pending task becomes eligible to preempt
+      other tasks
+      Default: (3, mins)
+    -preemption_reservation_max_batch_size
+      The maximum number of reservations for a task group to be made in a
+      batch.
+      Default: 5
+    -preemption_slot_finder_modules
+      Guice modules for custom preemption slot searching for pending tasks.
+      Default: [class org.apache.aurora.scheduler.preemptor.PendingTaskProcessorModule, class org.apache.aurora.scheduler.preemptor.PreemptionVictimFilterModule]
+    -preemption_slot_hold_time
+      Time to hold a preemption slot found before it is discarded.
+      Default: (5, mins)
+    -preemption_slot_search_initial_delay
+      Initial amount of time to delay preemption slot searching after
+      scheduler start up.
+      Default: (3, mins)
+    -preemption_slot_search_interval
+      Time interval between pending task preemption slot searches.
+      Default: (1, mins)
+    -receive_revocable_resources
+      Allows receiving revocable resource offers from Mesos.
+      Default: false
+    -reconciliation_explicit_batch_interval
+      Interval between explicit batch reconciliation requests.
+      Default: (5, secs)
+    -reconciliation_explicit_batch_size
+      Number of tasks in a single batch request sent to Mesos for explicit
+      reconciliation.
+      Default: 1000
+    -reconciliation_explicit_interval
+      Interval on which scheduler will ask Mesos for status updates of
+      allnon-terminal tasks known to scheduler.
+      Default: (60, mins)
+    -reconciliation_implicit_interval
+      Interval on which scheduler will ask Mesos for status updates of
+      allnon-terminal tasks known to Mesos.
+      Default: (60, mins)
+    -reconciliation_initial_delay
+      Initial amount of time to delay task reconciliation after scheduler
+      start up.
+      Default: (1, mins)
+    -reconciliation_schedule_spread
+      Difference between explicit and implicit reconciliation intervals
+      intended to create a non-overlapping task reconciliation schedule.
+      Default: (30, mins)
+    -require_docker_use_executor
+      If false, Docker tasks may run without an executor (EXPERIMENTAL)
+      Default: true
+    -scheduling_max_batch_size
+      The maximum number of scheduling attempts that can be processed in a
+      batch.
+      Default: 3
+    -serverset_endpoint_name
+      Name of the scheduler endpoint published in ZooKeeper.
+      Default: http
+  * -serverset_path
+      ZooKeeper ServerSet path to register at.
+    -shiro_after_auth_filter
+      Fully qualified class name of the servlet filter to be applied after the
+      shiro auth filters are applied.
+    -shiro_credentials_matcher
+      The shiro credentials matcher to use (will be constructed by Guice).
+      Default: class org.apache.shiro.authc.credential.SimpleCredentialsMatcher
+    -shiro_ini_path
+      Path to shiro.ini for authentication and authorization configuration.
+    -shiro_realm_modules
+      Guice modules for configuring Shiro Realms.
+      Default: [class org.apache.aurora.scheduler.http.api.security.IniShiroRealmModule]
+    -sla_aware_action_max_batch_size
+      The maximum number of sla aware update actions that can be processed in
+      a batch.
+      Default: 300
+    -sla_aware_kill_non_prod
+      Enables SLA awareness for drain and and update for non-production tasks
+      Default: false
+    -sla_aware_kill_retry_max_delay
+      Maximum amount of time to wait between attempting to perform an
+      SLA-Aware kill on a task.
+      Default: (5, mins)
+    -sla_aware_kill_retry_min_delay
+      Minimum amount of time to wait between attempting to perform an
+      SLA-Aware kill on a task.
+      Default: (1, mins)
+    -sla_coordinator_timeout
+      Timeout interval for communicating with Coordinator.
+      Default: (1, mins)
+    -sla_non_prod_metrics
+      Metric categories collected for non production tasks.
+      Default: []
+    -sla_prod_metrics
+      Metric categories collected for production tasks.
+      Default: [JOB_UPTIMES, PLATFORM_UPTIME, MEDIANS]
+    -sla_stat_refresh_interval
+      The SLA stat refresh interval.
+      Default: (1, mins)
+    -stat_retention_period
+      Time for a stat to be retained in memory before expiring.
+      Default: (1, hrs)
+    -stat_sampling_interval
+      Statistic value sampling interval.
+      Default: (1, secs)
+    -task_assigner_modules
+      Guice modules for customizing task assignment.
+      Default: [class org.apache.aurora.scheduler.scheduling.TaskAssignerImplModule]
+    -thermos_executor_cpu
+      The number of CPU cores to allocate for each instance of the executor.
+      Default: 0.25
+    -thermos_executor_flags
+      Extra arguments to be passed to the thermos executor
+    -thermos_executor_path
+      Path to the thermos executor entry point.
+    -thermos_executor_ram
+      The amount of RAM to allocate for each instance of the executor.
+      Default: (128, MB)
+    -thermos_executor_resources
+      A comma separated list of additional resources to copy into the
+      sandbox.Note: if thermos_executor_path is not the thermos_executor.pex
+      file itself, this must include it.
+      Default: []
+    -thermos_home_in_sandbox
+      If true, changes HOME to the sandbox before running the executor. This
+      primarily has the effect of causing the executor and runner to extract
+      themselves into the sandbox.
+      Default: false
+    -thrift_method_interceptor_modules
+      Custom Guice module(s) to provide additional Thrift method interceptors.
+      Default: []
+    -tier_config
+      Configuration file defining supported task tiers, task traits and
+      behaviors.
+    -transient_task_state_timeout
+      The amount of time after which to treat a task stuck in a transient
+      state as LOST.
+      Default: (5, mins)
+    -unavailability_threshold
+      Threshold time, when running tasks should be drained from a host, before
+      a host becomes unavailable. Should be greater than min_offer_hold_time +
+      offer_hold_jitter_window.
+      Default: (6, mins)
+    -update_affinity_reservation_hold_time
+      How long to wait for a reserved agent to reoffer freed up resources.
+      Default: (3, mins)
+    -viz_job_url_prefix
+      URL prefix for job container stats.
+      Default: <empty string>
+    -webhook_config
+      Path to webhook configuration file.
+    -zk_chroot_path
+      chroot path to use for the ZooKeeper connections
+    -zk_connection_timeout
+      The ZooKeeper connection timeout.
+      Default: (10, secs)
+    -zk_digest_credentials
+      user:password to use when authenticating with ZooKeeper.
+  * -zk_endpoints
+      Endpoint specification for the ZooKeeper servers.
+    -zk_in_proc
+      Launches an embedded zookeeper server for local testing causing
+      -zk_endpoints to be ignored if specified.
+      Default: false
+    -zk_session_timeout
+      The ZooKeeper session timeout.
+      Default: (15, secs)
+```
diff --git a/source/documentation/0.22.0/reference/scheduler-endpoints.md b/source/documentation/0.22.0/reference/scheduler-endpoints.md
new file mode 100644
index 0000000..ddae76b
--- /dev/null
+++ b/source/documentation/0.22.0/reference/scheduler-endpoints.md
@@ -0,0 +1,19 @@
+# HTTP endpoints
+
+There are a number of HTTP endpoints that the Aurora scheduler exposes. These allow various
+operational tasks to be performed on the scheduler. Below is an (incomplete) list of such endpoints
+and a brief explanation of what they do.
+
+## Leader health
+The `/leaderhealth` endpoint enables health checks on the scheduler instances in order
+to forward requests to the leading scheduler. This is typically used by a load balancer such as
+HAProxy or AWS ELB.
+
+When an HTTP GET request is issued to this endpoint, it responds as follows:
+
+- If the instance that received the GET request is the leading scheduler, an HTTP status code of
+  `200 OK` is returned.
+- If the instance that received the GET request is not the leading scheduler but a leader does
+  exist, an HTTP status code of `503 SERVICE_UNAVAILABLE` is returned.
+- If no leader currently exists or the leader is unknown, an HTTP status code of `502 BAD_GATEWAY`
+  is returned.
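+
+For example (the scheduler host and port below are placeholders), a load balancer
+health check or an operator can probe the endpoint and inspect the returned
+status code:
+
+```
+curl -s -o /dev/null -w '%{http_code}\n' http://scheduler.example.com:8081/leaderhealth
+```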
diff --git a/source/documentation/0.22.0/reference/task-lifecycle.md b/source/documentation/0.22.0/reference/task-lifecycle.md
new file mode 100644
index 0000000..b4642b3
--- /dev/null
+++ b/source/documentation/0.22.0/reference/task-lifecycle.md
@@ -0,0 +1,175 @@
+# Task Lifecycle
+
+When Aurora reads a configuration file and finds a `Job` definition, it:
+
+1.  Evaluates the `Job` definition.
+2.  Splits the `Job` into its constituent `Task`s.
+3.  Sends those `Task`s to the scheduler.
+4.  The scheduler puts the `Task`s into `PENDING` state, starting each
+    `Task`'s life cycle.
+
+
+![Life of a task](../images/lifeofatask.png)
+
+Please note that a couple of the task states described below are missing from
+this state diagram.
+
+
+## PENDING to RUNNING states
+
+When a `Task` is in the `PENDING` state, the scheduler constantly
+searches for machines satisfying that `Task`'s resource requirements
+(RAM, disk space, CPU time) while maintaining configuration
+constraints such as "a `Task` must run on machines dedicated to a
+particular role" or attribute limit constraints such as "at most 2
+`Task`s from the same `Job` may run on each rack". When the scheduler
+finds a suitable match, it assigns the `Task` to a machine and puts the
+`Task` into the `ASSIGNED` state.
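+
+As an illustration of such constraints, a sketch in the job configuration DSL
+might look like the following; the `rack` attribute and the `www-data/web`
+dedicated group are assumptions about how a particular cluster's agents are
+configured, not values Aurora defines:
+
+```python
+# Sketch only: attribute names must match attributes actually set on your agents.
+jobs = [Service(
+  cluster = 'cluster1',
+  role = 'www-data',
+  environment = 'prod',
+  name = 'web',
+  task = web_task,               # task object defined elsewhere in the config
+  instances = 4,
+  constraints = {
+    'rack': 'limit:2',           # at most 2 instances of this job per rack
+    'dedicated': 'www-data/web'  # only run on machines dedicated to this group
+  }
+)]
+```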
+
+From the `ASSIGNED` state, the scheduler sends an RPC to the agent
+machine containing `Task` configuration, which the agent uses to spawn
+an executor responsible for the `Task`'s lifecycle. When the scheduler
+receives an acknowledgment that the machine has accepted the `Task`,
+the `Task` goes into `STARTING` state.
+
+The `STARTING` state initializes the `Task` sandbox. When the sandbox is fully
+initialized, Thermos begins to invoke `Process`es. The agent machine
+reports the `Task` to the scheduler as being in the `RUNNING` state only after
+the task satisfies its liveness requirements.
+See [Health Checking](../features/services#health-checking) for more details
+on how to configure health checks.
+
+
+
+## RUNNING to terminal states
+
+There are various ways that an active `Task` can transition into a terminal
+state. By definition, it can never leave this state. However, depending on
+the nature of the termination and the originating `Job` definition
+(e.g. `service`, `max_task_failures`), a replacement `Task` might be
+scheduled.
+
+### Natural Termination: FINISHED, FAILED
+
+A `RUNNING` `Task` can terminate without direct user interaction. For
+example, it may be a finite computation that finishes, even something as
+simple as `echo hello world.`, or it could be an exceptional condition in
+a long-lived service. If the `Task` is successful (its underlying
+processes have succeeded with exit status `0` or finished without
+reaching failure limits) it moves into `FINISHED` state. If it finished
+after reaching a set of failure limits, it goes into `FAILED` state.
+
+A terminated `Task` that is subject to rescheduling will be temporarily
+`THROTTLED` if it is considered to be flapping. A task is flapping if its
+previous invocation was terminated after less than 5 minutes (the scheduler
+default). The time penalty a task has to remain in the `THROTTLED` state
+before it is eligible for rescheduling increases with each consecutive
+failure.
+
+### Forceful Termination: KILLING, RESTARTING
+
+You can terminate a `Task` by issuing an `aurora job kill` command, which
+moves it into `KILLING` state. The scheduler then sends the agent a
+request to terminate the `Task`. If the scheduler receives a successful
+response, it moves the Task into `KILLED` state and never restarts it.
+
+If a `Task` is forced into the `RESTARTING` state via the `aurora job restart`
+command, the scheduler kills the underlying task but in parallel schedules
+an identical replacement for it.
+
+In any case, the responsible executor on the agent follows an escalation
+sequence when killing a running task:
+
+  1. If a `HttpLifecycleConfig` is not present, skip to (4).
+  2. Send a POST to the `graceful_shutdown_endpoint` and wait
+  `graceful_shutdown_wait_secs` seconds.
+  3. Send a POST to the `shutdown_endpoint` and wait
+  `shutdown_wait_secs` seconds.
+  4. Send SIGTERM (`kill`) and wait at most `finalization_wait` seconds.
+  5. Send SIGKILL (`kill -9`).
+
+If the executor notices that all `Process`es in a `Task` have aborted
+during this sequence, it will not proceed with subsequent steps.
+Note that graceful shutdown is best-effort, and due to the many
+inevitable realities of distributed systems, it may not be performed.
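+
+A sketch of how a job might opt into this HTTP lifecycle, assuming the
+`lifecycle`/`HttpLifecycleConfig` structures from the configuration reference;
+the endpoint paths and wait times shown here are illustrative values, not
+verified defaults:
+
+```python
+# Illustrative only: the endpoints must match what the service actually implements.
+web_service = Service(
+  # ... other job fields ...
+  lifecycle = LifecycleConfig(
+    http = HttpLifecycleConfig(
+      graceful_shutdown_endpoint = '/quitquitquit',  # POSTed first (step 2 above)
+      graceful_shutdown_wait_secs = 5,
+      shutdown_endpoint = '/abortabortabort',        # POSTed next (step 3 above)
+      shutdown_wait_secs = 5
+    )
+  )
+)
+```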
+
+### Unexpected Termination: LOST
+
+If a `Task` stays in a transient task state for too long (such as `ASSIGNED`
+or `STARTING`), the scheduler forces it into `LOST` state, creating a new
+`Task` in its place that's sent into `PENDING` state.
+
+In addition, if the Mesos core tells the scheduler that an agent has
+become unhealthy (or outright disappeared), the `Task`s assigned to that
+agent go into `LOST` state and new `Task`s are created in their place.
+From `PENDING` state, there is no guarantee a `Task` will be reassigned
+to the same machine unless job constraints explicitly force it there.
+
+
+## RUNNING to PARTITIONED states
+If Aurora is configured to enable partition awareness, a task which is in a
+running state can transition to `PARTITIONED`. This happens when the state
+of the task in Mesos becomes unknown. By default Aurora errs on the side of
+availability, so all tasks that transition to `PARTITIONED` are immediately
+transitioned to `LOST`.
+
+This policy is not ideal for all types of workloads you may wish to run in
+your Aurora cluster, e.g. for jobs where task failures are very expensive.
+So job owners may set their own `PartitionPolicy` where they can control
+how long to remain in `PARTITIONED` before transitioning to `LOST`. Or they
+can disable any automatic attempts to `reschedule` when in `PARTITIONED`,
+effectively waiting out the partition for as long as possible.
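+
+A sketch of what such a policy might look like in a job definition; this assumes
+a `PartitionPolicy` struct exposing `reschedule` and `delay_secs` fields, and the
+values shown are only examples:
+
+```python
+# Keep expensive tasks PARTITIONED for 10 minutes before declaring them LOST.
+expensive_job = Service(
+  # ... other job fields ...
+  partition_policy = PartitionPolicy(
+    reschedule = True,  # set to False to wait out the partition indefinitely
+    delay_secs = 600
+  )
+)
+```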
+
+
+## PARTITIONED and transient states
+The `PartitionPolicy` provided by users only applies to tasks which are
+currently running. When tasks are moving in and out of transient states,
+e.g. tasks being updated, restarted, preempted, etc., `PARTITIONED` tasks
+are moved immediately to `LOST`. This prevents situations where system
+or user-initiated actions are blocked indefinitely waiting for partitions
+to resolve (that may never be resolved).
+
+
+### Giving Priority to Production Tasks: PREEMPTING
+
+Sometimes a Task needs to be interrupted, such as when a non-production
+Task's resources are needed by a higher-priority production Task. This
+type of interruption is called a *pre-emption*. When this happens in
+Aurora, the non-production Task is killed and moved into
+the `PREEMPTING` state when both of the following are true:
+
+- The task being killed is a non-production task.
+- The other task is a `PENDING` production task that hasn't been
+  scheduled due to a lack of resources.
+
+The scheduler UI shows the non-production task was preempted in favor of
+the production task. At some point, tasks in `PREEMPTING` move to `KILLED`.
+
+Note that non-production tasks consuming many resources are likely to be
+preempted in favor of production tasks.
+
+### Making Room for Maintenance: DRAINING
+
+Cluster operators can put an agent into maintenance mode. This will transition
+all `Task`s running on this agent into `DRAINING` and eventually to `KILLED`.
+Drained `Task`s will be restarted on other agents for which no maintenance
+has been announced yet.
+
+
+
+## State Reconciliation
+
+Due to the many inevitable realities of distributed systems, there might
+be a mismatch of perceived and actual cluster state (e.g. a machine returns
+from a `netsplit` but the scheduler has already marked all its `Task`s as
+`LOST` and rescheduled them).
+
+Aurora regularly runs a state reconciliation process in order to detect
+and correct such issues (e.g. by killing the errant `RUNNING` tasks).
+By default, the proper detection of all failure scenarios and inconsistencies
+may take up to an hour.
+
+To emphasize this point: there is no uniqueness guarantee for a single
+instance of a job in the presence of network partitions. If the `Task`
+requires that, it should be baked in at the application level using a
+distributed coordination service such as ZooKeeper.
diff --git a/source/documentation/0.5.0-incubating/client-commands.md b/source/documentation/0.5.0-incubating/client-commands.md
new file mode 100644
index 0000000..986614d
--- /dev/null
+++ b/source/documentation/0.5.0-incubating/client-commands.md
@@ -0,0 +1,482 @@
+Aurora Client Commands
+======================
+
+The most up-to-date reference is in the client itself: use the
+`aurora help` subcommand (for example, `aurora help` or
+`aurora help create`) to find the latest information on parameters and
+functionality. Note that `aurora help open` does not work, due to underlying issues with
+reflection.
+
+- [Introduction](#introduction)
+- [Cluster Configuration](#cluster-configuration)
+- [Job Keys](#job-keys)
+- [Modifying Aurora Client Commands](#modifying-aurora-client-commands)
+- [Regular Jobs](#regular-jobs)
+    - [Creating and Running a Job](#creating-and-running-a-job)
+    - [Running a Command On a Running Job](#running-a-command-on-a-running-job)
+    - [Killing a Job](#killing-a-job)
+    - [Updating a Job](#updating-a-job)
+    - [Renaming a Job](#renaming-a-job)
+    - [Restarting Jobs](#restarting-jobs)
+- [Cron Jobs](#cron-jobs)
+- [Comparing Jobs](#comparing-jobs)
+- [Viewing/Examining Jobs](#viewingexamining-jobs)
+    - [Listing Jobs](#listing-jobs)
+    - [Inspecting a Job](#inspecting-a-job)
+    - [Versions](#versions)
+    - [Checking Your Quota](#checking-your-quota)
+    - [Finding a Job on Web UI](#finding-a-job-on-web-ui)
+    - [Getting Job Status](#getting-job-status)
+    - [Opening the Web UI](#opening-the-web-ui)
+    - [SSHing to a Specific Task Machine](#sshing-to-a-specific-task-machine)
+    - [Templating Command Arguments](#templating-command-arguments)
+
+Introduction
+------------
+
+Once you have written an `.aurora` configuration file that describes
+your Job and its parameters and functionality, you interact with Aurora
+using Aurora Client commands. This document describes all of these commands
+and how and when to use them. All Aurora Client commands start with
+`aurora`, followed by the name of the specific command and its
+arguments.
+
+*Job keys* are a very common argument to Aurora commands, as well as the
+gateway to useful information about a Job. Before using Aurora, you
+should read the next section which describes them in detail. The section
+after that briefly describes how you can modify the behavior of certain
+Aurora Client commands, linking to a detailed document about how to do
+that.
+
+This is followed by the Regular Jobs section, which describes the basic
+Client commands for creating, running, and manipulating Aurora Jobs.
+After that are sections on Comparing Jobs and Viewing/Examining Jobs. In
+other words, various commands for getting information and metadata about
+Aurora Jobs.
+
+Cluster Configuration
+---------------------
+
+The client must be able to find a configuration file that specifies available clusters. This file
+declares shorthand names for clusters, which are in turn referenced by job configuration files
+and client commands.
+
+The client will load at most two configuration files, making both of their defined clusters
+available. The first is intended to be a system-installed cluster file, using the path specified in
+the environment variable `AURORA_CONFIG_ROOT`, defaulting to `/etc/aurora/clusters.json` if the
+environment variable is not set. The second is a user-installed file, located at
+`~/.aurora/clusters.json`.
+
+A cluster configuration is formatted as JSON.  The simplest cluster configuration is one that
+communicates with a single (non-leader-elected) scheduler.  For example:
+
+```javascript
+[{
+  "name": "example",
+  "scheduler_uri": "localhost:55555",
+}]
+```
+
+A configuration for a leader-elected scheduler would contain something like:
+
+```javascript
+[{
+  "name": "example",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler"
+}]
+```
+
+Job Keys
+--------
+
+A job key is a unique system-wide identifier for an Aurora-managed
+Job, for example `cluster1/web-team/test/experiment204`. It is a 4-tuple
+consisting of, in order, *cluster*, *role*, *environment*, and
+*jobname*, separated by /s. Cluster is the name of an Aurora
+cluster. Role is the Unix service account under which the Job
+runs. Environment is a namespace component like `devel`, `test`,
+`prod`, or `stagingN.` Jobname is the Job's name.
+
+The combination of all four values uniquely specifies the Job. If any
+one value is different from that of another job key, the two job keys
+refer to different Jobs. For example, job key
+`cluster1/tyg/prod/workhorse` is different from
+`cluster1/tyg/prod/workcamel` is different from
+`cluster2/tyg/prod/workhorse` is different from
+`cluster2/foo/prod/workhorse` is different from
+`cluster1/tyg/test/workhorse.`
+
+Role names are user accounts existing on the slave machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
+Modifying Aurora Client Commands
+--------------------------------
+
+For certain Aurora Client commands, you can define hook methods that run
+either before or after an action that takes place during the command's
+execution, as well as based on whether the action finished successfully or failed
+during execution. Basically, a hook is code that lets you extend the
+command's actions. The hook executes on the client side, specifically on
+the machine executing Aurora commands.
+
+Hooks can be associated with these Aurora Client commands:
+
+  - `cancel_update`
+  - `create`
+  - `kill`
+  - `restart`
+  - `update`
+
+The process for writing and activating them is complex enough
+that we explain it in a devoted document, [Hooks for Aurora Client API](/documentation/0.5.0-incubating/hooks/).
+
+Regular Jobs
+------------
+
+This section covers Aurora commands related to running, killing,
+renaming, updating, and restarting a basic Aurora Job.
+
+### Creating and Running a Job
+
+    aurora create <job key> <configuration file>
+
+Creates and then runs a Job with the specified job key based on a `.aurora` configuration file.
+The configuration file may also contain and activate hook definitions.
+
+`create` can take four named parameters:
+
+- `-E NAME=VALUE` Bind a Thermos mustache variable name to a
+  value. Multiple flags specify multiple values. Defaults to `[]`.
+- `-o, --open_browser` Open a browser window to the scheduler UI Job
+  page after a job changing operation happens. When `False`, the Job
+  URL prints on the console and the user has to copy/paste it
+  manually. Defaults to `False`. Does not work when running in Vagrant.
+- `-j, --json` If specified, the configuration argument is read as a
+  string in JSON format. Defaults to `False`.
+- `--wait_until=STATE` Block the client until all the Tasks have
+  transitioned into the requested state. Possible values are: `PENDING`,
+  `RUNNING`, `FINISHED`. Default: `PENDING`.
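+
+For example (the variable, job key, and file name below are placeholders), a
+`create` that binds a mustache variable and blocks until all Tasks are running
+might look like:
+
+    aurora create --wait_until=RUNNING -E profile=devel cluster1/www-data/devel/hello hello_world.aurora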
+
+### Running a Command On a Running Job
+
+    aurora run <job_key> <cmd>
+
+Runs a shell command on all machines currently hosting shards of a
+single Job.
+
+`run` supports the same command line wildcards used to populate a Job's
+commands; i.e. anything in the `{{mesos.*}}` and `{{thermos.*}}`
+namespaces.
+
+`run` can take three named parameters:
+
+- `-t NUM_THREADS`, `--threads=NUM_THREADS` The number of threads to
+  use, defaulting to `1`.
+- `--user=SSH_USER` ssh as this user instead of the given role value.
+  Defaults to None.
+- `-e, --executor_sandbox`  Run the command in the executor sandbox
+  instead of the Task sandbox. Defaults to False.
+
+### Killing a Job
+
+    aurora kill <job key> <configuration file>
+
+Kills all Tasks associated with the specified Job, blocking until all
+are terminated. Defaults to killing all shards in the Job.
+
+The `<configuration file>` argument for `kill` is optional. Use it only
+if it contains hook definitions and activations that affect the
+kill command.
+
+`kill` can take two named parameters:
+
+- `-o, --open_browser` Open a browser window to the scheduler UI Job
+  page after a job changing operation happens. When `False`, the Job
+  URL prints on the console and the user has to copy/paste it
+  manually. Defaults to `False`. Does not work when running in Vagrant.
+- `--shards=SHARDS` A list of shard ids to act on. Can either be a
+  comma-separated list (e.g. 0,1,2) or a range (e.g. 0-2) or  any
+  combination of the two (e.g. 0-2,5,7-9). Defaults to acting on all
+  shards.
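+
+For example (the job key is a placeholder), to kill only a subset of shards:
+
+    aurora kill --shards=0-2,5 cluster1/www-data/devel/hello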
+
+### Updating a Job
+
+    aurora update [--shards=ids] <job key> <configuration file>
+    aurora cancel_update <job key> <configuration file>
+
+Given a running job, does a rolling update to reflect a new
+configuration version. Only updates Tasks in the Job with a changed
+configuration. You can further restrict the operated on Tasks by
+using `--shards` and specifying a comma-separated list of job shard ids.
+
+You may want to run `aurora diff` beforehand to validate which Tasks
+have different configurations.
+
+Jobs being updated are locked to ensure the update finishes without
+disruption. If the update terminates abnormally, the lock may stay
+around and cause failure of subsequent update attempts.
+`aurora cancel_update` unlocks the Job specified by
+its `job_key` argument. Be sure you don't issue `cancel_update` when
+another user is working with the specified Job.
+
+The `<configuration file>` argument for `cancel_update` is optional. Use
+it only if it contains hook definitions and activations that affect the
+`cancel_update` command. The `<configuration file>` argument for
+`update` is required, but in addition to a new configuration it can be
+used to define and activate hooks for `update`.
+
+`update` can take four named parameters:
+
+- `--shards=SHARDS` A list of shard ids to update. Can either be a
+  comma-separated list (e.g. 0,1,2) or a range (e.g. 0-2) or  any
+  combination of the two (e.g. 0-2,5,7-9). If not  set, all shards are
+  acted on. Defaults to None.
+- `-E NAME=VALUE` Binds a Thermos mustache variable name to a value.
+  Use multiple flags to specify multiple values. Defaults to `[]`.
+- `-j, --json` If specified, configuration is read in JSON format.
+  Defaults to `False`.
+- `--updater_health_check_interval_seconds=HEALTH_CHECK_INTERVAL_SECONDS`
+  Time interval between subsequent shard status checks. Defaults to `3`.
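+
+For example (the job key and file name are placeholders), to roll the new
+configuration out to the first five shards only:
+
+    aurora update --shards=0-4 cluster1/www-data/prod/hello hello_world.aurora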
+
+### Renaming a Job
+
+Renaming is a tricky operation as downstream clients must be informed of
+the new name. A conservative approach
+to renaming suitable for production services is:
+
+1.  Modify the Aurora configuration file to change the role,
+    environment, and/or name as appropriate to the standardized naming
+    scheme.
+2.  Check that only these naming components have changed
+    with `aurora diff`.
+
+        aurora diff <job_key> <job_configuration>
+
+3.  Create the (identical) job at the new key. You may need to request a
+    temporary quota increase.
+
+        aurora create <new_job_key> <job_configuration>
+
+4.  Migrate all clients over to the new job key. Update all links and
+    dashboards. Ensure that both job keys run identical versions of the
+    code while in this state.
+5.  After verifying that all clients have successfully moved over, kill
+    the old job.
+
+        aurora kill <old_job_key>
+
+6.  If you received a temporary quota increase, be sure to let the
+    powers that be know you no longer need the additional capacity.
+
+### Restarting Jobs
+
+`restart` restarts all shards of the Job identified by the given job key:
+
+    aurora restart <job_key> <configuration file>
+
+Restarts are controlled on the client side, so aborting
+the `restart` command halts the restart operation.
+
+`restart` does a rolling restart. You almost always want to do this, but
+not if all shards of a service are misbehaving and are
+completely dysfunctional. To not do a rolling restart, use
+the `--shards` option described below.
+
+**Note**: `restart` only applies its command line arguments; it neither uses
+nor is affected by `update.config`. Restarting
+does ***not*** involve a configuration change. To update the
+configuration, use `update.config`.
+
+The `<configuration file>` argument for restart is optional. Use it only
+if it contains hook definitions and activations that affect the
+`restart` command.
+
+In addition to the required job key argument, there are eight
+`restart` specific optional arguments:
+
+- `--updater_health_check_interval_seconds`: Defaults to `3`, the time
+  interval between subsequent shard status checks.
+- `--shards=SHARDS`: Defaults to None, which restarts all shards.
+  Otherwise, only the specified-by-id shards restart. They can be
+  comma-separated `(0, 8, 9)`, a range `(3-5)` or a
+  combination `(0, 3-5, 8, 9-11)`.
+- `--batch_size`: Defaults to `1`, the number of shards to be started
+  in one iteration. So, for example, for value 3, it tries to restart
+  the first three shards specified by `--shards` simultaneously, then
+  the next three, and so on.
+- `--max_per_shard_failures=MAX_PER_SHARD_FAILURES`: Defaults to `0`,
+  the maximum number of restarts per shard during restart. When
+  exceeded, it increments the total failure count.
+- `--max_total_failures=MAX_TOTAL_FAILURES`: Defaults to `0`, the
+  maximum total number of shard failures tolerated during restart.
+- `-o, --open_browser` Open a browser window to the scheduler UI Job
+  page after a job changing operation happens. When `False`, the Job
+  url prints on the console and the user has to copy/paste it
+  manually. Defaults to `False`. Does not work when running in Vagrant.
+- `--restart_threshold`: Defaults to `60`, the maximum number of
+  seconds within which a shard must move into the `RUNNING` state
+  before it's considered a failure.
+- `--watch_secs`: Defaults to `45`, the minimum number of seconds a
+  shard must remain in the `RUNNING` state before it's considered a success.
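+
+For example (the job key is a placeholder), to restart three shards at a time
+and require each batch to stay in `RUNNING` for a minute before moving on:
+
+    aurora restart --batch_size=3 --watch_secs=60 cluster1/www-data/prod/hello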
+
+Cron Jobs
+---------
+
+You will see various commands and options relating to cron jobs in
+`aurora -help` and similar. Ignore them, as they're not yet implemented.
+You might be able to use them without causing an error, but nothing happens
+if you do.
+
+Comparing Jobs
+--------------
+
+    aurora diff <job_key> config
+
+Compares a job configuration against a running job. By default the diff
+is determined using `diff`, though you may choose an alternate
+ diff program by specifying the `DIFF_VIEWER` environment variable.
+
+There are two named parameters:
+
+- `-E NAME=VALUE` Bind a Thermos mustache variable name to a
+  value. Multiple flags may be used to specify multiple values.
+  Defaults to `[]`.
+- `-j, --json` Read the configuration argument in JSON format.
+  Defaults to `False`.
+
+Viewing/Examining Jobs
+----------------------
+
+Above we discussed creating, killing, and updating Jobs. Here we discuss
+how to view and examine Jobs.
+
+### Listing Jobs
+
+    aurora list_jobs cluster/role
+
+Lists all Jobs registered with the Aurora scheduler in the named cluster for the named role.
+
+It has two named parameters:
+
+- `--pretty`: Displays job information in pretty-printed format.
+  Defaults to `False`.
+- `-c`, `--show-cron`: Shows cron schedule for jobs. Defaults to
+  `False`. Do not use, as it's not yet implemented.
+
+### Inspecting a Job
+
+    aurora inspect <job_key> config
+
+`inspect` verifies that its specified job can be parsed from a
+configuration file, and displays the parsed configuration. It has four
+named parameters:
+
+- `--local`: Inspect the configuration that the  `spawn` command would
+  create, defaulting to `False`.
+- `--raw`: Shows the raw configuration. Defaults to `False`.
+- `-j`, `--json`: If specified, configuration is read in JSON format.
+  Defaults to `False`.
+- `-E NAME=VALUE`: Bind a Thermos Mustache variable name to a value.
+  You can use multiple flags to specify multiple values. Defaults
+  to `[]`
+
+### Versions
+
+    aurora version
+
+Lists client build information and what Aurora API version it supports.
+
+### Checking Your Quota
+
+    aurora get_quota --cluster=CLUSTER role
+
+Prints the production quota allocated to the role in the given
+cluster.
+
+### Finding a Job on Web UI
+
+When you create a job, part of the output response contains a URL that goes
+to the job's scheduler UI page. For example:
+
+    vagrant@precise64:~$ aurora create example/www-data/prod/hello /vagrant/examples/jobs/hello_world.aurora
+    INFO] Creating job hello
+    INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+    INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+You can go to the scheduler UI page for this job via `http://precise64:8081/scheduler/www-data/prod/hello`.
+You can go to the overall scheduler UI page by going to the part of that URL that ends at `scheduler`: `http://precise64:8081/scheduler`.
+
+Once you click through to a role page, you see Jobs arranged
+separately by pending jobs, active jobs and finished jobs.
+Jobs are arranged by role, typically a service account for
+production jobs and user accounts for test or development jobs.
+
+### Getting Job Status
+
+    aurora status <job_key>
+
+Returns the status of recent tasks associated with the
+`job_key` specified Job in its supplied cluster. Typically this includes
+a mix of active tasks (running or assigned) and inactive tasks
+(successful, failed, and lost.)
+
+### Opening the Web UI
+
+Use the Job's web UI scheduler URL or the `aurora status` command to find out on which
+machines individual tasks are scheduled. You can open the web UI via the
+`open` command line command if invoked from your machine:
+
+    aurora open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+If only the cluster is specified, it goes directly to that cluster's
+scheduler main page. If the role is specified, it goes to the top-level
+role page. If the full job key is specified, it goes directly to the job
+page where you can inspect individual tasks.
+
+### SSHing to a Specific Task Machine
+
+    aurora ssh <job_key> <shard number>
+
+You can have the Aurora client ssh directly to the machine that has been
+assigned a particular Job/shard number. This may be useful for quickly
+diagnosing issues such as performance issues or abnormal behavior on a
+particular machine.
+
+It can take three named parameters:
+
+- `-e`, `--executor_sandbox`:  Run `ssh` in the executor sandbox
+  instead of the  task sandbox. Defaults to `False`.
+- `--user=SSH_USER`: `ssh` as the given user instead of as the role in
+  the `job_key` argument. Defaults to none.
+- `-L PORT:NAME`: Add tunnel from local port `PORT` to the remote
+  named port  `NAME`. Defaults to `[]`.
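+
+For example (the job key and the `http` port name are placeholders), to ssh to
+the machine running shard 3 and tunnel its `http` port to local port 8080:
+
+    aurora ssh -L 8080:http cluster1/www-data/prod/hello 3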
+
+### Templating Command Arguments
+
+    aurora run [-e] [-t THREADS] <job_key> -- <<command-line>>
+
+Given a job specification, run the supplied command on all hosts and
+return the output. You may use the standard Mustache templating rules:
+
+- `{{thermos.ports[name]}}` substitutes the specific named port of the
+  task assigned to this machine
+- `{{mesos.instance}}` substitutes the shard id of the job's task
+  assigned to this machine
+- `{{thermos.task_id}}` substitutes the task id of the job's task
+  assigned to this machine
+
+For example, the following type of pattern can be a powerful diagnostic
+tool:
+
+    aurora run -t5 cluster1/tyg/devel/seizure -- \
+      'curl -s -m1 localhost:{{thermos.ports[http]}}/vars | grep uptime'
+
+By default, the command runs in the Task's sandbox. The `-e` option can
+run the command in the executor's sandbox. This is mostly useful for
+Aurora administrators.
+
+You can parallelize the runs by using the `-t` option.
diff --git a/source/documentation/0.5.0-incubating/clientv2.md b/source/documentation/0.5.0-incubating/clientv2.md
new file mode 100644
index 0000000..6e69af3
--- /dev/null
+++ b/source/documentation/0.5.0-incubating/clientv2.md
@@ -0,0 +1,405 @@
+Aurora Client v2
+=================
+
+Overview
+-----------
+
+Our goal is to replace the current Aurora command-line client. The
+current client suffers from an early monolithic structure, and a long
+development history of rapid unplanned evolution.
+
+In addition to its internal problems, the current Aurora client is
+confusing for users. There are several different kinds of objects
+manipulated by the Aurora command line, and the difference between
+them is often not clear. (What's the difference between a job and a
+configuration?) For each type of object, there are different commands,
+and it's hard to remember which command should be used for which kind
+of object.
+
+Instead of continuing to let the Aurora client develop and evolve
+randomly, it's time to take a principled look at the Aurora command
+line, and figure out how to make our command line processing make
+sense. At the same time, the code needs to be cleaned up, and divided
+into small comprehensible units based on a plugin architecture.
+
+Doing this now will give us a more intuitive, consistent, and easy to
+use client, as well as a sound platform for future development.
+
+Goals
+-------
+
+* A command line tool for interacting with Aurora that is easy for
+  users to understand.
+* A noun/verb command model.
+* A modular source-code architecture.
+* Non-disruptive transition for users.
+
+Non-Goals
+----------
+
+* The most important non-goal is that we're not trying to redesign the
+  Aurora scheduler, the Aurora executor, or any of the peripheral tools
+  that the Aurora command line interacts with; we only want to create a
+  better command line client.
+* We do not want to change thermos, mesos, hadoop, etc.
+* We do not want to create new objects that users will work with to
+  interact with Mesos or Aurora.
+* We do not want to change Aurora job configuration files or file formats.
+* We do not want to change the Aurora API.
+* We don't want to boil the ocean: there are many things that we could
+  include in the scope of this project, but we don't want to be
+  distracted by re-implementing all of twitter.commons in order to
+  create a perfect Aurora client.
+
+
+Background
+-----------
+
+Aurora is a system that's used to run and manage services and
+service-like jobs running in a datacenter. Aurora takes care of
+allocating resources in order to schedule and run jobs without
+requiring teams to manage dedicated hardware. The heart of Aurora is
+called the scheduler, and is responsible for finding and assigning
+resources to tasks.
+
+The Aurora scheduler provides a thrift API. The scheduler API is
+low-level and difficult to interact with. Users do not interact
+directly with the Aurora API; instead, they use a command-line tool,
+which provides a collection of easy-to-use commands. This command-line
+tool, in turn, talks to the scheduler API to launch and manage jobs in
+datacenter clusters. The command-line tool is called the Aurora
+client.
+
+The current implementation of the Aurora client is haphazard,
+and really needs to be cleaned up:
+
+- The code is monolithic and hard to maintain. It's implemented using
+  `twitter.common.app`, which assumes that all of the command code lives
+  in a single source file. To work around this, and allow some
+  subdivision, it uses a hack of `twitter.common.app` to force
+  registration of commands from multiple modules. It's hard to
+  understand, and hard to modify.
+- The current code is very difficult to test. Because of the way it's
+  built, there is no consistent way of passing key application data
+  around. As a result, each unit test of client operations needs a
+  difficult-to-assemble custom setup of mock objects.
+- The current code handles errors poorly, and it is difficult to
+  fix. Many common errors produce unacceptable results. For example,
+  issuing an unknown command generates an error message "main takes 0
+  parameters but received 1"; passing an invalid parameter to other
+  commands frequently produces a stack trace.
+- The current command line is confusing for users. There are several
+  different kinds of objects manipulated by the Aurora command line,
+  and the difference between them is often not entirely clear. (What's
+  the difference between a job and a configuration?)
+  For each type of object, there are different
+  commands, and it's frequently not clear just which command should be
+  used for which object.
+
+
+Instead of continuing to let it develop and evolve randomly, it's time
+to take a principled look at the Aurora command line, and figure out
+how to make command line processing make sense. At the same time, the
+code needs to be cleaned up, and divided into small comprehensible
+units based on a plugin architecture.
+
+Requirements
+-------------
+
+Aurora is aimed at engineers who run jobs and services in a
+datacenter. As a result, the requirements for the aurora client are
+all engineering focused:
+
+* __Consistency__: commands should follow a consistent structure, so that
+  users can apply knowledge and intuition gained from working with
+  some aurora commands to new commands. This means that when commands
+  can re-use the same options, they should; that objects should be
+  referred to by consistent syntax throughout the tool.
+* __Helpfulness__: commands should be structured so that the system can
+  generate helpful error messages. If a user just runs "aurora", they
+  should get a basic usage message. If they try to run an invalid
+  command, they should get a message that the command is invalid, not
+  a stack dump or "command main() takes 0 parameters but received
+  2". Commands should not generate extraneous output that obscures the
+  key facts that the user needs to know, and the default behavior of
+  commands should not generate outputs that will be routinely ignored
+  by users.
+* __Extensibility__: it should be easy to plug in new commands,
+  including custom commands, to adapt the Aurora client to new
+  environments.
+* __Script-friendly command output__: every command should at least include
+  an option that generates output that's script-friendly. Scripts should be
+  able to work with command-output without needing to do screen scraping.
+* __Scalability__: the tools should be usable for any foreseeable size
+  of Aurora datacenters and machine clusters.
+
+Design Overview
+-----------------
+
+The Aurora client will be reimplemented using a noun-verb model,
+similar to the cmdlet model used by Monad/Windows Powershell. Users
+will work by providing a noun for the type of object being operated
+on, and a verb for the specific operation being performed on the
+object, followed by parameters. For example, to create a job, the user
+would execute: "`aurora job create smfd/mchucarroll/devel/jobname
+job.aurora`". The noun is `job` and the verb is `create`.
+
+The client will be implemented following that noun-verb
+convention. Each noun will be a separate component, which can be
+registered into the command-line framework. Each verb will be
+implemented by a class that registers with the appropriate noun. Nouns
+and verbs will each provide methods that add their command line
+options and parameters to the options parser, using the Python
+argparse library.
+
+Detailed Design
+-----------------
+
+### Interface
+
+In this section, we'll walk through the types of objects that the
+client can manipulate, and the operations that need to be provided for
+each object. These form the primary interface that engineers will use
+to interact with Aurora.
+
+In the command-line, each of the object types will have an Aurora
+subcommand. The commands to manipulate the object type will follow the
+type. For example, here are several commands in the old syntax
+contrasted against the new noun/verb syntax.
+
+* Get quota for a role:
+   * Noun/Verb syntax:  `aurora quota get west/www-data`
+   * Old syntax: `aurora get_quota --cluster=west www-data`
+* Create job:
+   * Noun/Verb syntax: `aurora job create west/www-data/test/job job.aurora`
+   * Old syntax: `aurora create west/www-data/test/job job.aurora`
+* Schedule a job to run at a specific interval:
+   * Noun/verb: `aurora cron schedule east/www-data/test/job job.aurora`
+   * Old: `aurora create east/www-data/test/job job.aurora`
+
+As you can see in these examples, the new syntax is more consistent:
+you always specify the cluster where a command executes as part of an
+identifier, whereas in the old syntax, it was sometimes part of the
+jobkey and sometimes specified with a "--cluster" option.
+
+The new syntax is also clearer and more explicit: even without knowing
+much about Aurora, it's clear what objects each command is acting on,
+whereas in the old syntax, commands like "create" are unclear.
+
+### The Job Noun
+
+A job is a configured program ready to run in Aurora. A job is,
+conceptually, a task factory: when a job is submitted to the Aurora
+scheduler, it creates a collection of tasks. The job contains a
+complete description of everything it needs to create a collection of
+tasks. (Note that this subsumes "service" commands. A service is just
+a task whose configuration sets the is_service flag, so we don't have
+separate commands for working with services.) Jobs are specified using
+`cluster/role/env/name` jobkey syntax.
+
+* `aurora job create *jobkey* *config*`:  submits a job to a cluster, launching the task(s) specified by the job config.
+* `aurora job status *jobkey*`: query job status. Prints information about the job,
+  whether it's running, etc., to standard out. If the jobkey includes
+  globs, it should list all jobs that match the glob.
+* `aurora job kill *jobkey*/*instanceids*`: kill/stop some of a job's instances. This stops the job's tasks; if the job
+  has service tasks, they'll be disabled so that they won't restart.
+* `aurora job killall *jobkey*`: kill all of the instances of a job. This
+  is distinct from the *kill* command as a safety measure: omitting the
+  instances from a kill command shouldn't result in destroying the entire job.
+* `aurora job restart *jobkey*`: conceptually, this will kill a job, and then
+  launch it again. If the job does not exist, then fail with an error
+  message.  In fact, the underlying implementation does the
+  kill/relaunch on a rolling basis - so it's not an immediate kill of
+  all shards/instances, followed by a delay as all instances relaunch,
+  but rather a controlled gradual process.
+* `aurora job list *jobkey*`: list all jobs that match the jobkey spec that are
+  registered with the scheduler. This will include both jobs that are
+  currently running, and jobs that are scheduled to run at a later
+  time. The job key can be partial: if it specifies only a cluster, all jobs on that
+  cluster are listed; if it specifies cluster/role, all jobs running under that role on the cluster are listed, and so on.
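+
+For example, a typical sequence of job commands might look like the following
+(the jobkey and the `0-1` instance range are purely illustrative):
+
+    aurora job create west/www-data/test/hello hello.aurora
+    aurora job status west/www-data/test/hello
+    aurora job kill west/www-data/test/hello/0-1
+    aurora job killall west/www-data/test/hello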
+
+### The Schedule Noun (Cron)
+
+Note (3/21/2014): The "cron" noun is _not_ implemented yet.
+
+Cron is a scheduler adjunct that periodically runs a job on a
+schedule. The cron commands all manipulate cron schedule entries. The
+schedules are specified as a part of the job configuration.
+
+* `aurora cron schedule jobkey config`: schedule a job to run by cron.
+* `aurora cron deschedule jobkey`: removes a job's entry from the cron schedule.
+* `aurora cron status jobkey`: query for a scheduled job's status.
+
+### The Quota Noun
+
+A quota is a data object maintained by the scheduler that specifies the maximum
+resources that may be consumed by jobs owned by a particular role. In the future,
+we may add new quota types. At some point, we'll also probably add an administrator
+command to set quotas.
+
+* `aurora quota get *cluster/role*`
+
+
+Implementation
+---------------
+
+The current command line is monolithic. Every command on an Aurora
+object is a top-level command in the Aurora client. In the
+restructured command line, each of the primary object types
+manipulated by Aurora should have its own sub-command.
+
+* Advantages of this approach:
+   * Easier to detangle the command-line processing. The top-level
+     command-processing will be a small set of subcommand
+     processors. Option processing for each subcommand can be offloaded
+     to a separate module.
+   * The aurora top-level help command will be much more
+     comprehensible. Instead of giving a huge list of every possible
+     command, it will present the list of top-level object types, and
+     then users can request help on the commands for a specific type
+     of object.
+   * The sub-commands can be separated into distinct command-line
+     tools when appropriate.
+
+### Command Structure and Options Processing
+
+The implementation will closely follow the model of Pants goals. Pants goals use
+a static registration system to add new subcommands. In Pants, each
+goal command is an implementation of a command interface, and provides
+implementations of methods to register options and parameters, and to
+actually execute the command. In this design, commands are modular and
+easy to implement, debug, and combine in different ways.
+
+For the Aurora client, we plan to use a two-level variation of the
+basic concept from Pants. At the top level we will have nouns. A noun
+will define some common command-line parameters required by all of its
+verbs, and will provide a registration hook for attaching verbs. Nouns
+will be implemented as a subclass of a basic Noun type.
+
+Each verb will, similarly, be implemented as a subclass of Verb. Verbs
+will be able to specify command-line options and parameters.
+
+Both `Noun` and `Verb` will be subclasses of a common base-class `AuroraCommand`:
+
+    class AuroraCommand(object):
+      def get_options(self):
+        """Gets the set of command-line options objects for this command.
+        The result is a list of CommandOption objects.
+        """
+        pass
+
+      @property
+      def help(self):
+        """Returns the help message for this command"""
+
+      @property
+      def usage(self):
+        """Returns a short usage description of the command"""
+
+      @property
+      def name(self):
+        """Returns the command name"""
+
+
+A command-line tool will be implemented as an instance of a `CommandLine`:
+
+    class CommandLine(object):
+      """The top-level object implementing a command-line application."""
+
+      @property
+      def name(self):
+        """Returns the name of this command-line tool"""
+
+      def print_out(self, msg):
+        print(msg)
+
+      def print_err(self, msg):
+        print(msg, file=sys.stderr)
+
+      def register_noun(self, noun):
+        """Adds a noun to the application"""
+
+      def register_plugin(self, plugin):
+        """Adds a configuration plugin to the system"""
+
+
+Nouns are registered into a command-line using the `register_noun`
+method. They are weakly coupled to the application, making it easy to
+use a single noun in several different command-line tools. Nouns allow
+the registration of verbs using the `register_verb` method.
+
+When commands execute, they're given an instance of a *context object*.
+The context object must be an instance of a subclass of `AuroraCommandContext`.
+Options, parameters, and IO are all accessed using the context object. The context
+is created dynamically by the noun object owning the verb being executed. Developers
+are strongly encouraged to implement custom contexts for their nouns, and move functionality
+shared by the noun's verbs into the context object. The context interface is:
+
+    class Context(object):
+      class Error(Exception): pass
+
+      class ArgumentException(Error): pass
+
+      class CommandError(Error): pass
+
+      @classmethod
+      def exit(cls, code, msg):
+        """Exit the application with an error message"""
+        raise cls.CommandError(code, msg)
+
+      def print_out(self, msg, indent=0):
+        """Prints a message to standard out, with an indent"""
+
+      def print_err(self, msg, indent=0):
+        """Prints a message to standard err, with an indent"""
+
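+As an illustration, a verb might be sketched as follows. The `Verb` base class
+and `CommandOption` wrapper are described above; `EXIT_OK` and the
+`context.options` attribute are assumptions of this sketch rather than part of
+the interface defined here:
+
+    class StatusVerb(Verb):
+      """Hypothetical verb sketch implementing 'aurora job status <jobkey>'."""
+
+      @property
+      def name(self):
+        return 'status'
+
+      def get_options(self):
+        # One positional jobkey parameter, wrapped in a CommandOption.
+        return [CommandOption('jobkey', type=str, help='cluster/role/env/name job key')]
+
+      def execute(self, context):
+        # All option access and IO goes through the context object.
+        context.print_out('Status for %s' % context.options.jobkey)
+        return EXIT_OK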
+
+In addition to nouns and verbs, there's one more kind of registerable
+component, called a *configuration plugin*. These objects add a set of
+command-line options that can be passed to *all* of the commands
+implemented in the tool. Before the command is executed, the
+configuration plugin will be invoked, and will process its
+command-line arguments. This is useful for general configuration
+changes, like establishing a secure tunnel to talk to machines in a
+datacenter. (A useful way to think of a plugin is as something like an
+aspect that can be woven into Aurora to provide environment-specific
+configuration.) A configuration plugin is implemented as an instance
+of class `ConfigurationPlugin`, and registered with the
+`register_plugin` method of the `CommandLine` object. The interface of
+a plugin is:
+
+    class ConfigurationPlugin(object):
+      """A component that can be plugged in to a command-line."""
+
+      @abstractmethod
+      def get_options(self):
+        """Return the set of options processed by this plugin"""
+
+      @abstractmethod
+      def execute(self, context):
+        """Run the context/command line initialization code for this plugin."""
+
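+For illustration, a plugin that opens a tunnel to the datacenter before any
+command runs might be sketched as follows (the option name, the
+`context.options` attribute, and the `open_tunnel` helper are invented for
+this sketch):
+
+    class TunnelPlugin(ConfigurationPlugin):
+      """Hypothetical plugin adding a --tunnel-host option to every command."""
+
+      def get_options(self):
+        return [CommandOption('--tunnel-host', default=None,
+                              help='Host to tunnel through before contacting the cluster')]
+
+      def execute(self, context):
+        # Runs before the verb; performs environment-specific setup.
+        if context.options.tunnel_host is not None:
+          open_tunnel(context.options.tunnel_host)  # assumed helper, not part of the framework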
+
+### Command Execution
+
+Options processing and command execution are built as a facade over Python's
+standard argparse library. All of the actual argument processing is done by
+argparse.
+
+Once the options are processed, the framework will start to execute the command. Command execution consists of:
+
+1. Create a context object. The framework will use the argparse options to identify
+   which noun is being invoked, and will call that noun's `create_context` method.
+   The argparse options object will be stored in the context.
+2. Execute any configuration plugins. Before any command is invoked, the framework
+   will first iterate over all of the registered configuration plugins. For each
+   plugin, it will invoke the `execute` method.
+3. The noun will use the context to find out what verb is being invoked, and it will
+   then call that verb's `execute` method.
+4. The command will exit. Its return code will be whatever was returned by the verb's
+   `execute` method.
+
+Commands are expected to return a code from a list of standard exit codes,
+which can be found in `src/main/python/apache/aurora/client/cli/__init__.py`.
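+
+As a rough sketch of the dispatch flow described above (the noun and verb
+lookup attributes and the `context.options` assignment are assumptions of this
+sketch, not part of the interface defined earlier):
+
+    class CommandLine(object):
+      def execute(self, args):
+        """Dispatch a parsed command line: build the context, run plugins, run the verb."""
+        noun = self.nouns[args.noun]          # look up the registered noun
+        context = noun.create_context()       # step 1: the noun builds its context
+        context.options = args                # argparse options are stored in the context
+        for plugin in self.plugins:           # step 2: run all configuration plugins
+          plugin.execute(context)
+        verb = noun.get_verb(args.verb)       # step 3: the noun finds the verb
+        return verb.execute(context)          # step 4: exit code is the verb's return value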
diff --git a/source/documentation/0.5.0-incubating/committers.md b/source/documentation/0.5.0-incubating/committers.md
new file mode 100644
index 0000000..3f68f89
--- /dev/null
+++ b/source/documentation/0.5.0-incubating/committers.md
@@ -0,0 +1,49 @@
+Setting up your email account
+-----------------------------
+Once your Apache ID has been set up, you can configure your account, add SSH keys, and set up an
+email forwarding address at
+
+  http://id.apache.org
+
+Additional instructions for setting up your new committer email can be found at
+
+  http://www.apache.org/dev/user-email.html
+
+The recommended setup is to configure all services (mailing lists, JIRA, ReviewBoard) to send
+emails to your @apache.org email address.
+
+
+Creating a release
+------------------
+The following will guide you through the steps to create a release candidate, vote, and finally an
+official Apache Aurora release. Before starting, your GPG key should be in the KEYS file and you
+must have access to commit to the dist.a.o repositories.
+
+1. Ensure that all issues resolved for this release candidate are tagged with the correct Fix
+Version in Jira; the changelog script will use this to generate the CHANGELOG in step #2.
+
+2. Create a release candidate. This will automatically update the CHANGELOG and commit it, create a
+branch, and update the current version within the trunk. To create a minor version update and publish
+it, run
+
+               ./build-support/release/release-candidate -l m -p
+
+3. Update, if necessary, the draft email created from the `release-candidate` script in step #2 and
+send the [VOTE] email to the dev@ and private@ mailing lists. You can verify the release signature
+and checksums by running
+
+               ./build-support/release/verify-release-candidate
+
+4. Wait for the vote to complete. If the vote fails, address any issues, go back to step #1, and
+run again; this time use the -r flag to increment the release candidate version. This will
+automatically clean up the release candidate rc0 branch and source distribution.
+
+               ./build-support/release/release-candidate -l m -r 1 -p
+
+5. Once the vote has successfully passed, create the release
+
+               ./build-support/release/release
+
+6. Update the draft email created from the `release` script in step #5 to include the Apache IDs for
+all binding votes and send the [RESULT][VOTE] email to the dev@ and private@ mailing lists.
+
diff --git a/source/documentation/0.5.0-incubating/configuration-reference.md b/source/documentation/0.5.0-incubating/configuration-reference.md
new file mode 100644
index 0000000..4ef6737
--- /dev/null
+++ b/source/documentation/0.5.0-incubating/configuration-reference.md
@@ -0,0 +1,502 @@
+Aurora + Thermos Configuration Reference
+========================================
+
+- [Aurora + Thermos Configuration Reference](#aurora--thermos-configuration-reference)
+- [Introduction](#introduction)
+- [Process Schema](#process-schema)
+    - [Process Objects](#process-objects)
+      - [name](#name)
+      - [cmdline](#cmdline)
+      - [max_failures](#max_failures)
+      - [daemon](#daemon)
+      - [ephemeral](#ephemeral)
+      - [min_duration](#min_duration)
+      - [final](#final)
+- [Task Schema](#task-schema)
+    - [Task Object](#task-object)
+      - [name](#name-1)
+      - [processes](#processes)
+        - [constraints](#constraints)
+      - [resources](#resources)
+      - [max_failures](#max_failures-1)
+      - [max_concurrency](#max_concurrency)
+      - [finalization_wait](#finalization_wait)
+    - [Constraint Object](#constraint-object)
+    - [Resource Object](#resource-object)
+- [Job Schema](#job-schema)
+    - [Job Objects](#job-objects)
+    - [Services](#services)
+    - [UpdateConfig Objects](#updateconfig-objects)
+    - [HealthCheckConfig Objects](#healthcheckconfig-objects)
+- [Specifying Scheduling Constraints](#specifying-scheduling-constraints)
+- [Template Namespaces](#template-namespaces)
+    - [mesos Namespace](#mesos-namespace)
+    - [thermos Namespace](#thermos-namespace)
+- [Basic Examples](#basic-examples)
+    - [hello_world.aurora](#hello_worldaurora)
+    - [Environment Tailoring](#environment-tailoring)
+      - [hello_world_productionized.aurora](#hello_world_productionizedaurora)
+
+Introduction
+============
+
+Don't know where to start? The Aurora configuration schema is very
+powerful, and configurations can become quite complex for advanced use
+cases.
+
+For examples of simple configurations to get something up and running
+quickly, check out the [Tutorial](/documentation/0.5.0-incubating/tutorial/). When you feel comfortable with the basics, move
+on to the [Configuration Tutorial](/documentation/0.5.0-incubating/configuration-tutorial/) for more in-depth coverage of
+configuration design.
+
+For additional basic configuration examples, see [the end of this document](#BasicExamples).
+
+Process Schema
+==============
+
+Process objects consist of required `name` and `cmdline` attributes. You can customize Process
+behavior with its optional attributes. Remember, Processes are handled by Thermos.
+
+### Process Objects
+
+  **Attribute Name**  | **Type**    | **Description**
+  ------------------- | :---------: | ---------------------------------
+   **name**           | String      | Process name (Required)
+   **cmdline**        | String      | Command line (Required)
+   **max_failures**   | Integer     | Maximum process failures (Default: 1)
+   **daemon**         | Boolean     | When True, this is a daemon process. (Default: False)
+   **ephemeral**      | Boolean     | When True, this is an ephemeral process. (Default: False)
+   **min_duration**   | Integer     | Minimum duration between process restarts in seconds. (Default: 15)
+   **final**          | Boolean     | When True, this process is a finalizing one that should run last. (Default: False)
+
+#### name
+
+The name is any valid UNIX filename string (specifically no
+slashes, NULLs or leading periods). Within a Task object, each Process name
+must be unique.
+
+#### cmdline
+
+The command line run by the process. The command line is invoked in a bash
+subshell, so it can involve full-blown bash scripts. However, nothing is
+supplied for command-line arguments, so `$*` is unspecified.
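+
+For example, a minimal Process (the command here is purely illustrative) looks like:
+
+    hello = Process(
+      name = 'hello',
+      cmdline = 'echo hello world && sleep 10')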
+
+#### max_failures
+
+The maximum number of failures (non-zero exit statuses) this process can
+have before being marked permanently failed and not retried. If a
+process permanently fails, Thermos looks at the failure limit of the task
+containing the process (usually 1) to determine if the task has
+failed as well.
+
+Setting `max_failures` to 0 makes the process retry
+indefinitely until it achieves a successful (zero) exit status.
+It retries at most once every `min_duration` seconds to prevent
+an effective denial of service attack on the coordinating Thermos scheduler.
+
+#### daemon
+
+By default, Thermos processes are non-daemon. If `daemon` is set to True, a
+successful (zero) exit status does not prevent future process runs.
+Instead, the process reinvokes after `min_duration` seconds.
+However, the maximum failure limit still applies. A combination of
+`daemon=True` and `max_failures=0` causes a process to retry
+indefinitely regardless of exit status. This should be avoided
+for very short-lived processes because of the accumulation of
+checkpointed state for each process run. When running in Mesos
+specifically, `max_failures` is capped at 100.
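+
+For example, a process that re-runs roughly once a minute regardless of exit
+status could be sketched as follows (the endpoint is hypothetical):
+
+    heartbeat = Process(
+      name = 'heartbeat',
+      cmdline = 'curl -s http://monitor.example.com/ping',  # hypothetical endpoint
+      daemon = True,
+      max_failures = 0,
+      min_duration = 60)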
+
+#### ephemeral
+
+By default, Thermos processes are non-ephemeral. If `ephemeral` is set to
+True, the process' status is not used to determine if its containing task
+has completed. For example, consider a task with a non-ephemeral
+webserver process and an ephemeral logsaver process
+that periodically checkpoints its log files to a centralized data store.
+The task is considered finished once the webserver process has
+completed, regardless of the logsaver's current status.
+
+#### min_duration
+
+Processes may succeed or fail multiple times during a single task's
+duration. Each of these is called a *process run*. `min_duration` is
+the minimum number of seconds the scheduler waits before running the
+same process.
+
+#### final
+
+Processes can be grouped into two classes: ordinary processes and
+finalizing processes. By default, Thermos processes are ordinary. They
+run as long as the task is considered healthy (i.e., no failure
+limits have been reached.) But once all regular Thermos processes
+finish or the task reaches a certain failure threshold, it
+moves into a "finalization" stage and runs all finalizing
+processes. These are typically processes necessary for cleaning up the
+task, such as log checkpointers, or perhaps e-mail notifications that
+the task completed.
+
+Finalizing processes may not depend upon ordinary processes or
+vice versa; however, finalizing processes may depend upon other
+finalizing processes and otherwise run as a typical process
+schedule.
+
+Task Schema
+===========
+
+Tasks fundamentally consist of a `name` and a list of Process objects stored as the
+value of the `processes` attribute. Processes can be further constrained with
+`constraints`. By default, `name`'s value inherits from the first Process in the
+`processes` list, so for simple `Task` objects with one Process, `name`
+can be omitted. In Mesos, `resources` is also required.
+
+### Task Object
+
+   **param**               | **type**                         | **description**
+   ---------               | :---------:                      | ---------------
+   ```name```              | String                           | Task name (Required) (Default: ```processes[0].name```)
+   ```processes```         | List of ```Process``` objects    | List of ```Process``` objects bound to this task. (Required)
+   ```constraints```       | List of ```Constraint``` objects | List of ```Constraint``` objects constraining processes.
+   ```resources```         | ```Resource``` object            | Resource footprint. (Required)
+   ```max_failures```      | Integer                          | Maximum process failures before being considered failed (Default: 1)
+   ```max_concurrency```   | Integer                          | Maximum number of concurrent processes (Default: 0, unlimited concurrency.)
+   ```finalization_wait``` | Integer                          | Amount of time allocated for finalizing processes, in seconds. (Default: 30)
+
+#### name
+`name` is a string denoting the name of this task. It defaults to the name of the first Process in
+the list of Processes associated with the `processes` attribute.
+
+#### processes
+
+`processes` is an unordered list of `Process` objects. To constrain the order
+in which they run, use `constraints`.
+
+##### constraints
+
+A list of `Constraint` objects. Currently it supports only one type,
+the `order` constraint. `order` is a list of process names
+that should run in the order given. For example,
+
+        process = Process(cmdline = "echo hello {{name}}")
+        task = Task(name = "echoes",
+                    processes = [process(name = "jim"), process(name = "bob")],
+                    constraints = [Constraint(order = ["jim", "bob"])])
+
+Constraints can be supplied ad-hoc and in duplicate. Not all
+Processes need be constrained; however, Tasks with cycles are
+rejected by the Thermos scheduler.
+
+Use the `order` function as shorthand to generate `Constraint` lists.
+The following:
+
+        order(process1, process2)
+
+is shorthand for
+
+        [Constraint(order = [process1.name(), process2.name()])]
+
+#### resources
+
+Takes a `Resource` object, which specifies the amounts of CPU, memory, and disk space resources
+to allocate to the Task.
+
+#### max_failures
+
+`max_failures` is the number of times processes that are part of this
+Task can fail before the entire Task is marked for failure.
+
+For example:
+
+        template = Process(max_failures=10)
+        task = Task(
+          name = "fail",
+          processes = [
+             template(name = "failing", cmdline = "exit 1"),
+             template(name = "succeeding", cmdline = "exit 0")
+          ],
+          max_failures=2)
+
+The `failing` Process could fail 10 times before being marked as
+permanently failed, and the `succeeding` Process would succeed on the
+first run. The task would succeed despite only allowing for two failed
+processes. To be more specific, there would be 10 failed process runs
+yet 1 failed process.
+
+#### max_concurrency
+
+For Tasks with a number of expensive but otherwise independent
+processes, you may want to limit the amount of concurrency
+the Thermos scheduler provides rather than artificially constraining
+it via `order` constraints. For example, a test framework may
+generate a task with 100 test run processes, but wants to run it on
+a machine with only 4 cores. You can limit the amount of parallelism to
+4 by setting `max_concurrency=4` in your task configuration.
+
+For example, the following task spawns 180 Processes ("mappers")
+to compute individual elements of a 180 degree sine table, all dependent
+upon one final Process ("reducer") to tabulate the results:
+
+    def make_mapper(id):
+      return Process(
+        name = "mapper%03d" % id,
+        cmdline = "echo 'scale=50;s(%d*4*a(1)/180)' | bc -l > temp.sine_table.%03d" % (id, id))
+
+    def make_reducer():
+      return Process(name = "reducer",
+                     cmdline = "cat temp.* | nl > sine_table.txt && rm -f temp.*")
+
+    processes = map(make_mapper, range(180))
+
+    task = Task(
+      name = "mapreduce",
+      processes = processes + [make_reducer()],
+      constraints = [Constraint(order = [mapper.name(), 'reducer']) for mapper
+                     in processes],
+      max_concurrency = 8)
+
+#### finalization_wait
+
+Tasks have three active stages: `ACTIVE`, `CLEANING`, and `FINALIZING`. The
+`ACTIVE` stage is when ordinary processes run. This stage lasts as
+long as Processes are running and the Task is healthy. The moment either
+all Processes have finished successfully or the Task has reached a
+maximum Process failure limit, it goes into the `CLEANING` stage and sends
+SIGTERMs to all currently running Processes and their process trees.
+Once all Processes have terminated, the Task goes into `FINALIZING` stage
+and invokes the schedule of all Processes with the "final" attribute set to True.
+
+This whole process from the end of `ACTIVE` stage to the end of `FINALIZING`
+must happen within `finalization_wait` seconds. If it does not
+finish during that time, all remaining Processes are sent SIGKILLs
+(or if they depend upon uncompleted Processes, are
+never invoked.)
+
+Client applications with higher priority may force a shorter
+finalization wait (e.g. through parameters to `thermos kill`), so this
+is mostly a best-effort signal.
+
+### Constraint Object
+
+Constraint objects currently support only a single ordering constraint, `order`,
+which specifies that its processes run sequentially in the order given. By
+default, all processes run in parallel when bound to a `Task` without
+ordering constraints.
+
+   param | type           | description
+   ----- | :----:         | -----------
+   order | List of String | List of processes by name (String) that should be run serially.
+
+### Resource Object
+
+Specifies the amount of CPU, RAM, and disk resources the task needs. See the
+[Resource Isolation document](/documentation/0.5.0-incubating/resource-isolation/) for suggested values and to understand how
+resources are allocated.
+
+  param      | type    | description
+  -----      | :----:  | -----------
+  ```cpu```  | Float   | Fractional number of cores required by the task.
+  ```ram```  | Integer | Bytes of RAM required by the task.
+  ```disk``` | Integer | Bytes of disk required by the task.
+
+Job Schema
+==========
+
+### Job Objects
+
+   name | type | description
+   ------ | :-------: | -------
+  ```task``` | Task | The Task object to bind to this job. Required.
+  ```name``` | String | Job name. (Default: inherited from the task attribute's name)
+  ```role``` | String | Job role account. Required.
+  ```cluster``` | String | Cluster in which this job is scheduled. Required.
+   ```environment``` | String | Job environment, default ```devel```. Must be one of ```prod```, ```devel```, ```test``` or ```staging<number>```.
+  ```contact``` | String | Best email address to reach the owner of the job. For production jobs, this is usually a team mailing list.
+  ```instances```| Integer | Number of instances (sometimes referred to as replicas or shards) of the task to create. (Default: 1)
+   ```cron_schedule``` **(Present, but not supported and a no-op)** | String | UTC Cron schedule in cron format. May only be used with non-service jobs. Default: None (not a cron job.)
+  ```cron_collision_policy``` **(Present, but not supported and a no-op)** | String | Policy to use when a cron job is triggered while a previous run is still active. KILL_EXISTING: kill the previous run and schedule the new run. CANCEL_NEW: let the previous run continue and cancel the new run. RUN_OVERLAP: let the previous run continue and schedule the new run. (Default: KILL_EXISTING)
+  ```update_config``` | ```update_config``` object | Parameters for controlling the rate and policy of rolling updates.
+  ```constraints``` | dict | Scheduling constraints for the tasks. See the section on the [constraint specification language](#Specifying-Scheduling-Constraints)
+  ```service``` | Boolean | If True, restart tasks regardless of success or failure. (Default: False)
+  ```daemon``` | Boolean | A DEPRECATED alias for "service". (Default: False)
+  ```max_task_failures``` | Integer | Maximum number of failures after which the task is considered to have failed. (Default: 1) Set to -1 to allow for infinite failures.
+  ```priority``` | Integer | Preemption priority to give the task (Default 0). Tasks with higher priorities may preempt tasks at lower priorities.
+  ```production``` | Boolean |  Whether or not this is a production task backed by quota (Default: False). Production jobs may preempt any non-production job, and may only be preempted by production jobs in the same role and of higher priority. To run jobs at this level, the job role must have the appropriate quota.
+  ```health_check_config``` | ```health_check_config``` object | Parameters for controlling a task's health checks via HTTP. Only used if a health port was assigned with a command line wildcard.
+
+### Services
+
+Jobs with the `service` flag set to True are called Services. The `Service`
+alias can be used as shorthand for `Job` with `service=True`.
+Services are differentiated from non-service Jobs in that tasks
+always restart on completion, whether successful or unsuccessful.
+Jobs without the service bit set only restart up to
+`max_task_failures` times and only if they terminated unsuccessfully
+either due to human error or machine failure.
+
+### UpdateConfig Objects
+
+Parameters for controlling the rate and policy of rolling updates.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+| ```restart_threshold```      | Integer  | Maximum number of seconds a shard may take to move into the ```RUNNING``` state before it is considered a failure (Default: 60)
+| ```watch_secs```             | Integer  | Minimum number of seconds a shard must remain in the ```RUNNING``` state to be considered a success (Default: 45)
+| ```max_per_shard_failures``` | Integer  | Maximum number of restarts per shard during update. Increments total failure count when this limit is exceeded. (Default: 0)
+| ```max_total_failures```     | Integer  | Maximum number of shard failures to be tolerated in total during an update. Cannot be greater than or equal to the total number of tasks in a job. (Default: 0)
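+
+For example, a rolling update that moves 5 shards at a time and tolerates at
+most 10 total shard failures could be sketched with the attributes above
+(values are illustrative):
+
+    update_config = UpdateConfig(
+      batch_size = 5,
+      restart_threshold = 60,
+      watch_secs = 45,
+      max_per_shard_failures = 2,
+      max_total_failures = 10)
+
+The resulting object is passed as the `update_config` attribute of a `Job`.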
+
+### HealthCheckConfig Objects
+
+Parameters for controlling a task's health checks via HTTP.
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```initial_interval_secs```    | Integer   | Initial delay for performing an HTTP health check. (Default: 15)
+| ```interval_secs```            | Integer   | Interval on which to check the task's health via HTTP. (Default: 10)
+| ```timeout_secs```             | Integer   | HTTP request timeout. (Default: 1)
+| ```max_consecutive_failures``` | Integer   | Maximum number of consecutive failures that are tolerated before considering a task unhealthy (Default: 0)
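+
+For example (values are illustrative), a task that should only be considered
+unhealthy after three consecutive failed HTTP checks could use:
+
+    health_check_config = HealthCheckConfig(
+      initial_interval_secs = 30,
+      interval_secs = 15,
+      timeout_secs = 5,
+      max_consecutive_failures = 3)
+
+This object is passed as the `health_check_config` attribute of a `Job`.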
+
+Specifying Scheduling Constraints
+=================================
+
+Most users will not need to specify constraints explicitly, as the
+scheduler automatically inserts reasonable defaults that attempt to
+ensure reliability without impacting schedulability. For example, the
+scheduler inserts a `host: limit:1` constraint, ensuring
+that your shards run on different physical machines. Please do not
+set this field unless you are sure of what you are doing.
+
+In the `Job` object there is a map `constraints` from String to String
+allowing the user to tailor the schedulability of tasks within the job.
+
+Each slave in the cluster is assigned a set of string-valued
+key/value pairs called attributes. For example, consider the host
+`cluster1-aaa-03-sr2` and its following attributes (given in key:value
+format): `host:cluster1-aaa-03-sr2` and `rack:aaa`.
+
+The constraint map's key is the attribute name on which we
+constrain Tasks within our Job. The value is how we constrain them.
+There are two types of constraints: *limit constraints* and *value
+constraints*.
+
+| constraint    | description
+| ------------- | --------------
+| Limit         | A string that specifies a limit for a constraint. Starts with <code>'limit:</code> followed by an Integer and closing single quote, such as ```'limit:1'```.
+| Value         | A string that specifies a value for a constraint. To include a list of values, separate the values using commas. To negate the values of a constraint, start with a ```!```.
+
+You can also control machine diversity using constraints. The below
+constraint ensures that no more than two instances of your job may run
+on a single host. Think of this as a "group by" limit.
+
+    constraints = {
+      'host': 'limit:2',
+    }
+
+Likewise, you can use constraints to control rack diversity, e.g. at
+most one task per rack:
+
+    constraints = {
+      'rack': 'limit:1',
+    }
+
+Use these constraints sparingly as they can dramatically reduce Tasks' schedulability.
+
+Template Namespaces
+===================
+
+Currently, a few Pystachio namespaces have special semantics. Using them
+in your configuration allows you to tailor application behavior
+through environment introspection or interact in special ways with the
+Aurora client or Aurora-provided services.
+
+### mesos Namespace
+
+The `mesos` namespace contains the `instance` variable that can be used
+to distinguish between Task replicas.
+
+| variable name     | type       | description
+| --------------- | :--------: | -------------
+| ```instance```    | Integer    | The instance number of the created task. A job with 5 replicas has instance numbers 0, 1, 2, 3, and 4.
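+
+For example, a command line can use the instance number to give each replica a
+distinct argument (the jar name and flag here are purely illustrative):
+
+    server = Process(
+      name = 'server',
+      cmdline = 'java -jar server.jar --shard_id={{mesos.instance}}')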
+
+### thermos Namespace
+
+The `thermos` namespace contains variables that work directly on the
+Thermos platform in addition to Aurora. This namespace is fully
+compatible with Tasks invoked via the `thermos` CLI.
+
+| variable      | type                     | description                        |
+| :----------:  | ---------                | ------------                       |
+| ```ports```   | map of string to Integer | A map of names to port numbers     |
+| ```task_id``` | string                   | The task ID assigned to this task. |
+
+The `thermos.ports` namespace is automatically populated by Aurora when
+invoking tasks on Mesos. When running the `thermos` command directly,
+these ports must be explicitly mapped with the `-P` option.
+
+For example, if `{{thermos.ports[http]}}` is specified in a `Process`
+configuration, it is automatically extracted and auto-populated by
+Aurora, but must be specified with, for example, `thermos -P http:12345`
+to map `http` to port 12345 when running via the CLI.
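+
+For example, a process that binds a simple web server to an Aurora-assigned
+port might be sketched as:
+
+    http_server = Process(
+      name = 'http_server',
+      cmdline = 'python -m SimpleHTTPServer {{thermos.ports[http]}}')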
+
+Basic Examples
+==============
+
+These are provided to give a basic understanding of simple Aurora jobs.
+
+### hello_world.aurora
+
+Put the following in a file named `hello_world.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    import os
+    hello_world_process = Process(name = 'hello_world', cmdline = 'echo hello world')
+
+    hello_world_task = Task(
+      resources = Resources(cpu = 0.1, ram = 16 * MB, disk = 16 * MB),
+      processes = [hello_world_process])
+
+    hello_world_job = Job(
+      cluster = 'cluster1',
+      role = os.getenv('USER'),
+      task = hello_world_task)
+
+    jobs = [hello_world_job]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora create cluster1/$USER/test/hello_world hello_world.aurora
+
+    aurora kill cluster1/$USER/test/hello_world
+
+### Environment Tailoring
+
+#### hello_world_productionized.aurora
+
+Put the following in a file named `hello_world_productionized.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    include('hello_world.aurora')
+
+    production_resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 2 * GB)
+    staging_resources = Resources(cpu = 0.1, ram = 32 * MB, disk = 512 * MB)
+    hello_world_template = hello_world(
+        name = "hello_world-{{cluster}}",
+        task = hello_world(resources=production_resources))
+
+    jobs = [
+      # production jobs
+      hello_world_template(cluster = 'cluster1', instances = 25),
+      hello_world_template(cluster = 'cluster2', instances = 15),
+
+      # staging jobs
+      hello_world_template(
+        cluster = 'local',
+        instances = 1,
+        task = hello_world(resources=staging_resources)),
+    ]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora create cluster1/$USER/test/hello_world-cluster1 hello_world_productionized.aurora
+
+    aurora kill cluster1/$USER/test/hello_world-cluster1
diff --git a/source/documentation/0.5.0-incubating/configuration-tutorial.md b/source/documentation/0.5.0-incubating/configuration-tutorial.md
new file mode 100644
index 0000000..eebf0bb
--- /dev/null
+++ b/source/documentation/0.5.0-incubating/configuration-tutorial.md
@@ -0,0 +1,1139 @@
+Aurora Configuration Tutorial
+=============================
+
+How to write Aurora configuration files, including feature descriptions
+and best practices. When writing a configuration file, make use of
+`aurora inspect`. It takes the same job key and configuration file
+arguments as `aurora create` or `aurora update`. It first ensures the
+configuration parses, then outputs it in human-readable form.
+
+You should read this after going through the general [Aurora Tutorial](/documentation/0.5.0-incubating/tutorial/).
+
+[The Basics](#Basics)
+&nbsp;&nbsp;&nbsp;&nbsp;[Use Bottom-To-Top Object Ordering](#Bottom)
+[An Example Configuration File](#Example)
+[Defining Process Objects](#Process)
+[Getting Your Code Into The Sandbox](#Sandbox)
+[Defining Task Objects](#Task)
+&nbsp;&nbsp;&nbsp;&nbsp;[`SequentialTask`](#Sequential)
+&nbsp;&nbsp;&nbsp;&nbsp;[`SimpleTask`](#Simple)
+&nbsp;&nbsp;&nbsp;&nbsp;[`Tasks.concat` and `Tasks.combine`](#Concat)
+[Defining `Job` Objects](#Job)
+[Defining The `jobs` List](#jobs)
+[Templating](#Templating)
+[Templating 1: Binding in Pystachio](#Binding)
+[Structurals in Pystachio / Aurora](#Structurals)
+&nbsp;&nbsp;&nbsp;&nbsp;[Mustaches Within Structurals](#Mustaches)
+[Templating 2: Structurals Are Factories](#Factories)
+&nbsp;&nbsp;&nbsp;&nbsp;[A Second Way of Templating](#Second)
+[Advanced Binding](#AdvancedBinding)
+[Bind Syntax](#BindSyntax)
+&nbsp;&nbsp;&nbsp;&nbsp;[Binding Complex Objects](#ComplexObjects)
+[Structural Binding](#StructuralBinding)
+[Configuration File Writing Tips And Best Practices](#Tips)
+&nbsp;&nbsp;&nbsp;&nbsp;[Use As Few `.aurora` Files As Possible](#Few)
+&nbsp;&nbsp;&nbsp;&nbsp;[Avoid Boilerplate](#Boilerplate)
+&nbsp;&nbsp;&nbsp;&nbsp;[Thermos Uses bash, But Thermos Is Not bash](#Bash)
+&nbsp;&nbsp;&nbsp;&nbsp;[Rarely Use Functions In Your Configurations](#Functions)
+
+The Basics
+----------
+
+To run a job on Aurora, you must specify a configuration file that tells
+Aurora what it needs to know to schedule the job, what Mesos needs to
+run the tasks the job is made up of, and what Thermos needs to run the
+processes that make up the tasks. This file must have
+a `.aurora` suffix.
+
+A configuration file defines a collection of objects, along with parameter
+values for their attributes. An Aurora configuration file contains the
+following three types of objects:
+
+- Job
+- Task
+- Process
+
+A configuration also specifies a list of `Job` objects assigned
+to the variable `jobs`.
+
+- jobs (list of defined Jobs to run)
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a type-checked dictionary
+templating library called *Pystachio*, a powerful tool for
+configuration specification and reuse. Pystachio objects are tailored
+via `{{}}`-surrounded templates.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the [*Aurora+Thermos Configuration
+Reference*](/documentation/0.5.0-incubating/configuration-reference/), without `import` statements - the
+Aurora config loader injects them automatically. Other than that, an `.aurora`
+file works like any other Python script.
+
+[*Aurora+Thermos Configuration Reference*](/documentation/0.5.0-incubating/configuration-reference/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+### Use Bottom-To-Top Object Ordering
+
+A well-structured configuration starts with structural templates (if
+any). Structural templates encapsulate in their attributes all the
+differences between Jobs in the configuration that are not directly
+manipulated at the `Job` level, but typically at the `Process` or `Task`
+level. For example, if certain processes are invoked with slightly
+different settings or input.
+
+After structural templates, define, in order, `Process`es, `Task`s, and
+`Job`s.
+
+Structural template names should be *UpperCamelCased* and their
+instantiations are typically *UPPER\_SNAKE\_CASED*. `Process`, `Task`,
+and `Job` names are typically *lower\_snake\_cased*. Indentation is typically 2
+spaces.
+
+An Example Configuration File
+-----------------------------
+
+The following is a typical configuration file. Don't worry if there are
+parts you don't understand yet, but you may want to refer back to this
+as you read about its individual parts. Note that names surrounded by
+curly braces {{}} are template variables, which the system replaces with
+bound values for the variables.
+
+    # --- templates here ---
+	class Profile(Struct):
+	  package_version = Default(String, 'live')
+	  java_binary = Default(String, '/usr/lib/jvm/java-1.7.0-openjdk/bin/java')
+	  extra_jvm_options = Default(String, '')
+	  parent_environment = Default(String, 'prod')
+	  parent_serverset = Default(String,
+                                 '/foocorp/service/bird/{{parent_environment}}/bird')
+
+	# --- processes here ---
+	main = Process(
+	  name = 'application',
+	  cmdline = '{{profile.java_binary}} -server -Xmx1792m '
+	            '{{profile.extra_jvm_options}} '
+	            '-jar application.jar '
+	            '-upstreamService {{profile.parent_serverset}}'
+	)
+
+	# --- tasks ---
+	base_task = SequentialTask(
+	  name = 'application',
+	  processes = [
+	    Process(
+	      name = 'fetch',
+          cmdline = 'curl -O '
+                    'https://packages.foocorp.com/{{profile.package_version}}/application.jar'),
+	  ]
+	)
+
+        # not always necessary but often useful to have separate task
+        # resource classes
+        staging_task = base_task(resources =
+                         Resources(cpu = 1.0,
+                                   ram = 2048*MB,
+                                   disk = 1*GB))
+	production_task = base_task(resources =
+                            Resources(cpu = 4.0,
+                                      ram = 2560*MB,
+                                      disk = 10*GB))
+
+	# --- job template ---
+	job_template = Job(
+	  name = 'application',
+	  role = 'myteam',
+	  contact = 'myteam-team@foocorp.com',
+	  instances = 20,
+	  service = True,
+	  task = production_task
+	)
+
+	# -- profile instantiations (if any) ---
+	PRODUCTION = Profile()
+	STAGING = Profile(
+	  extra_jvm_options = '-Xloggc:gc.log',
+	  parent_environment = 'staging'
+	)
+
+	# -- job instantiations --
+	jobs = [
+          job_template(cluster = 'cluster1', environment = 'prod')
+	               .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster2', environment = 'prod')
+	                .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster1',
+                        environment = 'staging',
+			service = False,
+			task = staging_task,
+			instances = 2)
+			.bind(profile = STAGING),
+	]
+
+## Defining Process Objects
+
+Processes are handled by the Thermos system. A process is a single
+executable step run as a part of an Aurora task, which consists of a
+bash-executable statement.
+
+The key (and required) `Process` attributes are:
+
+-   `name`: Any string which is a valid Unix filename (no slashes,
+    NULLs, or leading periods). The `name` value must be unique relative
+    to other Processes in a `Task`.
+-   `cmdline`: A command line run in a bash subshell, so you can use
+    bash scripts. Nothing is supplied for command-line arguments,
+    so `$*` is unspecified.
+
+Many tiny processes make managing configurations more difficult. For
+example, the following is a bad way to define processes.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip'
+    )
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+Since `cmdline` runs in a bash subshell, you can chain commands
+with `&&` or `||`.
+
+When defining a `Task` that is just a list of Processes run in a
+particular order, use `SequentialTask`, as described in the [*Defining*
+`Task` *Objects*](#Task) section. The following simplifies and combines the
+above multiple `Process` definitions into just two.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip && '
+                'unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+`Process` also has five optional attributes, each with a default value
+if one isn't specified in the configuration:
+
+-   `max_failures`: Defaulting to `1`, the maximum number of failures
+    (non-zero exit statuses) before this `Process` is marked permanently
+    failed and not retried. If a `Process` permanently fails, Thermos
+    checks the `Process` object's containing `Task` for the task's
+    failure limit (usually 1) to determine whether or not the `Task`
+    should be failed. Setting `max_failures` to `0` means that this
+    process will keep retrying until a successful (zero) exit status is
+    achieved. Retries happen at most once every `min_duration` seconds
+    to prevent effectively mounting a denial of service attack against
+    the coordinating scheduler.
+
+-   `daemon`: Defaulting to `False`, if `daemon` is set to `True`, a
+    successful (zero) exit status does not prevent future process runs.
+    Instead, the `Process` reinvokes after `min_duration` seconds.
+    However, the maximum failure limit (`max_failures`) still
+    applies. A combination of `daemon=True` and `max_failures=0` retries
+    a `Process` indefinitely regardless of exit status. This should
+    generally be avoided for very short-lived processes because of the
+    accumulation of checkpointed state for each process run. When
+    running in Aurora, `max_failures` is capped at
+    100.
+
+-   `ephemeral`: Defaulting to `False`, if `ephemeral` is `True`, the
+    `Process`' status is not used to determine if its bound `Task` has
+    completed. For example, consider a `Task` with a
+    non-ephemeral webserver process and an ephemeral logsaver process
+    that periodically checkpoints its log files to a centralized data
+    store. The `Task` is considered finished once the webserver process
+    finishes, regardless of the logsaver's current status.
+
+-   `min_duration`: Defaults to `15`. Processes may succeed or fail
+    multiple times during a single Task. Each result is called a
+    *process run* and this value is the minimum number of seconds the
+    scheduler waits before re-running the same process.
+
+-   `final`: Defaulting to `False`, this is a finalizing `Process` that
+    should run last. Processes can be grouped into two classes:
+    *ordinary* and *finalizing*. By default, Thermos Processes are
+    ordinary. They run as long as the `Task` is considered
+    healthy (i.e. hasn't reached a failure limit). But once all regular
+    Thermos Processes have either finished or the `Task` has reached a
+    certain failure threshold, Thermos moves into a *finalization* stage
+    and runs all finalizing Processes. These are typically necessary for
+    cleaning up after the `Task`, such as log checkpointers, or perhaps
+    e-mail notifications of a completed Task. Finalizing processes may
+    not depend upon ordinary processes or vice-versa, however finalizing
+    processes may depend upon other finalizing processes and will
+    otherwise run as a typical process schedule.
+
+## Getting Your Code Into The Sandbox
+
+When using Aurora, you need to get your executable code into its "sandbox", specifically
+the Task sandbox where the code executes for the Processes that make up that Task.
+
+Each Task has a sandbox created when the Task starts and garbage
+collected when it finishes. All of a Task's processes run in its
+sandbox, so processes can share state by using a shared current
+working directory.
+
+Typically, you save this code somewhere. You then need to define a Process
+in your `.aurora` configuration file that fetches the code from that somewhere
+to where the slave can see it. For a public cloud, that can be anywhere public on
+the Internet, such as S3. For private cloud internal storage, you need to put it
+on an accessible HDFS cluster or similar storage.
+
+The template for this Process is:
+
+    <name> = Process(
+      name = '<name>',
+      cmdline = '<command to copy and extract code archive into current working directory>'
+    )
+
+Note: Be sure the extracted code archive has an executable.
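+
+A concrete sketch of such a fetch process (the package URL is purely
+illustrative) might be:
+
+    fetch_app = Process(
+      name = 'fetch_app',
+      cmdline = 'curl -O https://packages.example.com/myapp/app.tar.gz && '
+                'tar xzf app.tar.gz')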
+
+## Defining Task Objects
+
+Tasks are handled by Mesos. A task is a collection of processes that
+runs in a shared sandbox. It's the fundamental unit Aurora uses to
+schedule the datacenter; essentially what Aurora does is find places
+in the cluster to run tasks.
+
+The key (and required) parts of a Task are:
+
+-   `name`: A string giving the Task's name. By default, if a Task is
+    not given a name, it inherits the first name in its Process list.
+
+-   `processes`: An unordered list of Process objects bound to the Task.
+    The value of the optional `constraints` attribute affects the
+    contents as a whole. Currently, the only constraint, `order`, determines if
+    the processes run in parallel or sequentially.
+
+-   `resources`: A `Resource` object defining the Task's resource
+    footprint. A `Resource` object has three attributes:
+    -   `cpu`: A Float, the fractional number of cores the Task requires.
+    -   `ram`: An Integer, RAM bytes the Task requires.
+    -   `disk`: An Integer, disk bytes the Task requires.
+
+A basic Task definition looks like:
+
+    Task(
+        name="hello_world",
+        processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+        resources=Resources(cpu = 1.0,
+                            ram = 1*GB,
+                            disk = 1*GB))
+
+There are four optional Task attributes:
+
+-   `constraints`: A list of `Constraint` objects that constrain the
+    Task's processes. Currently there is only one type, the `order`
+    constraint. For example the following requires that the processes
+    run in the order `foo`, then `bar`.
+
+        constraints = [Constraint(order=['foo', 'bar'])]
+
+    There is an `order()` function that takes `order('foo', 'bar', 'baz')`
+    and converts it into `[Constraint(order=['foo', 'bar', 'baz'])]`.
+    `order()` accepts Process name strings `('foo', 'bar')` or the processes
+    themselves, e.g. `foo=Process(name='foo', ...)`, `bar=Process(name='bar', ...)`,
+    `constraints=order(foo, bar)`
+
+    Note that Thermos rejects tasks with process cycles.
+
+-   `max_failures`: Defaulting to `1`, the number of failed processes
+    needed for the `Task` to be marked as failed. Note how this
+    interacts with individual Processes' `max_failures` values. Assume a
+    Task has two Processes and a `max_failures` value of `2`. So both
+    Processes must fail for the Task to fail. Now, assume each of the
+    Task's Processes has its own `max_failures` value of `10`. If
+    Process "A" fails 5 times before succeeding, and Process "B" fails
+    10 times and is then marked as failing, their parent Task succeeds.
+    Even though there were 15 individual failures by its Processes, only
+    1 of its Processes was finally marked as failing. Since 1 is less
+    than the 2 that is the Task's `max_failures` value, the Task does
+    not fail.
+
+-   `max_concurrency`: Defaulting to `0`, the maximum number of
+    concurrent processes in the Task. `0` specifies unlimited
+    concurrency. For Tasks with many expensive but otherwise independent
+    processes, you can limit the amount of concurrency Thermos schedules
+    instead of artificially constraining them through `order`
+    constraints. For example, a test framework may generate a Task with
+    100 test run processes, but runs it in a Task with
+    `resources.cpus=4`. Limit the amount of parallelism to 4 by setting
+    `max_concurrency=4`.
+
+-   `finalization_wait`: Defaulting to `30`, the number of seconds
+    allocated for finalizing the Task's processes. A Task starts in
+    `ACTIVE` state when Processes run and stays there as long as the Task
+    is healthy and Processes run. When all Processes finish successfully
+    or the Task reaches its maximum process failure limit, it goes into
+    `CLEANING` state. In `CLEANING`, it sends `SIGTERMS` to any still running
+    Processes. When all Processes terminate, the Task goes into
+    `FINALIZING` state and invokes the schedule of all processes whose
+    final attribute has a True value. Everything from the end of `ACTIVE`
+    to the end of `FINALIZING` must happen within `finalization_wait`
+    number of seconds. If not, all still running Processes are sent
+    `SIGKILL`s (or if dependent on yet to be completed Processes, are
+    never invoked).
+
+### SequentialTask: Running Processes in Parallel or Sequentially
+
+By default, a Task with several Processes runs them in parallel. There
+are two ways to run Processes sequentially:
+
+-   Include an `order` constraint in the Task definition's `constraints`
+    attribute whose arguments specify the processes' run order:
+
+        Task( ... processes=[process1, process2, process3],
+	          constraints = order(process1, process2, process3), ...)
+
+-   Use `SequentialTask` instead of `Task`; it automatically runs
+    processes in the order specified in the `processes` attribute. No
+    `constraint` parameter is needed:
+
+        SequentialTask( ... processes=[process1, process2, process3] ...)
+
+### SimpleTask
+
+For quickly creating simple tasks, use the `SimpleTask` helper. It
+creates a basic task from a provided name and command line using a
+default set of resources. For example, in a `.aurora` configuration
+file:
+
+    SimpleTask(name="hello_world", command="echo hello world")
+
+is equivalent to
+
+    Task(name="hello_world",
+         processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+         resources=Resources(cpu = 1.0,
+                             ram = 1*GB,
+                             disk = 1*GB))
+
+The simplest idiomatic Job configuration thus becomes:
+
+    import os
+    hello_world_job = Job(
+      task=SimpleTask(name="hello_world", command="echo hello world"),
+      role=os.getenv('USER'),
+      cluster="cluster1")
+
+When written to `hello_world.aurora`, you invoke it with a simple
+`aurora create cluster1/$USER/test/hello_world hello_world.aurora`.
+
+### Combining tasks
+
+`Tasks.concat` (synonym: `concat_tasks`) and
+`Tasks.combine` (synonym: `combine_tasks`) merge multiple Task definitions
+into a single Task. It may be easier to define complex Jobs
+as smaller constituent Tasks. But since a Job only includes a single
+Task, the subtasks must be combined before using them in a Job.
+Smaller Tasks can also be reused between Jobs, instead of having to
+repeat their definition for multiple Jobs.
+
+With both methods, the merged Task takes the first Task's name. The
+difference between the two is the result Task's process ordering.
+
+-   `Tasks.combine` runs its subtasks' processes in no particular order.
+    The new Task's resource consumption is the sum of all its subtasks'
+    consumption.
+
+-   `Tasks.concat` runs its subtasks in the order supplied, with each
+    subtask's processes run serially between tasks. It is analogous to
+    the `order` constraint helper, except at the Task level instead of
+    the Process level. The new Task's resource consumption is the
+    maximum value specified by any subtask for each Resource attribute
+    (cpu, ram and disk).
+
+For example, given the following:
+
+    setup_task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper],
+      # It is important to note that {{Tasks.concat}} has
+      # no effect on the ordering of the processes within a task;
+      # hence the necessity of the {{order}} statement below
+      # (otherwise, the order in which {{download_interpreter}}
+      # and {{update_zookeeper}} run will be non-deterministic)
+      constraints=order(download_interpreter, update_zookeeper),
+      ...
+    )
+
+    run_task = SequentialTask(
+      ...
+      processes=[download_application, start_application],
+      ...
+    )
+
+    combined_task = Tasks.concat(setup_task, run_task)
+
+The `Tasks.concat` command merges the two Tasks into a single Task and
+ensures all processes in `setup_task` run before the processes
+in `run_task`. Conceptually, the task is reduced to:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper,
+                        download_application, start_application),
+      ...
+    )
+
+In the case of `Tasks.combine`, the two schedules run in parallel:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper) +
+                        order(download_application, start_application),
+      ...
+    )
+
+In the latter case, each of the two sequences may operate in parallel.
+Of course, this may not be the intended behavior (for example, if
+the `start_application` Process implicitly relies
+upon `download_interpreter`). Make sure you understand the difference
+between using one or the other.
+
+## Defining Job Objects
+
+A job is a group of identical tasks that Aurora can run in a Mesos cluster.
+
+A `Job` object is defined by the values of several attributes, some
+required and some optional. The required attributes are:
+
+-   `task`: Task object to bind to this job. Note that a Job can
+    only take a single Task.
+
+-   `role`: Job's role account; in other words, the user account to run
+    the job as on a Mesos cluster machine. A common value is
+    `os.getenv('USER')`, which uses Python to pick up the account of the
+    user submitting the job. The other common value is the service
+    account that runs the job, e.g. `www-data`.
+
+-   `environment`: Job's environment, typical values
+    are `devel`, `test`, or `prod`.
+
+-   `cluster`: Aurora cluster to schedule the job in, defined in
+    `/etc/aurora/clusters.json` or `~/.clusters.json`. You can specify
+    jobs where the only difference is the `cluster`, then at run time
+    only run the Job whose job key includes your desired cluster's name.
+
+You usually see a `name` parameter. By default, `name` inherits its
+value from the Job's associated Task object, but you can override this
+default. For these four parameters, a Job definition might look like:
+
+    foo_job = Job( name = 'foo', cluster = 'cluster1',
+              role = os.getenv('USER'), environment = 'prod',
+              task = foo_task)
+
+In addition to the required attributes, there are several optional
+attributes. The first (strongly recommended) optional attribute is:
+
+-   `contact`: An email address for the Job's owner. For production
+    jobs, it is usually a team mailing list.
+
+Two more attributes deal with how to handle failure of the Job's Task:
+
+-   `max_task_failures`: An integer, defaulting to `1`, of the maximum
+    number of Task failures after which the Job is considered failed.
+    `-1` allows for infinite failures.
+
+-   `service`: A boolean, defaulting to `False`, which if `True`
+    restarts tasks regardless of whether they succeeded or failed. In
+    other words, if `True`, after the Job's Task completes, it
+    automatically starts again. This is for Jobs you want to run
+    continuously, rather than doing a single run.
+
+Three attributes deal with configuring the Job's Task:
+
+-   `instances`: Defaulting to `1`, the number of
+    instances/replicas/shards of the Job's Task to create.
+
+-   `priority`: Defaulting to `0`, the Job's Task's preemption priority,
+    for which higher values may preempt Tasks from Jobs with lower
+    values.
+
+-   `production`: a Boolean, defaulting to `False`, specifying that this
+    is a production job backed by quota. Tasks from production Jobs may
+    preempt tasks from any non-production job, and may only be preempted
+    by tasks from production jobs in the same role with higher
+    priority. **WARNING**: To run Jobs at this level, the Job role must
+    have the appropriate quota.
+
+The final three Job attributes each take an object as their value; a
+combined sketch using several of these optional attributes follows the
+lists below.
+
+-   `update_config`: An `UpdateConfig`
+    object provides parameters for controlling the rate and policy of
+    rolling updates. The `UpdateConfig` parameters are:
+    -   `batch_size`: An integer, defaulting to `1`, specifying the
+        maximum number of shards to update in one iteration.
+    -   `restart_threshold`: An integer, defaulting to `60`, specifying
+        the maximum number of seconds within which a shard must move into
+        the `RUNNING` state before it is considered a failure.
+    -   `watch_secs`: An integer, defaulting to `45`, specifying the
+        minimum number of seconds a shard must remain in the `RUNNING`
+        state before it is considered a success.
+    -   `max_per_shard_failures`: An integer, defaulting to `0`,
+        specifying the maximum number of restarts per shard during an
+        update. When the limit is exceeded, it increments the total
+        failure count.
+    -   `max_total_failures`: An integer, defaulting to `0`, specifying
+        the maximum number of shard failures tolerated during an update.
+        Cannot be equal to or greater than the job's total number of
+        tasks.
+-   `health_check_config`: A `HealthCheckConfig` object that provides
+    parameters for controlling a Task's health checks via HTTP. Only
+    used if a health port was assigned with a command line wildcard. The
+    `HealthCheckConfig` parameters are:
+    -   `initial_interval_secs`: An integer, defaulting to `15`,
+        specifying the initial delay for doing an HTTP health check.
+    -   `interval_secs`: An integer, defaulting to `10`, specifying the
+        number of seconds in the interval between checking the Task's
+        health.
+    -   `timeout_secs`: An integer, defaulting to `1`, specifying the
+        number of seconds the application must respond to an HTTP health
+        check with `OK` before it is considered a failure.
+    -   `max_consecutive_failures`: An integer, defaulting to `0`,
+        specifying the maximum number of consecutive failures before a
+        task is unhealthy.
+-   `constraints`: A `dict` Python object, specifying Task scheduling
+    constraints. Most users will not need to specify constraints, as the
+    scheduler automatically inserts reasonable defaults. Please do not
+    set this field unless you are sure of what you are doing. See the
+    section in the Aurora + Thermos Reference manual on [Specifying
+    Scheduling Constraints](/documentation/0.5.0-incubating/configuration-reference/) for more information.
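+
+As a hedged illustration of how these optional attributes fit together,
+here is a sketch of a Job definition; the task object `web_task`, the
+contact address, and the numeric values are placeholders rather than
+recommendations:
+
+    web_job = Job(
+      name = 'web_service',
+      cluster = 'cluster1',
+      role = 'www-data',
+      environment = 'prod',
+      contact = 'web-team@example.com',
+      instances = 20,
+      service = True,            # restart the task whenever it finishes
+      production = True,         # requires quota for the role
+      priority = 10,
+      update_config = UpdateConfig(batch_size = 5, watch_secs = 60),
+      health_check_config = HealthCheckConfig(initial_interval_secs = 30,
+                                              max_consecutive_failures = 3),
+      task = web_task)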
+
+## The jobs List
+
+At the end of your `.aurora` file, you need to specify a list of the
+file's defined Jobs to run in the order listed. For example, the
+following runs first `job1`, then `job2`, then `job3`.
+
+    jobs = [job1, job2, job3]
+
+Templating
+----------
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a templating library called
+*Pystachio*, a powerful tool for configuration specification and reuse.
+
+[Aurora+Thermos Configuration Reference](/documentation/0.5.0-incubating/configuration-reference/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the *Aurora+Thermos Configuration
+Reference* without `import` statements - the Aurora config loader
+injects them automatically. Other than that the `.aurora` format
+works like any other Python script.
+
+### Templating 1: Binding in Pystachio
+
+Pystachio uses the visually distinctive {{}} to indicate template
+variables. These are often called "mustache variables" after the
+similarly appearing variables in the Mustache templating system and
+because the curly braces resemble mustaches.
+
+If you are familiar with the Mustache system, note that templates in
+Pystachio have significant differences: they have no nesting, joining, or
+inheritance semantics. On the other hand, templates are evaluated
+iteratively, which affords some level of indirection.
+
+Let's start with the simplest template: text with one
+variable, in this case `name`:
+
+    Hello {{name}}
+
+If we evaluate this as is, we'd get back:
+
+    Hello
+
+If a template variable doesn't have a value, when evaluated it's
+replaced with nothing. If we add a binding to give it a value:
+
+    { "name" : "Tom" }
+
+We'd get back:
+
+    Hello Tom
+
+We can also use {{}} variables as sectional variables. Let's say we
+have:
+
+    {{#x}} Testing... {{/x}}
+
+If `x` evaluates to `True`, the text between the sectional tags is
+shown. If there is no value for `x` or it evaluates to `False`, the
+text between the tags is not shown. So, at a basic level, a sectional
+variable acts as a conditional.
+
+However, if the sectional variable evaluates to a list, array, etc. it
+acts as a `foreach`. For example,
+
+    {{#x}} {{name}} {{/x}}
+
+with
+
+    { "x": [ { "name" : "tic" } { "name" : "tac" } { "name" : "toe" } ] }
+
+evaluates to
+
+    tic tac toe
+
+Every Pystachio object has an associated `.bind` method that can bind
+values to {{}} variables. Bindings are not immediately evaluated.
+Instead, they are evaluated only when the interpolated value of the
+object is necessary, e.g. for performing equality or serializing a
+message over the wire.
+
+Objects with and without mustache templated variables behave
+differently:
+
+    >>> Float(1.5)
+    Float(1.5)
+
+    >>> Float('{{x}}.5')
+    Float({{x}}.5)
+
+    >>> Float('{{x}}.5').bind(x = 1)
+    Float(1.5)
+
+    >>> Float('{{x}}.5').bind(x = 1) == Float(1.5)
+    True
+
+    >>> contextual_object = String('{{metavar{{number}}}}').bind(
+    ... metavar1 = "first", metavar2 = "second")
+
+    >>> contextual_object
+    String({{metavar{{number}}}})
+
+    >>> contextual_object.bind(number = 1)
+    String(first)
+
+    >>> contextual_object.bind(number = 2)
+    String(second)
+
+You usually bind simple key to value pairs, but you can also bind three
+other objects: lists, dictionaries, and structurals. These will be
+described in detail later.
+
+### Structurals in Pystachio / Aurora
+
+Most Aurora/Thermos users don't ever (knowingly) interact with `String`,
+`Float`, or `Integer` Pystachio objects directly. Instead they interact
+with derived structural (`Struct`) objects that are collections of
+fundamental and structural objects. The structural object components are
+called *attributes*. Aurora's most used structural objects are `Job`,
+`Task`, and `Process`:
+
+    class Process(Struct):
+      cmdline = Required(String)
+      name = Required(String)
+      max_failures = Default(Integer, 1)
+      daemon = Default(Boolean, False)
+      ephemeral = Default(Boolean, False)
+      min_duration = Default(Integer, 5)
+      final = Default(Boolean, False)
+
+Construct default objects by following the object's type with (). If you
+want an attribute to have a value different from its default, include
+the attribute name and value inside the parentheses.
+
+    >>> Process()
+    Process(daemon=False, max_failures=1, ephemeral=False,
+      min_duration=5, final=False)
+
+Attribute values can be template variables, which then receive specific
+values when creating the object.
+
+    >>> Process(cmdline = 'echo {{message}}')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo {{message}}, final=False)
+
+    >>> Process(cmdline = 'echo {{message}}').bind(message = 'hello world')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+A powerful binding property is that all of an object's children inherit its
+bindings:
+
+    >>> List(Process)([
+    ... Process(name = '{{prefix}}_one'),
+    ... Process(name = '{{prefix}}_two')
+    ... ]).bind(prefix = 'hello')
+    ProcessList(
+      Process(daemon=False, name=hello_one, max_failures=1, ephemeral=False, min_duration=5, final=False),
+      Process(daemon=False, name=hello_two, max_failures=1, ephemeral=False, min_duration=5, final=False)
+      )
+
+Remember that an Aurora Job contains Tasks which contain Processes. A
+Job level binding is inherited by its Tasks and all their Processes.
+Similarly a Task level binding is available to that Task and its
+Processes but is *not* visible at the Job level (inheritance is a
+one-way street.)
+
+#### Mustaches Within Structurals
+
+When you define a `Struct` schema, one powerful, but confusing, feature
+is that all of that structure's attributes are Mustache variables within
+the enclosing scope *once they have been populated*.
+
+For example, when `Process` is defined above, all its attributes, such as
+`{{name}}`, `{{cmdline}}`, `{{max_failures}}`, etc., are immediately
+defined as Mustache variables, implicitly bound into the `Process`, and
+inherited by all child objects once they are defined.
+
+Thus, you can do the following:
+
+    >>> Process(name = "installer", cmdline = "echo {{name}} is running")
+    Process(daemon=False, name=installer, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo installer is running, final=False)
+
+WARNING: This binding only takes place in one direction. For example,
+the following does NOT work and does not set the `Process` `name`
+attribute's value.
+
+    >>> Process().bind(name = "installer")
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5, final=False)
+
+The following is also not possible and results in an infinite loop that
+attempts to resolve `Process.name`.
+
+    >>> Process(name = '{{name}}').bind(name = 'installer')
+
+Do not confuse Structural attributes with bound Mustache variables.
+Attributes are implicitly converted to Mustache variables but not vice
+versa.
+
+### Templating 2: Structurals Are Factories
+
+#### A Second Way of Templating
+
+A second templating method is both as powerful as the aforementioned and
+often confused with it. This method is due to automatic conversion of
+Struct attributes to Mustache variables as described above.
+
+Suppose you create a Process object:
+
+    >>> p = Process(name = "process_one", cmdline = "echo hello world")
+
+    >>> p
+    Process(daemon=False, name=process_one, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+This `Process` object, "`p`", can be used wherever a `Process` object is
+needed. It can also be reused by changing the value(s) of its
+attribute(s). Here we change its `name` attribute from `process_one` to
+`process_two`.
+
+    >>> p(name = "process_two")
+    Process(daemon=False, name=process_two, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+Template creation is a common use for this technique:
+
+    >>> Daemon = Process(daemon = True)
+    >>> logrotate = Daemon(name = 'logrotate', cmdline = './logrotate conf/logrotate.conf')
+    >>> mysql = Daemon(name = 'mysql', cmdline = 'bin/mysqld --safe-mode')
+
+### Advanced Binding
+
+As described above, `.bind()` binds simple strings or numbers to
+Mustache variables. In addition to Structural types formed by combining
+atomic types, Pystachio has two container types, `List` and `Map`, which
+can also be bound via `.bind()`.
+
+#### Bind Syntax
+
+The `bind()` function can take Python dictionaries or `kwargs`
+interchangeably (when `**kwargs` appears in a function definition, `kwargs`
+receives a Python dictionary containing all keyword arguments after the
+formal parameter list).
+
+    >>> String('{{foo}}').bind(foo = 'bar') == String('{{foo}}').bind({'foo': 'bar'})
+    True
+
+Bindings done "closer" to the object in question take precedence:
+
+    >>> p = Process(name = '{{context}}_process')
+    >>> t = Task().bind(context = 'global')
+    >>> t(processes = [p, p.bind(context = 'local')])
+    Task(processes=ProcessList(
+      Process(daemon=False, name=global_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5),
+      Process(daemon=False, name=local_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5)
+    ))
+
+#### Binding Complex Objects
+
+##### Lists
+
+    >>> fibonacci = List(Integer)([1, 1, 2, 3, 5, 8, 13])
+    >>> String('{{fib[4]}}').bind(fib = fibonacci)
+    String(5)
+
+##### Maps
+
+    >>> first_names = Map(String, String)({'Kent': 'Clark', 'Wayne': 'Bruce', 'Prince': 'Diana'})
+    >>> String('{{first[Kent]}}').bind(first = first_names)
+    String(Clark)
+
+##### Structurals
+
+    >>> String('{{p.cmdline}}').bind(p = Process(cmdline = "echo hello world"))
+    String(echo hello world)
+
+### Structural Binding
+
+Use structural templates when binding more than two or three individual
+values at the Job or Task level. For fewer values, standard
+key-to-string binding is sufficient.
+
+Structural binding is a very powerful pattern and is most useful in
+Aurora/Thermos for doing Structural configuration. For example, you can
+define a job profile. The following profile uses `HDFS`, the Hadoop
+Distributed File System, to designate a file's location. `HDFS` does
+not come with Aurora, so you'll need to either install it separately
+or change the way the dataset is designated.
+
+    class Profile(Struct):
+      version = Required(String)
+      environment = Required(String)
+      dataset = Default(String, 'hdfs://home/aurora/data/{{environment}}')
+
+    PRODUCTION = Profile(version = 'live', environment = 'prod')
+    DEVEL = Profile(version = 'latest',
+                    environment = 'devel',
+                    dataset = 'hdfs://home/aurora/data/test')
+    TEST = Profile(version = 'latest', environment = 'test')
+
+    JOB_TEMPLATE = Job(
+      name = 'application',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = '{{profile.environment}}',
+      task = SequentialTask(
+        name = 'task',
+        resources = Resources(cpu = 2, ram = 4*GB, disk = 8*GB),
+        processes = [
+          Process(name = 'main',
+                  cmdline = 'java -jar application.jar -hdfsPath {{profile.dataset}}')
+        ]
+       )
+     )
+
+    jobs = [
+      JOB_TEMPLATE(instances = 100).bind(profile = PRODUCTION),
+      JOB_TEMPLATE.bind(profile = DEVEL),
+      JOB_TEMPLATE.bind(profile = TEST),
+     ]
+
+In this case, a custom structural "Profile" is created to self-document
+the configuration to some degree. This also allows some schema
+"type-checking", and for default self-substitution, e.g. in
+`Profile.dataset` above.
+
+So rather than a `.bind()` with a half-dozen substituted variables, you
+can bind a single object that has sensible defaults stored in a single
+place.
+
+Configuration File Writing Tips And Best Practices
+--------------------------------------------------
+
+### Use As Few .aurora Files As Possible
+
+When creating your `.aurora` configuration, try to keep all versions of
+a particular job within the same `.aurora` file. For example, if you
+have separate jobs for `cluster1`, `cluster1` staging, `cluster1`
+testing, and `cluster2`, keep them as close together as possible.
+
+Constructs shared across multiple jobs owned by your team (e.g.
+team-level defaults or structural templates) can be split into separate
+`.aurora` files and included via the `include` directive, as in the
+sketch below.
+
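+For instance, a sketch of this layout with hypothetical file names (both
+files are shown in one block for brevity, assuming `include` makes the
+shared definitions available to the including file):
+
+    # team_defaults.aurora -- shared, team-level definitions (hypothetical file)
+    DEFAULT_ROLE = 'www-data'
+
+    # web_service.aurora -- every variant of one job, pulling in the shared file
+    include('team_defaults.aurora')
+
+    base_job = Job(
+      name = 'web_service',
+      role = DEFAULT_ROLE,
+      environment = 'prod',
+      cluster = 'cluster1',
+      task = SimpleTask(name = 'web_service', command = 'java -jar web_service.jar'))
+
+    jobs = [
+      base_job,
+      base_job(environment = 'devel'),
+      base_job(cluster = 'cluster2'),
+    ]
+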
+### Avoid Boilerplate
+
+If you see repetition or find yourself copy and pasting any parts of
+your configuration, it's likely an opportunity for templating. Take the
+example below:
+
+`redundant.aurora` contains:
+
+    download = Process(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'rm -rf Python-2.7.3 && tar xjf Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    build = Process(
+      name = 'build',
+      cmdline = 'pushd Python-2.7.3 && ./configure && make && popd',
+      max_failures = 1)
+
+    email = Process(
+      name = 'email',
+      cmdline = 'echo Success | mail feynman@tmc.com',
+      max_failures = 5,
+      min_duration = 1)
+
+    build_python = Task(
+      name = 'build_python',
+      processes = [download, unpack, build, email],
+      constraints = [Constraint(order = ['download', 'unpack', 'build', 'email'])])
+
+As you'll notice, there's a lot of repetition in the `Process`
+definitions. For example, almost every process sets a `max_failures`
+limit to 5 and a `min_duration` to 1. This is an opportunity for factoring
+into a common process template.
+
+Furthermore, the Python version is repeated everywhere. This can be
+bound via structural templating as described in the [Advanced Binding](#AdvancedBinding)
+section.
+
+`less_redundant.aurora` contains:
+
+    class Python(Struct):
+      version = Required(String)
+      base = Default(String, 'Python-{{version}}')
+      package = Default(String, '{{base}}.tar.bz2')
+
+    ReliableProcess = Process(
+      max_failures = 5,
+      min_duration = 1)
+
+    download = ReliableProcess(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/{{python.version}}/{{python.package}}')
+
+    unpack = ReliableProcess(
+      name = 'unpack',
+      cmdline = 'rm -rf {{python.base}} && tar xjf {{python.package}}')
+
+    build = ReliableProcess(
+      name = 'build',
+      cmdline = 'pushd {{python.base}} && ./configure && make && popd',
+      max_failures = 1)
+
+    email = ReliableProcess(
+      name = 'email',
+      cmdline = 'echo Success | mail {{role}}@foocorp.com')
+
+    build_python = SequentialTask(
+      name = 'build_python',
+      processes = [download, unpack, build, email]).bind(python = Python(version = "2.7.3"))
+
+### Thermos Uses bash, But Thermos Is Not bash
+
+#### Bad
+
+Many tiny Processes make for harder-to-manage configurations.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'rcp user@my_machine:my_application .'
+    )
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+#### Good
+
+Each `cmdline` runs in a bash subshell, so you have the full power of
+bash. Chaining commands with `&&` or `||` is almost always the right
+thing to do.
+
+Also for Tasks that are simply a list of processes that run one after
+another, consider using the `SequentialTask` helper which applies a
+linear ordering constraint for you.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'rcp user@my_machine:my_application . && unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+### Rarely Use Functions In Your Configurations
+
+90% of the time you define a function in a `.aurora` file, you're
+probably Doing It Wrong(TM).
+
+#### Bad
+
+    def get_my_task(name, user, cpu, ram, disk):
+      return Task(
+        name = name,
+        user = user,
+        processes = [STAGE_PROCESS, RUN_PROCESS],
+        constraints = order(STAGE_PROCESS, RUN_PROCESS),
+        resources = Resources(cpu = cpu, ram = ram, disk = disk)
+      )
+
+    task_one = get_my_task('task_one', 'feynman', 1.0, 32*MB, 1*GB)
+    task_two = get_my_task('task_two', 'feynman', 2.0, 64*MB, 1*GB)
+
+#### Good
+
+This one is more idiomatic. Forced keyword arguments prevent accidents,
+e.g. constructing a task with `32*MB` of disk when you meant 32 MB of RAM.
+Less proliferation of task-construction techniques means
+easier-to-read, quicker-to-understand, and more composable
+configuration.
+
+    TASK_TEMPLATE = SequentialTask(
+      user = 'wickman',
+      processes = [STAGE_PROCESS, RUN_PROCESS],
+    )
+
+    task_one = TASK_TEMPLATE(
+      name = 'task_one',
+      resources = Resources(cpu = 1.0, ram = 32*MB, disk = 1*GB) )
+
+    task_two = TASK_TEMPLATE(
+      name = 'task_two',
+      resources = Resources(cpu = 2.0, ram = 64*MB, disk = 1*GB)
+    )
diff --git a/source/documentation/0.5.0-incubating/contributing.md b/source/documentation/0.5.0-incubating/contributing.md
new file mode 100644
index 0000000..d337da2
--- /dev/null
+++ b/source/documentation/0.5.0-incubating/contributing.md
@@ -0,0 +1,47 @@
+Getting your ReviewBoard Account
+--------------------------------
+Go to https://reviews.apache.org and create an account.
+
+Setting up your ReviewBoard Environment
+---------------------------------------
+Run `./rbt status`. The first time this runs it will bootstrap and you will be asked to log in.
+Subsequent runs will cache your login credentials.
+
+Submitting a Patch for Review
+-----------------------------
+Post a review with `rbt`, fill out the fields in your browser and hit Publish.
+
+    ./rbt post -o -g
+
+Updating an Existing Review
+---------------------------
+Incorporate review feedback, make some more commits, update your existing review, fill out the
+fields in your browser and hit Publish.
+
+    ./rbt post -o -r <RB_ID>
+
+Merging Your Own Review (Committers)
+------------------------------------
+Once you have shipits from the right committers, merge your changes in a single commit and mark
+the review as submitted. The typical workflow is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>  # Verify the automatically-generated commit message looks sane,
+                            # editing if necessary.
+    git show master         # Verify everything looks sane
+    git push origin master
+    ./rbt close <RB_ID>
+
+Note that even if you're developing using feature branches you will not use `git merge` - each
+commit will be an atomic change accompanied by a ReviewBoard entry.
+
+Merging Someone Else's Review
+-----------------------------
+Sometimes you'll need to merge someone else's RB. The typical workflow for this is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>
+    git show master  # Verify everything looks sane, author is correct
+    git push origin master
diff --git a/source/documentation/0.5.0-incubating/deploying-aurora-scheduler.md b/source/documentation/0.5.0-incubating/deploying-aurora-scheduler.md
new file mode 100644
index 0000000..ca214e3
--- /dev/null
+++ b/source/documentation/0.5.0-incubating/deploying-aurora-scheduler.md
@@ -0,0 +1,131 @@
+The Aurora scheduler is responsible for scheduling new jobs, rescheduling failed jobs, and killing
+old jobs.
+
+Installing Aurora
+=================
+Aurora is a standalone Java server. As part of the build process it creates a bundle of all its
+dependencies, with the notable exceptions of the JVM and libmesos. Each target server should have
+a JVM (Java 7 or higher) and libmesos (0.18.0) installed.
+
+Creating the Distribution .zip File (Optional)
+----------------------------------------------
+To create a distribution for installation you will need build tools installed. On Ubuntu this can be
+done with `sudo apt-get install build-essential default-jdk`.
+
+    git clone http://gitbox.apache.org/repos/asf/incubator-aurora.git
+    cd incubator-aurora
+    ./gradlew distZip
+
+Copy the generated `dist/distributions/aurora-scheduler-*.zip` to each node that will run a scheduler.
+
+Installing Aurora
+-----------------
+Extract the aurora-scheduler zip file. The example configurations assume it is extracted to
+`/usr/local/aurora-scheduler`.
+
+    sudo unzip dist/distributions/aurora-scheduler-*.zip -d /usr/local
+    sudo ln -nfs "$(ls -dt /usr/local/aurora-scheduler-* | head -1)" /usr/local/aurora-scheduler
+
+Configuring Aurora
+==================
+
+A Note on Configuration
+-----------------------
+Like Mesos, Aurora uses command-line flags for runtime configuration. As such the Aurora
+"configuration file" is typically a `scheduler.sh` shell script of the form.
+
+    #!/bin/bash
+    AURORA_HOME=/usr/local/aurora-scheduler
+
+    # Flags controlling the JVM.
+    JAVA_OPTS=(
+      -Xmx2g
+      -Xms2g
+      # GC tuning, etc.
+    )
+
+    # Flags controlling the scheduler.
+    AURORA_FLAGS=(
+      -http_port=8081
+      # Log configuration, etc.
+    )
+
+    # Environment variables controlling libmesos
+    export JAVA_HOME=...
+    export GLOG_v=1
+    export LIBPROCESS_PORT=8083
+
+    JAVA_OPTS="${JAVA_OPTS[*]}" exec "$AURORA_HOME/bin/aurora-scheduler" "${AURORA_FLAGS[@]}"
+
+That way Aurora's current flags are visible in `ps` and in the `/vars` admin endpoint.
+
+Examples are available under `examples/scheduler/`. For a list of available Aurora flags and their
+documentation run
+
+    /usr/local/aurora-scheduler/bin/aurora-scheduler -help
+
+Replicated Log Configuration
+----------------------------
+All Aurora state is persisted to a replicated log. This includes all jobs Aurora is running,
+including where in the cluster they are being run and the configuration for running them, as
+well as other information such as metadata needed to reconnect to the Mesos master, resource
+quotas, and any locks in place.
+
+Aurora schedulers use ZooKeeper to discover log replicas and elect a leader. Only one scheduler is
+leader at a given time - the other schedulers follow log writes and prepare to take over as leader
+but do not communicate with the Mesos master. Either 3 or 5 schedulers are recommended in a
+production deployment depending on failure tolerance and they must have persistent storage.
+
+In a cluster with `N` schedulers, the flag `-native_log_quorum_size` should be set to
+`floor(N/2) + 1`. So in a cluster with 1 scheduler it should be set to `1`, in a cluster with 3 it
+should be set to `2`, and in a cluster of 5 it should be set to `3`.
+
+  Number of schedulers (N) | `-native_log_quorum_size` setting (`floor(N/2) + 1`)
+  ------------------------ | -----------------------------------------------------
+  1                        | 1
+  3                        | 2
+  5                        | 3
+  7                        | 4
+
+*Incorrectly setting this flag will cause data corruption to occur!*
+
+Network considerations
+----------------------
+The Aurora scheduler listens on 2 ports - an HTTP port used for client RPCs and a web UI,
+and a libprocess (HTTP+Protobuf) port used to communicate with the Mesos master and for the log
+replication protocol. These can be left unconfigured (the scheduler publishes all selected ports
+to ZooKeeper) or explicitly set in the startup script as follows:
+
+    # ...
+    AURORA_FLAGS=(
+      # ...
+      -http_port=8081
+      # ...
+    )
+    # ...
+    export LIBPROCESS_PORT=8083
+    # ...
+
+Running Aurora
+==============
+Configure a supervisor like [Monit](http://mmonit.com/monit/) or
+[supervisord](http://supervisord.org/) to run the created `scheduler.sh` file and restart it
+whenever it fails. Aurora expects to be restarted by an external process when it fails. Aurora
+supports an active health checking protocol on its admin HTTP interface - if a `GET /health` times
+out or returns anything other than `200 OK` the scheduler process is unhealthy and should be
+restarted.
+
+For example, monit can be configured with
+
+    if failed port 8081 send "GET /health HTTP/1.0\r\n" expect "OK\n" with timeout 2 seconds for 10 cycles then restart
+
+assuming you set `-http_port=8081`.
+
+Maintaining an Aurora Installation
+==================================
+
+Monitoring
+----------
+Aurora exports performance metrics via its HTTP interface. `/vars` and `/vars.json` contain lots of
+useful data to help debug performance and configuration problems. These are all made available via
+[twitter.common.http](https://github.com/twitter/commons/tree/master/src/java/com/twitter/commons/http).
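+
+For quick ad-hoc inspection, a short script along the following lines can pull
+metrics from that endpoint; the scheduler address and the metric-name prefix are
+assumptions for illustration, not a prescribed interface:
+
+    #!/usr/bin/env python
+    # Minimal sketch: dump a few scheduler metrics from /vars.json.
+    import json
+    import urllib2
+
+    SCHEDULER = 'http://localhost:8081'  # assumed address (matches the -http_port example above)
+
+    metrics = json.load(urllib2.urlopen(SCHEDULER + '/vars.json'))
+    for key in sorted(metrics):
+        if key.startswith('jvm_'):  # prefix chosen only for illustration
+            print('%s = %s' % (key, metrics[key]))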
diff --git a/source/documentation/0.5.0-incubating/design/command-hooks.md b/source/documentation/0.5.0-incubating/design/command-hooks.md
new file mode 100644
index 0000000..cc56218
--- /dev/null
+++ b/source/documentation/0.5.0-incubating/design/command-hooks.md
@@ -0,0 +1,263 @@
+# Command Hooks for the Aurora Client
+
+## Introduction/Motivation
+
+We've got hooks in the client that surround API calls. These are
+pretty awkward, because they don't correlate with user actions. For
+example, suppose we wanted a policy that said users weren't allowed to
+kill all instances of a production job at once.
+
+Right now, all that we could hook would be the "killJob" api call. But
+kill (at least in newer versions of the client) normally runs in
+batches. If a user called killall, what we would see on the API level
+is a series of "killJob" calls, each of which specified a batch of
+instances. We wouldn't be able to distinguish between really killing
+all instances of a job (which is forbidden under this policy), and
+carefully killing in batches (which is permitted). In each case, the
+hook would just see a series of API calls, and couldn't find out what
+the actual command being executed was!
+
+For most policy enforcement, what we really want to be able to do is
+look at and vet the commands that a user is performing, not the API
+calls that the client uses to implement those commands.
+
+So I propose that we add a new kind of hooks, which surround noun/verb
+commands. A hook will register itself to handle a collection of (noun,
+verb) pairs. Whenever any of those noun/verb commands are invoked, the
+hook's methods will be called around the execution of the verb. A
+pre-hook will have the ability to reject a command, preventing the
+verb from being executed.
+
+## Registering Hooks
+
+These hooks will be registered two ways:
+* Project hooks file. If a file named `AuroraHooks` is in the project directory
+  where an aurora command is being executed, that file will be read,
+  and its hooks will be registered.
+* Configuration plugins. A configuration plugin can register hooks using an API.
+  Hooks registered this way are, effectively, hardwired into the client executable.
+
+The order of execution of hooks is unspecified: they may be called in
+any order. There is no way to guarantee that one hook will execute
+before some other hook.
+
+
+### Global Hooks
+
+Hooks registered by the Python call are called _global_ hooks,
+because they will run for all configurations, whether or not they
+specify any hooks in the configuration file.
+
+In the implementation, hooks are registered in the module
+`apache.aurora.client.cli.command_hooks`, using the class `GlobalCommandHookRegistry`.  A
+global hook can be registered by calling `GlobalCommandHookRegistry.register_command_hook`
+in a configuration plugin.
+
+### Hook Files
+
+A hook file is a file containing Python source code. It will be
+dynamically loaded by the Aurora command line executable. After
+loading, the client will check the module for a global variable named
+"hooks", which contains a list of hook objects, which will be added to
+the hook registry.
+
+A project hooks file will be named `AuroraHooks`,
+and will be located in either the directory where the command is being
+executed, or one of its parent directories, up to the nearest git/mercurial
+repository base.
+
+### The API
+
+    class CommandHook(object):
+      @property
+      def name(self):
+        """Returns a name for the hook."""
+
+      def get_nouns(self):
+        """Return the nouns that have verbs that should invoke this hook."""
+
+      def get_verbs(self, noun):
+        """Return the verbs for a particular noun that should invoke this hook."""
+
+      @abstractmethod
+      def pre_command(self, noun, verb, context, commandline):
+        """Execute a hook before invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        Returns: True if the command should be allowed to proceed; False if the command
+        should be rejected.
+        """
+
+      def post_command(self, noun, verb, context, commandline, result):
+        """Execute a hook after invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        * result: the result code returned by the verb.
+        Returns: nothing
+        """
+
+    class GlobalCommandHookRegistry(object):
+      @classmethod
+      def register_command_hook(cls, hook):
+        pass
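+
+Under this proposal, a project `AuroraHooks` file might look like the
+following sketch; the hook, its policy, and the assumption that
+`CommandHook` is importable when the file is loaded are all illustrative:
+
+    # AuroraHooks -- loaded from the project directory; must expose a 'hooks' list.
+    class ForbidProdKillall(CommandHook):
+      @property
+      def name(self):
+        return 'forbid_prod_killall'
+
+      def get_nouns(self):
+        return ['job']
+
+      def get_verbs(self, noun):
+        return ['killall'] if noun == 'job' else []
+
+      def pre_command(self, noun, verb, context, commandline):
+        # Allow the command only if no argument targets a 'prod' environment.
+        return not any('/prod/' in arg for arg in commandline)
+
+      def post_command(self, noun, verb, context, commandline, result):
+        pass
+
+    hooks = [ForbidProdKillall()]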
+
+## Skipping Hooks
+
+In a perfect world, hooks would represent a global property or policy
+that should always be enforced. Unfortunately, we don't live in a
+perfect world, which means that sometimes, every rule needs to get
+broken.
+
+For example, an organization could decide that every configuration
+must be checked in to source control before it could be
+deployed. That's an entirely reasonable policy. It would be easy to
+implement it using a hook. But what if there's a problem, and the
+source repo is down?
+
+The easiest solution is just to allow a user to add a `--skip-hooks`
+flag to the command-line. But doing that means that an organization
+can't actually use hooks to enforce policy, because users can skip
+them whenever they want.
+
+Instead, we'd like to have a system where it's possible to create
+hooks to enforce policy, and then include a way of building policy
+about when hooks can be skipped.
+
+I'm using sudo as a rough model for this. Many organizations need to
+give people the ability to run privileged commands, but they still
+want to have some control. Sudo allows them to specify who is allowed
+to run a privileged command; where they're allowed to run it; and what
+command(s) they're allowed to run.  All of that is specified in a
+special system file located in `/etc/sudoers` on a typical unix
+machine.
+
+In a world of distributed systems, this approach has one grave
+weakness. An aurora client can be located on any machine that has
+network access to a Mesos/Aurora cluster. It can be run by a user in
+pretty much any way they want - from a machine they control, from a
+special chroot they created, etc. Relying on a file being in a special
+location on their machine isn't sufficient - it's too easy to
+maliciously or erroneously run a command in an environment with an
+invalid hooks exceptions file.
+
+Instead, we've got two basic choices: hook exception rules can be
+baked into the client executable, or they can be provided in a
+network location.
+
+### Specifying when hooks can be skipped
+
+#### Hooks File
+
+The module `apache.aurora.client.cli` contains a variable named
+`GLOBAL_HOOK_SKIP_RULES_URL`. In the default distribution of Aurora, this variable contains
+`None`. Users can modify this value for their local environments, providing
+a site specific URL. If users attempt to bypass command hooks, and this
+URL is not `None`, then the client will fetch the contents of that URL, and
+attempt to interpret it as a hooks exception file.
+
+The hooks exception file is written in JSON, with the following structure:
+
+    { "rulename":
+      {
+      "hooks": [ "hook-name ", ... ],
+      "users": [ string, ...],
+      "commands": { "job": ["kill", "killall", ...], ... },
+      "arg-patterns": [ "regexp-str", ... ]
+      },
+      ...
+    }
+
+* `hooks` is a list of hook identifiers which can be skipped by a user
+  that satisfies this rule. If omitted, then this rule applies to _all hooks_.
+  (Omitting the `hooks` field is equivalent to giving it the value `['*']`.)
+* `users` is a list of user names, or glob expressions that range over user
+  names. This rule gives permission to those users to skip hooks. If omitted,
+  then this rule allows _any user_ to skip hooks that satisfy the rest of this rule.
+  Note that this is _user_ names, not
+  _role_ names: the rules specify users that are allowed to skip commands.
+  Some users that are allowed to work with a role account may be allowed to
+  skip, while others cannot.
+* `commands` is a map from nouns to lists of verbs. If a command `aurora n v`
+  is being executed, this rule allows the hooks to be skipped if
+  `v` is in `commands[n]`. If this is omitted, then it allows hooks to be skipped for all
+  commands that satisfy the rest of the rule.
+* `arg-patterns` is a list of glob patterns ranging over parameters.
+  If any of the parameters of the command match the parameters in this list,
+  the hook can be skipped. If omitted, then this applies regardless of arguments.
+
+For example, the following is a hook rules file which allows:
+* The user "root" to skip any hook.
+* Any user to skip hooks for test jobs.
+* A specific group of users to skip hooks for jobs in cluster `east`
+* Another group of users to skip hooks for `job kill` in cluster `west`.
+
+    {
+      "allow-admin": { "users": ["root"] },
+      "allow-test": { "users": ["\*"], "arg-patterns": ["\*/\*/test/\*"] },
+      "allow-east-users": { "users": ["john", "mary", "mike", "sue"],
+          "arg-patterns": ["east/\*/\*/\*"] },
+      "allow-west-kills": { "users": ["anne", "bill", "chris"],
+          "commands": { "job": ["kill"] }, "arg-patterns": ["west/\*/\*/\*"] }
+    }
+
+#### Programmatic Hooks Exceptions
+
+The `GlobalHooksRegistry` contains the method `add_hooks_exception`, which allows
+users to register local hooks exceptions using the `ConfigurationPlugin` mechanism.
+A hooks exception object implements the following interface:
+
+    class HooksException(object):
+      def allow_exception(self, hooks, role, noun, verb, args):
+        """Params:
+        - hooks: a list of hook-names that the user wants to skip. If this
+          is omitted, then this applies to all hooks.
+        - role: the role requesting that hooks be skipped.
+        - noun, verb: the noun and verb being executed.
+        - args: the other command-line arguments.
+        Returns: True if the user should be allowed to skip the requested hooks.
+        """
+        return False
+
+When a user supplies the `--skip-hooks` argument, `allow_exception` is invoked on
+each of the registered `HooksException` objects. If _any_ of the hooks exception objects
+returns `True`, then the user will be permitted to skip the hooks.
+
+### Skipping Hooks
+
+To skip a hook, a user uses a command-line option, `--skip-hooks`. The option can either
+specify specific hooks to skip, or "all":
+
+* `aurora --skip-hooks=all job create east/bozo/devel/myjob` will create a job
+  without running any hooks.
+* `aurora --skip-hooks=test,iq job create east/bozo/devel/myjob` will create a job,
+  and will skip only the hooks named "test" and "iq".
+
+
+## Changes
+
+4/30:
+* Rule exceptions are defined in JSON, and they are specified to be loaded from
+  a URL, not from a local file.
+* Rule exceptions specify users, not roles.
+
+4/27:
+Major changes between this and the last version of this proposal.
+* Command hooks can't be declared in a configuration file. There's a simple
+  reason why: hooks run before a command's implementation is invoked.
+  Config files are read during the command's invocation if necessary. If the
+  hook is declared in the config file, by the time you know that it should
+  have been run, it's too late. So I've removed config-hooks from the
+  proposal. (API hooks defined by configs still work.)
+* Skipping hooks. We expect aurora to be used inside of large
+  organizations. One of the primary use-cases of hooks is to create
+  enforceable policies that are specific to an organization. If hooks
+  can just be skipped because a user wants to skip them, it means that
+  the policy can't be enforced, which defeats the purpose of having them.
+  So in this update, I propose a mechanism, loosely based on a `sudo`-like
+  mechanism, for defining when hooks can be skipped.
diff --git a/source/documentation/0.5.0-incubating/developing-aurora-client.md b/source/documentation/0.5.0-incubating/developing-aurora-client.md
new file mode 100644
index 0000000..e1b2ccd
--- /dev/null
+++ b/source/documentation/0.5.0-incubating/developing-aurora-client.md
@@ -0,0 +1,114 @@
+
+Getting Started
+=================
+
+Aurora consists of four main pieces: the scheduler (which finds resources in the cluster that can be used to run a job), the executor (which uses the resources assigned by the scheduler to run a job), the command-line client, and the web UI. For information about working on the scheduler or the web UI, see the file "developing-aurora-scheduler.md" in this directory.
+
+If you want to work on the command-line client, this is the place for you!
+
+The client is written in Python, and unlike the server side of things, we build the client using the Pants build tool, instead of Gradle. Pants is a tool that was built by Twitter for handling builds of large collaborative systems. You can see a detailed explanation of
+Pants [here](http://pantsbuild.github.io/python-readme.html).
+
+To build the client executable, run the following in a command-shell:
+
+    $ ./pants src/main/python/apache/aurora/client/cli:aurora2
+
+This will produce a python executable _pex_ file in `dist/aurora2.pex`. Pex files
+are fully self-contained executables: just copy the pex file into your path, and you'll be able to run it. For example, for a typical installation:
+
+    $ cp dist/aurora2.pex /usr/local/bin/aurora
+
+To run all of the client tests:
+
+    $ ./pants src/test/python/apache/aurora/client/:all
+
+
+Client Versions
+==================
+
+There are currently two versions of the aurora client, imaginatively known as v1 and v2. All new development is done entirely in v2, but we continue to support and fix bugs in v1, until we get to the point where v2 is feature-complete and tested, and aurora users have had some time to adapt and switch their processes to use v2.
+
+Both versions are built on the same underlying API code.
+
+Client v1 was implemented using twitter.common.app. The command-line processing code for v1 can be found in `src/main/python/apache/aurora/client/commands` and
+`src/main/python/apache/aurora/client/bin`.
+
+Client v2 was implemented using its own noun/verb framework. The client v2 code can be found in `src/main/python/apache/aurora/client/cli`, and the noun/verb framework can be
+found in the `__init__.py` file in that directory.
+
+
+Building and Testing the Client
+=================================
+
+Building and testing the client code are both done using Pants. The relevant targets to know about are:
+
+   * Build a client v2 executable: `./pants src/main/python/apache/aurora/client/cli:aurora2`
+   * Test client v2 code: `./pants src/test/python/apache/aurora/client/cli:all`
+   * Build a client v1 executable: `./pants src/main/python/apache/aurora/client/bin:aurora_client`
+   * Test client v1 code: `./pants src/test/python/apache/aurora/client/commands:all`
+   * Test all client code: `./pants src/test/python/apache/aurora/client:all`
+
+
+Overview of the Client Architecture
+=====================================
+
+The client is built on a stacked architecture:
+
+   1. At the lowest level, we have a thrift RPC API interface
+    to the aurora scheduler. The interface is declared in thrift, in the file
+    `src/main/thrift/org/apache/aurora/gen/api.thrift`.
+
+  2. On top of the primitive API, we have a client API. The client API
+    takes the primitive operations provided by the scheduler, and uses them
+    to implement client-side behaviors. For example, when you update a job,
+    on the scheduler, that's done by a sequence of operations.  The sequence is implemented
+    by the client API `update` method, which does the following using the thrift API:
+     * fetching the state of task instances in the mesos cluster, and figuring out which need
+       to be updated;
+     * For each task to be updated:
+         - killing the old version;
+         - starting the new version;
+         - monitoring the new version to ensure that the update succeeded.
+  3. On top of the API, we have the command-line client itself. The core client, at this level,
+    consists of the interface to the command-line which the user will use to interact with aurora.
+    The client v2 code is found in `src/python/apache/aurora/client/cli`. In the `cli` directory,
+    the rough structure is as follows:
+       * `__init__.py` contains the noun/verb command-line processing framework used by client v2.
+       * `jobs.py` contains the implementation of the core `job` noun, and all of its operations.
+       * `bridge.py` contains the implementation of a component that allows us to ship a
+         combined client that runs both v1 and v2 client commands during the transition period.
+       * `client.py` contains the code that binds the client v2 nouns and verbs into an executable.
+
+Running/Debugging the Client
+=============================
+
+For manually testing client changes against a cluster, we use vagrant. To start a virtual cluster,
+you need to install a working vagrant environment, and then run "vagrant up" from the root of
+the aurora workspace. This will create a vagrant host named "devcluster", with a mesos master,
+a set of mesos slaves, and an aurora scheduler.
+
+To use the devcluster, you need to bring it up by running `vagrant up`, and then connect to the vagrant host using `vagrant ssh`. This will open a bash session on the virtual machine hosting the devcluster. In the home directory, there are two key paths to know about:
+
+   * `~/aurora`: this is a copy of the git workspace in which you launched the vagrant cluster.
+     To test client changes, you'll use this copy.
+   * `/vagrant`: this is a mounted filesystem that's a direct image of your git workspace.
+     This isn't a copy - it is your git workspace. Editing files on your host machine will
+     be immediately visible here, because they are the same files.
+
+Whenever the scheduler is modified, to update your vagrant environment to use the new scheduler,
+you'll need to re-initialize your vagrant images. To do this, you need to run two commands:
+
+   * `vagrant destroy`: this will delete the old devcluster image.
+   * `vagrant up`: this creates a fresh devcluster image based on the current state of your workspace.
+
+You should try to minimize rebuilding vagrant images; it's not horribly slow, but it does take a while.
+
+To test client changes:
+
+   * Make a change in your local workspace, and commit it.
+   * `vagrant ssh` into the devcluster.
+   * `cd aurora`
+   * Pull your changes into the vagrant copy: `git pull /vagrant *branchname*`.
+   * Build the modified client using pants.
+   * Run your command using `aurora2`. (You don't need to do any install; the aurora2 command
+     is a symbolic link to the executable generated by pants.)
diff --git a/source/documentation/latest/developing-aurora-scheduler.md b/source/documentation/0.5.0-incubating/developing-aurora-scheduler.md
similarity index 100%
rename from source/documentation/latest/developing-aurora-scheduler.md
rename to source/documentation/0.5.0-incubating/developing-aurora-scheduler.md
diff --git a/source/documentation/latest/hooks.md b/source/documentation/0.5.0-incubating/hooks.md
similarity index 100%
rename from source/documentation/latest/hooks.md
rename to source/documentation/0.5.0-incubating/hooks.md
diff --git a/publish/documentation/latest/images/CPUavailability.png b/source/documentation/0.5.0-incubating/images/CPUavailability.png
similarity index 100%
copy from publish/documentation/latest/images/CPUavailability.png
copy to source/documentation/0.5.0-incubating/images/CPUavailability.png
Binary files differ
diff --git a/publish/documentation/latest/images/HelloWorldJob.png b/source/documentation/0.5.0-incubating/images/HelloWorldJob.png
similarity index 100%
copy from publish/documentation/latest/images/HelloWorldJob.png
copy to source/documentation/0.5.0-incubating/images/HelloWorldJob.png
Binary files differ
diff --git a/publish/documentation/latest/images/RoleJobs.png b/source/documentation/0.5.0-incubating/images/RoleJobs.png
similarity index 100%
copy from publish/documentation/latest/images/RoleJobs.png
copy to source/documentation/0.5.0-incubating/images/RoleJobs.png
Binary files differ
diff --git a/publish/documentation/latest/images/ScheduledJobs.png b/source/documentation/0.5.0-incubating/images/ScheduledJobs.png
similarity index 100%
copy from publish/documentation/latest/images/ScheduledJobs.png
copy to source/documentation/0.5.0-incubating/images/ScheduledJobs.png
Binary files differ
diff --git a/publish/documentation/latest/images/TaskBreakdown.png b/source/documentation/0.5.0-incubating/images/TaskBreakdown.png
similarity index 100%
copy from publish/documentation/latest/images/TaskBreakdown.png
copy to source/documentation/0.5.0-incubating/images/TaskBreakdown.png
Binary files differ
diff --git a/publish/documentation/latest/images/aurora_hierarchy.png b/source/documentation/0.5.0-incubating/images/aurora_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/aurora_hierarchy.png
copy to source/documentation/0.5.0-incubating/images/aurora_hierarchy.png
Binary files differ
diff --git a/publish/documentation/latest/images/killedtask.png b/source/documentation/0.5.0-incubating/images/killedtask.png
similarity index 100%
copy from publish/documentation/latest/images/killedtask.png
copy to source/documentation/0.5.0-incubating/images/killedtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/lifeofatask.png b/source/documentation/0.5.0-incubating/images/lifeofatask.png
similarity index 100%
copy from publish/documentation/latest/images/lifeofatask.png
copy to source/documentation/0.5.0-incubating/images/lifeofatask.png
Binary files differ
diff --git a/publish/documentation/latest/images/runningtask.png b/source/documentation/0.5.0-incubating/images/runningtask.png
similarity index 100%
copy from publish/documentation/latest/images/runningtask.png
copy to source/documentation/0.5.0-incubating/images/runningtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/stderr.png b/source/documentation/0.5.0-incubating/images/stderr.png
similarity index 100%
copy from publish/documentation/latest/images/stderr.png
copy to source/documentation/0.5.0-incubating/images/stderr.png
Binary files differ
diff --git a/publish/documentation/latest/images/stdout.png b/source/documentation/0.5.0-incubating/images/stdout.png
similarity index 100%
copy from publish/documentation/latest/images/stdout.png
copy to source/documentation/0.5.0-incubating/images/stdout.png
Binary files differ
diff --git a/source/documentation/0.5.0-incubating/index.html.md b/source/documentation/0.5.0-incubating/index.html.md
new file mode 100644
index 0000000..8a72413
--- /dev/null
+++ b/source/documentation/0.5.0-incubating/index.html.md
@@ -0,0 +1,20 @@
+# Overview
+
+*Aurora* is a service scheduler that schedules jobs onto *Mesos*, which runs tasks in a specified cluster. Typical services consist of up to hundreds of task replicas.
+
+Aurora provides a *Job* abstraction consisting of a *Task* template and instructions for creating near-identical replicas of that Task (modulo things like "instance id" or specific port numbers which may differ from machine to machine).
+
+*Terminology Note*: *Replicas* are also referred to as *shards* and *instances*. While there is a general desire to move to using "instances", "shard" is still found in commands and help strings.
+
+Typically a Task is a single *Process* corresponding to a single command line, such as `python2.6 my_script.py`. However, sometimes you must colocate separate Processes within a single Task, which runs within a single container and `chroot`, often referred to as a "sandbox". For example, when you run multiple cooperating agents together, such as `logrotate`, `installer`, and master or slave processes. *Thermos* provides a Process abstraction underneath Mesos Tasks.
+
+To use and get up to speed on Aurora, you should read the docs in this directory in this order:
+
+1. How to [deploy Aurora](/documentation/0.5.0-incubating/deploying-aurora-scheduler/) or, how to [install Aurora on virtual machines on your private machine](/documentation/0.5.0-incubating/vagrant/) (the Tutorial uses the virtual machine approach).
+2. As a user, get started quickly with a [Tutorial](/documentation/0.5.0-incubating/tutorial/).
+3. For an overview of Aurora's process flow under the hood, see the [User Guide](/documentation/0.5.0-incubating/user-guide/).
+4. To learn how to write a configuration file, look at our [Configuration Tutorial](/documentation/0.5.0-incubating/configuration-tutorial/). From there, look at the [Aurora + Thermos Reference](/documentation/0.5.0-incubating/configuration-reference/).
+5. Then read up on the [Aurora Command Line Client](/documentation/0.5.0-incubating/client-commands/).
+6. Find out general information and useful tips about how Aurora does [Resource Isolation](/documentation/0.5.0-incubating/resource-isolation/).
+
+To contact the Aurora Developer List, email [dev@aurora.incubator.apache.org](mailto:dev@aurora.incubator.apache.org). You may want to read the list [archives](http://mail-archives.apache.org/mod_mbox/incubator-aurora-dev/). You can also use the IRC channel `#aurora` on `irc.freenode.net`.
diff --git a/source/documentation/latest/resource-isolation.md b/source/documentation/0.5.0-incubating/resource-isolation.md
similarity index 100%
copy from source/documentation/latest/resource-isolation.md
copy to source/documentation/0.5.0-incubating/resource-isolation.md
diff --git a/source/documentation/0.5.0-incubating/sla.md b/source/documentation/0.5.0-incubating/sla.md
new file mode 100644
index 0000000..3d56e95
--- /dev/null
+++ b/source/documentation/0.5.0-incubating/sla.md
@@ -0,0 +1,176 @@
+Aurora SLA Measurement
+--------------
+
+- [Overview](#overview)
+- [Metric Details](#metric-details)
+  - [Platform Uptime](#platform-uptime)
+  - [Job Uptime](#job-uptime)
+  - [Median Time To Assigned (MTTA)](#median-time-to-assigned-\(mtta\))
+  - [Median Time To Running (MTTR)](#median-time-to-running-\(mttr\))
+- [Limitations](#limitations)
+
+## Overview
+
+The primary goal of the feature is collection and monitoring of Aurora job SLA (Service Level
+Agreement) metrics that define a contractual relationship between the Aurora/Mesos platform
+and hosted services.
+
+The Aurora SLA feature currently supports stat collection only for service (non-cron)
+production jobs (`"production = True"` in your `.aurora` config).
+
+Counters that track SLA measurements are computed periodically within the scheduler.
+The individual instance metrics are refreshed every minute (configurable via
+`sla_stat_refresh_interval`). The instance counters are subsequently aggregated by
+relevant grouping types before being exported to the scheduler `/vars` endpoint (when using
+`vagrant` that would be `http://192.168.33.7:8081/vars`).
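+
+As a quick illustration of where these counters surface, the short Python sketch below polls the
+`/vars` endpoint and prints only the SLA counters. It assumes the vagrant devcluster address shown
+above and that `/vars` returns plain-text `name value` lines, one counter per line.
+
+```python
+import urllib.request
+
+VARS_URL = 'http://192.168.33.7:8081/vars'  # vagrant devcluster; adjust for your scheduler
+
+with urllib.request.urlopen(VARS_URL) as resp:
+  for line in resp.read().decode().splitlines():
+    # Each line looks like "<counter_name> <value>"; keep only the SLA counters.
+    if line.startswith('sla_'):
+      print(line)
+```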
+
+## Metric Details
+
+### Platform Uptime
+
+*Aggregate amount of time a job spends in a non-runnable state due to platform unavailability
+or scheduling delays. This metric tracks Aurora/Mesos uptime performance and reflects on any
+system-caused downtime events (tasks LOST or DRAINED). Any user-initiated task kills/restarts
+will not degrade this metric.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_platform_uptime_percent`
+* Per cluster - `sla_cluster_platform_uptime_percent`
+
+**Units:** percent
+
+A fault in the task environment may cause Aurora and Mesos to have different views of the task state
+or to lose track of the task's existence. In such cases, the service task is marked as LOST and
+rescheduled by Aurora. For example, this may happen when the task stays in ASSIGNED or STARTING
+for too long or the Mesos slave becomes unhealthy (or disappears completely). The time between a
+task entering LOST and its replacement reaching the RUNNING state is counted towards platform downtime.
+
+Another example of a platform downtime event is the administrator-requested task rescheduling. This
+happens during planned Mesos slave maintenance when all slave tasks are marked as DRAINED and
+rescheduled elsewhere.
+
+To accurately calculate Platform Uptime, we must separate platform incurred downtime from user
+actions that put a service instance in a non-operational state. It is simpler to isolate
+user-incurred downtime and treat all other downtime as platform incurred.
+
+Currently, a user can cause a healthy service (task) downtime in only two ways: via `killTasks`
+or `restartShards` RPCs. For both, their affected tasks leave an audit state transition trail
+relevant to uptime calculations. By applying a special "SLA meaning" to exposed task state
+transition records, we can build a deterministic downtime trace for every given service instance.
+
+A task going through a state transition carries one of three possible SLA meanings
+(see [SlaAlgorithm.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/sla/SlaAlgorithm.java) for
+the SLA-to-task-state mapping):
+
+* Task is UP: starts a period where the task is considered to be up and running from the Aurora
+  platform standpoint.
+
+* Task is DOWN: starts a period where the task cannot reach the UP state for some
+  non-user-related reason. Counts towards instance downtime.
+
+* Task is REMOVED from SLA: starts a period where the task is not expected to be UP due to
+  user initiated action or failure. We ignore this period for the uptime calculation purposes.
+
+This metric is recalculated over the last sampling period (last minute) to account for
+any UP/DOWN/REMOVED events. It ignores any UP/DOWN events not immediately adjacent to the
+sampling interval as well as adjacent REMOVED events.
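+
+To make the bookkeeping concrete, here is a small illustrative sketch (not the scheduler's actual
+implementation) that turns a time-ordered trace of UP/DOWN/REMOVED transitions into an uptime
+percentage for one sampling window:
+
+```python
+def platform_uptime_percent(transitions, window_start, window_end):
+  """Illustrative only. `transitions` is a time-ordered list of (timestamp, state)
+  pairs where state is 'UP', 'DOWN' or 'REMOVED' (the SLA meanings above)."""
+  up = down = 0.0
+  for (start, state), (end, _) in zip(transitions, transitions[1:] + [(window_end, None)]):
+    # Clip each interval to the sampling window.
+    begin, finish = max(start, window_start), min(end, window_end)
+    if finish <= begin:
+      continue
+    if state == 'UP':
+      up += finish - begin
+    elif state == 'DOWN':
+      down += finish - begin
+    # 'REMOVED' intervals are ignored for uptime purposes.
+  total = up + down
+  return 100.0 if total == 0 else 100.0 * up / total
+```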
+
+### Job Uptime
+
+*Percentage of the job instances considered to be in RUNNING state for the specified duration
+relative to request time. This is a purely application-side metric that considers the aggregate
+uptime of all RUNNING instances. Any user- or platform-initiated restarts directly affect
+this metric.*
+
+**Collection scope:** We currently expose job uptime values at 5 pre-defined
+percentiles (50th,75th,90th,95th and 99th):
+
+* `sla_<job_key>_job_uptime_50_00_sec`
+* `sla_<job_key>_job_uptime_75_00_sec`
+* `sla_<job_key>_job_uptime_90_00_sec`
+* `sla_<job_key>_job_uptime_95_00_sec`
+* `sla_<job_key>_job_uptime_99_00_sec`
+
+**Units:** seconds
+
+You can also get customized real-time stats from the Aurora client. See `aurora sla -h` for
+more details.
+
+### Median Time To Assigned (MTTA)
+
+*Median time a job spends waiting for its tasks to be assigned to a host. This is a combined
+metric that helps track the dependency of scheduling performance on the requested resources
+(user scope) as well as the internal scheduler bin-packing algorithm efficiency (platform scope).*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtta_ms`
+* Per cluster - `sla_cluster_mtta_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in
+[ResourceAggregates.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java)
+  * By CPU:
+    * `sla_cpu_small_mtta_ms`
+    * `sla_cpu_medium_mtta_ms`
+    * `sla_cpu_large_mtta_ms`
+    * `sla_cpu_xlarge_mtta_ms`
+    * `sla_cpu_xxlarge_mtta_ms`
+  * By RAM:
+    * `sla_ram_small_mtta_ms`
+    * `sla_ram_medium_mtta_ms`
+    * `sla_ram_large_mtta_ms`
+    * `sla_ram_xlarge_mtta_ms`
+    * `sla_ram_xxlarge_mtta_ms`
+  * By DISK:
+    * `sla_disk_small_mtta_ms`
+    * `sla_disk_medium_mtta_ms`
+    * `sla_disk_large_mtta_ms`
+    * `sla_disk_xlarge_mtta_ms`
+    * `sla_disk_xxlarge_mtta_ms`
+
+**Units:** milliseconds
+
+MTTA only considers instances that have already reached ASSIGNED state and ignores those
+that are still PENDING. This ensures straggler instances (e.g. with unreasonable resource
+constraints) do not affect metric curves.
+
+### Median Time To Running (MTTR)
+
+*Median time a job waits for its tasks to reach RUNNING state. This is a comprehensive metric
+reflecting the overall time it takes Aurora/Mesos to start executing user content.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mttr_ms`
+* Per cluster - `sla_cluster_mttr_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in
+[ResourceAggregates.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java)
+  * By CPU:
+    * `sla_cpu_small_mttr_ms`
+    * `sla_cpu_medium_mttr_ms`
+    * `sla_cpu_large_mttr_ms`
+    * `sla_cpu_xlarge_mttr_ms`
+    * `sla_cpu_xxlarge_mttr_ms`
+  * By RAM:
+    * `sla_ram_small_mttr_ms`
+    * `sla_ram_medium_mttr_ms`
+    * `sla_ram_large_mttr_ms`
+    * `sla_ram_xlarge_mttr_ms`
+    * `sla_ram_xxlarge_mttr_ms`
+  * By DISK:
+    * `sla_disk_small_mttr_ms`
+    * `sla_disk_medium_mttr_ms`
+    * `sla_disk_large_mttr_ms`
+    * `sla_disk_xlarge_mttr_ms`
+    * `sla_disk_xxlarge_mttr_ms`
+
+**Units:** milliseconds
+
+MTTR only considers instances in RUNNING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+## Limitations
+
+* The availability of Aurora SLA metrics is bound by the scheduler availability.
+
+* All metrics are calculated at a pre-defined interval (currently set at 1 minute).
+  Scheduler restarts may result in missed collections.
\ No newline at end of file
diff --git a/source/documentation/0.5.0-incubating/tutorial.md b/source/documentation/0.5.0-incubating/tutorial.md
new file mode 100644
index 0000000..a211ddd
--- /dev/null
+++ b/source/documentation/0.5.0-incubating/tutorial.md
@@ -0,0 +1,265 @@
+Aurora Tutorial
+---------------
+
+- [Introduction](#introduction)
+- [Setup: Install Aurora](#setup-install-aurora)
+- [The Script](#the-script)
+- [Aurora Configuration](#aurora-configuration)
+- [What's Going On In That Configuration File?](#whats-going-on-in-that-configuration-file)
+- [Creating the Job](#creating-the-job)
+- [Watching the Job Run](#watching-the-job-run)
+- [Cleanup](#cleanup)
+- [Next Steps](#next-steps)
+
+## Introduction
+
+This tutorial shows how to use the Aurora scheduler to run (and
+"`printf-debug`") a hello world program on Mesos. The operational
+hierarchy is:
+
+- Aurora manages and schedules jobs for Mesos to run.
+- Mesos manages the individual tasks that make up a job.
+- Thermos manages the individual processes that make up a task.
+
+This is the recommended first document for new Aurora users to read to start
+getting up to speed on the system.
+
+To get help, email questions to the Aurora Developer List,
+[dev@aurora.incubator.apache.org](mailto:dev@aurora.incubator.apache.org)
+
+## Setup: Install Aurora
+
+You use the Aurora client and web UI to interact with Aurora jobs. To
+install it locally, see [vagrant.md](/documentation/0.5.0-incubating/vagrant/). The remainder of this
+Tutorial assumes you are running Aurora using Vagrant.  Unless otherwise stated,
+all commands are to be run from the root of the aurora repository clone.
+
+## The Script
+
+Our "hello world" application is a simple Python script that loops
+forever, displaying the time every few seconds. Copy the code below and
+put it in a file named `hello_world.py` in the root of your Aurora repository clone (Note:
+this directory is the same as `/vagrant` inside the Vagrant VMs).
+
+The script has an intentional bug, which we will explain later on.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+import sys
+import time
+
+def main(argv):
+  SLEEP_DELAY = 10
+  # Python ninjas - ignore this blatant bug.
+  for i in xrang(100):
+    print("Hello world! The time is now: %s. Sleeping for %d secs" % (
+      time.asctime(), SLEEP_DELAY))
+    sys.stdout.flush()
+    time.sleep(SLEEP_DELAY)
+
+if __name__ == "__main__":
+  main(sys.argv)
+```
+
+## Aurora Configuration
+
+Once we have our script/program, we need to create a *configuration
+file* that tells Aurora how to manage and launch our Job. Save the below
+code in the file `hello_world.aurora`.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+pkg_path = '/vagrant/hello_world.py'
+
+# we use a trick here to make the configuration change with
+# the contents of the file, for simplicity.  in a normal setting, packages would be
+# versioned, and the version number would be changed in the configuration.
+import hashlib
+with open(pkg_path, 'rb') as f:
+  pkg_checksum = hashlib.md5(f.read()).hexdigest()
+
+# copy hello_world.py into the local sandbox
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s . && echo %s && chmod +x hello_world.py' % (pkg_path, pkg_checksum))
+
+# run the script
+hello_world = Process(
+  name = 'hello_world',
+  cmdline = 'python hello_world.py')
+
+# describe the task
+hello_world_task = SequentialTask(
+  processes = [install, hello_world],
+  resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB))
+
+jobs = [
+  Service(cluster = 'devcluster',
+          environment = 'devel',
+          role = 'www-data',
+          name = 'hello_world',
+          task = hello_world_task)
+]
+```
+
+For more about Aurora configuration files, see the [Configuration
+Tutorial](/documentation/0.5.0-incubating/configuration-tutorial/) and the [Aurora + Thermos
+Reference](/documentation/0.5.0-incubating/configuration-reference/) (preferably after finishing this
+tutorial).
+
+## What's Going On In That Configuration File?
+
+More than you might think.
+
+1. From a "big picture" viewpoint, it first defines two
+Processes. Then it defines a Task that runs the two Processes in the
+order specified in the Task definition, as well as specifying what
+computational and memory resources are available for them.  Finally,
+it defines a Job that will schedule the Task on available and suitable
+machines. This Job is the sole member of a list of Jobs; you can
+specify more than one Job in a config file.
+
+2. At the Process level, it specifies how to get your code into the
+local sandbox in which it will run. It then specifies how the code is
+actually run once the second Process starts.
+
+## Creating the Job
+
+We're ready to launch our job! To do so, we use the Aurora Client to
+issue a Job creation request to the Aurora scheduler.
+
+Many Aurora Client commands take a *job key* argument, which uniquely
+identifies a Job. A job key consists of four parts, each separated by a
+"/". The four parts are  `<cluster>/<role>/<environment>/<jobname>`
+in that order. When comparing two job keys, if any of the
+four parts is different from its counterpart in the other key, then the
+two job keys identify two separate jobs. If all four values are
+identical, the job keys identify the same job.
+
+`/etc/aurora/clusters.json` within the Aurora scheduler has the available
+cluster names. For Vagrant, from the top-level of your Aurora repository clone,
+do:
+
+    $ vagrant ssh
+
+Followed by:
+
+    vagrant@precise64:~$ cat /etc/aurora/clusters.json
+
+You'll see something like:
+
+```javascript
+[{
+  "name": "devcluster",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler",
+  "auth_mechanism": "UNAUTHENTICATED"
+}]
+```
+
+Use a `name` value for your job key's cluster value.
+
+Role names are user accounts existing on the slave machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
+The Aurora Client command that actually runs our Job is `aurora create`. It creates a Job as
+specified by its job key and configuration file arguments and runs it.
+
+    aurora create <cluster>/<role>/<environment>/<jobname> <config_file>
+
+Or for our example:
+
+    aurora create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+This returns:
+
+    $ vagrant ssh
+    Welcome to Ubuntu 12.04 LTS (GNU/Linux 3.2.0-23-generic x86_64)
+
+     * Documentation:  https://help.ubuntu.com/
+    Welcome to your Vagrant-built virtual machine.
+    Last login: Fri Jan  3 02:18:55 2014 from 10.0.2.2
+    vagrant@precise64:~$ aurora create devcluster/www-data/devel/hello_world \
+        /vagrant/hello_world.aurora
+     INFO] Creating job hello_world
+     INFO] Response from scheduler: OK (message: 1 new tasks pending for job
+      www-data/devel/hello_world)
+     INFO] Job url: http://precise64:8081/scheduler/www-data/devel/hello_world
+
+## Watching the Job Run
+
+Now that our job is running, let's see what it's doing. Access the
+scheduler web interface at `http://$scheduler_hostname:$scheduler_port/scheduler`,
+or, when using `vagrant`, at `http://192.168.33.7:8081/scheduler`.
+First we see what Jobs are scheduled:
+
+![Scheduled Jobs](images/ScheduledJobs.png)
+
+Click on your user name, which in this case was `www-data`, and we see the Jobs associated
+with that role:
+
+![Role Jobs](images/RoleJobs.png)
+
+If you click on your `hello_world` Job, you'll see:
+
+![hello_world Job](images/HelloWorldJob.png)
+
+Oops, looks like our first job didn't quite work! The task failed, so we have
+to figure out what went wrong.
+
+Access the page for our Task by clicking on its host.
+
+![Task page](images/TaskBreakdown.png)
+
+Once there, we see that the
+`hello_world` process failed. The Task page captures the standard error and
+standard output streams and makes them available. Clicking through
+to `stderr` on the failed `hello_world` process, we see what happened.
+
+![stderr page](images/stderr.png)
+
+It looks like we made a typo in our Python script. We wanted `xrange`,
+not `xrang`. Edit the `hello_world.py` script to use the correct function and
+we will try again.
+
+    aurora update devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+This time, the task comes up, we inspect the page, and see that the
+`hello_world` process is running.
+
+![Running Task page](images/runningtask.png)
+
+We then inspect the output by clicking on `stdout` and see our process'
+output:
+
+![stdout page](images/stdout.png)
+
+## Cleanup
+
+Now that we're done, we kill the job using the Aurora client:
+
+    vagrant@precise64:~$ aurora killall devcluster/www-data/devel/hello_world
+     INFO] Killing tasks for job: devcluster/www-data/devel/hello_world
+     INFO] Response from scheduler: OK (message: Tasks killed.)
+     INFO] Job url: http://precise64:8081/scheduler/www-data/devel/hello_world
+    vagrant@precise64:~$
+
+The job page now shows the `hello_world` tasks as completed.
+
+![Killed Task page](images/killedtask.png)
+
+## Next Steps
+
+Now that you've finished this Tutorial, you should read or do the following:
+
+- [The Aurora Configuration Tutorial](/documentation/0.5.0-incubating/configuration-tutorial/), which provides more examples
+  and best practices for writing Aurora configurations. You should also look at
+  the [Aurora + Thermos Configuration Reference](/documentation/0.5.0-incubating/configuration-reference/).
+- The [Aurora User Guide](/documentation/0.5.0-incubating/user-guide/) provides an overview of how Aurora, Mesos, and
+  Thermos work "under the hood".
+- Explore the Aurora Client - use the `aurora help` subcommand, and read the
+  [Aurora Client Commands](/documentation/0.5.0-incubating/client-commands/) document.
diff --git a/source/documentation/0.5.0-incubating/user-guide.md b/source/documentation/0.5.0-incubating/user-guide.md
new file mode 100644
index 0000000..1fae086
--- /dev/null
+++ b/source/documentation/0.5.0-incubating/user-guide.md
@@ -0,0 +1,335 @@
+Aurora User Guide
+-----------------
+
+- [Overview](#overview)
+- [Job Lifecycle](#job-lifecycle)
+	- [Life Of A Task](#life-of-a-task)
+	- [PENDING to RUNNING states](#pending-to-running-states)
+	- [Task Updates](#task-updates)
+	- [HTTP Health Checking and Graceful Shutdown](#http-health-checking-and-graceful-shutdown)
+		- [Tearing a task down](#tearing-a-task-down)
+	- [Giving Priority to Production Tasks: PREEMPTING](#giving-priority-to-production-tasks-preempting)
+	- [Natural Termination: FINISHED, FAILED](#natural-termination-finished-failed)
+	- [Forceful Termination: KILLING, RESTARTING](#forceful-termination-killing-restarting)
+- [Configuration](#configuration)
+- [Creating Jobs](#creating-jobs)
+- [Interacting With Jobs](#interacting-with-jobs)
+
+Overview
+--------
+
+This document gives an overview of how Aurora works under the hood.
+It assumes you've already worked through the "hello world" example
+job in the [Aurora Tutorial](/documentation/0.5.0-incubating/tutorial/). Specifics of how to use Aurora are **not**
+given here, but pointers to documentation about how to use Aurora are
+provided.
+
+Aurora is a Mesos framework used to schedule *jobs* onto Mesos. Mesos
+cares about individual *tasks*, but typical jobs consist of dozens or
+hundreds of task replicas. Aurora provides a layer on top of Mesos with
+its `Job` abstraction. An Aurora `Job` consists of a task template and
+instructions for creating near-identical replicas of that task (modulo
+things like "instance id" or specific port numbers which may differ from
+machine to machine).
+
+How many tasks make up a Job is complicated. On a basic level, a Job consists of
+one task template and instructions for creating near-identical replicas of that task
+(otherwise referred to as "instances" or "shards").
+
+However, since Jobs can be updated on the fly, a single Job identifier or *job key*
+can have multiple job configurations associated with it.
+
+For example, consider when I have a Job with 4 instances that each
+request 1 core of cpu, 1 GB of RAM, and 1 GB of disk space as specified
+in the configuration file `hello_world.aurora`. I want to
+update it so it requests 2 GB of RAM instead of 1. I create a new
+configuration file to do that called `new_hello_world.aurora` and
+issue an `aurora update --shards=0-1 <job_key_value> new_hello_world.aurora`
+command.
+
+This results in instances 0 and 1 having 1 cpu, 2 GB of RAM, and 1 GB of disk space,
+while instances 2 and 3 have 1 cpu, 1 GB of RAM, and 1 GB of disk space. If instance 3
+dies and restarts, it restarts with 1 cpu, 1 GB RAM, and 1 GB disk space.
+
+So that means there are two simultaneous task configurations for the same Job
+at the same time, just valid for different ranges of instances.
+
+This isn't a recommended pattern, but it is valid and supported by the
+Aurora scheduler. This most often manifests in the "canary pattern" where
+instance 0 runs with a different configuration than instances 1-N to test
+different code versions alongside the actual production job.
+
+A task can merely be a single *process* corresponding to a single
+command line, such as `python2.6 my_script.py`. However, a task can also
+consist of many separate processes, which all run within a single
+sandbox. For example, running multiple cooperating agents together,
+such as `logrotate`, `installer`, master, or slave processes. This is
+where Thermos  comes in. While Aurora provides a `Job` abstraction on
+top of Mesos `Tasks`, Thermos provides a `Process` abstraction
+underneath Mesos `Task`s and serves as part of the Aurora framework's
+executor.
+
+You define `Job`s, `Task`s, and `Process`es in a configuration file.
+Configuration files are written in Python, and make use of the Pystachio
+templating language. They end in a `.aurora` extension.
+
+Pystachio is a type-checked dictionary templating library.
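+
+As a rough, Aurora-independent sketch of what Pystachio gives you (based on the Pystachio README;
+exact APIs can differ between versions), templating and type checking look like this:
+
+```python
+from pystachio import Integer, String, Struct
+
+greeting = String('Hello, {{name}}!')
+bound = greeting.bind(name = 'world')        # fills in the {{name}} mustache variable
+
+class Server(Struct):
+  name = String
+  port = Integer
+
+ok = Server(name = 'web', port = 8080).check().ok()   # type-checks the dictionary
+```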
+
+> TL;DR
+>
+> -   Aurora manages jobs made of tasks.
+> -   Mesos manages tasks made of processes.
+> -   Thermos manages processes.
+> -   All defined in `.aurora` configuration file.
+
+![Aurora hierarchy](images/aurora_hierarchy.png)
+
+Each `Task` has a *sandbox* created when the `Task` starts and garbage
+collected when it finishes. All of a `Task`'s processes run in its
+sandbox, so processes can share state by using a shared current working
+directory.
+
+The sandbox garbage collection policy considers many factors, most
+importantly age and size. It makes a best-effort attempt to keep
+sandboxes around as long as possible post-task in order for service
+owners to inspect data and logs, should the `Task` have completed
+abnormally. But you should not design your applications assuming sandboxes
+will be around forever; instead, build log saving or other
+checkpointing mechanisms directly into your application or into your
+`Job` description.
+
+Job Lifecycle
+-------------
+
+When Aurora reads a configuration file and finds a `Job` definition, it:
+
+1.  Evaluates the `Job` definition.
+2.  Splits the `Job` into its constituent `Task`s.
+3.  Sends those `Task`s to the scheduler.
+4.  The scheduler puts the `Task`s into `PENDING` state, starting each
+    `Task`'s life cycle.
+
+### Life Of A Task
+
+![Life of a task](images/lifeofatask.png)
+
+### PENDING to RUNNING states
+
+When a `Task` is in the `PENDING` state, the scheduler constantly
+searches for machines satisfying that `Task`'s resource request
+requirements (RAM, disk space, CPU time) while maintaining configuration
+constraints such as "a `Task` must run on machines  dedicated  to a
+particular role" or attribute limit constraints such as "at most 2
+`Task`s from the same `Job` may run on each rack". When the scheduler
+finds a suitable match, it assigns the `Task` to a machine and puts the
+`Task` into the `ASSIGNED` state.
+
+From the `ASSIGNED` state, the scheduler sends an RPC to the slave
+machine containing `Task` configuration, which the slave uses to spawn
+an executor responsible for the `Task`'s lifecycle. When the scheduler
+receives an acknowledgement that the machine has accepted the `Task`,
+the `Task` goes into `STARTING` state.
+
+`STARTING` state initializes a `Task` sandbox. When the sandbox is fully
+initialized, Thermos begins to invoke `Process`es. Also, the slave
+machine sends an update to the scheduler that the `Task` is
+in `RUNNING` state.
+
+If a `Task` stays in `ASSIGNED` or `STARTING` for too long, the
+scheduler forces it into `LOST` state, creating a new `Task` in its
+place that's sent into `PENDING` state. This is technically true of any
+active state: if the Mesos core tells the scheduler that a slave has
+become unhealthy (or outright disappeared), the `Task`s assigned to that
+slave go into `LOST` state and new `Task`s are created in their place.
+From `PENDING` state, there is no guarantee a `Task` will be reassigned
+to the same machine unless job constraints explicitly force it there.
+
+If there is a state mismatch (e.g. a machine returns from a `netsplit`
+and the scheduler has marked all its `Task`s `LOST` and rescheduled
+them), a state reconciliation process kills the errant `RUNNING` tasks,
+which may take up to an hour. But to emphasize this point: there is no
+uniqueness guarantee for a single instance of a job in the presence of
+network partitions. If the Task requires that, it should be baked in at
+the application level using a distributed coordination service such as
+Zookeeper.
+
+### Task Updates
+
+`Job` configurations can be updated at any point in their lifecycle.
+Usually updates are done incrementally using a process called a *rolling
+upgrade*, in which Tasks are upgraded in small groups, one group at a
+time.  Updates are done using various Aurora Client commands.
+
+For a configuration update, the Aurora Client calculates required changes
+by examining the current job config state and the new desired job config.
+It then starts a rolling batched update process by going through every batch
+and performing these operations:
+
+- If an instance is present in the scheduler but isn't in the new config,
+  then that instance is killed.
+- If an instance is not present in the scheduler but is present in
+  the new config, then the instance is created.
+- If an instance is present in both the scheduler and the new config, then
+  the client diffs both task configs. If it detects any changes, it
+  performs an instance update by killing the old config instance and adding
+  the new config instance.
+
+The Aurora client continues through the instance list until all tasks are
+updated, in `RUNNING`, and healthy for a configurable amount of time.
+If the client determines the update is not going well (a percentage of health
+checks have failed), it cancels the update.
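+
+The per-instance decisions described above boil down to a simple diff. The following sketch is
+illustrative only (it is not the client's actual code); `current` and `desired` map instance ids
+to task configs:
+
+```python
+def plan_update(current, desired):
+  kill, create, update = [], [], []
+  for instance in sorted(set(current) | set(desired)):
+    if instance not in desired:
+      kill.append(instance)        # present in the scheduler only
+    elif instance not in current:
+      create.append(instance)      # present in the new config only
+    elif current[instance] != desired[instance]:
+      update.append(instance)      # config changed: kill old, add new
+  return kill, create, update
+```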
+
+Update cancellation runs a procedure similar to the update sequence described
+above, but in reverse order. New instance configs are swapped
+with old instance configs and batch updates proceed backwards
+from the point where the update failed. E.g. batches (0,1,2) (3,4,5) (6,7,
+8-FAIL) roll back in the order (8,7,6) (5,4,3) (2,1,0).
+
+### HTTP Health Checking and Graceful Shutdown
+
+The Executor implements a protocol for rudimentary control of a task via HTTP.  Tasks subscribe to
+this protocol by declaring a port named `health`.  Take for example this configuration snippet:
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[http]}}')
+
+When this Process is included in a job, the job will be allocated a port, and the command line
+will be replaced with something like:
+
+    ./run_nginx.sh -port 42816
+
+Where 42816 happens to be the allocated port.  Typically, the Executor monitors Processes within
+a task only by liveness of the forked process.  However, when a `health` port is allocated, it will
+also send periodic HTTP health checks.  A task requesting a `health` port must handle the following
+requests:
+
+| HTTP request            | Description                             |
+| ------------            | -----------                             |
+| `GET /health`           | Inquires whether the task is healthy.   |
+| `POST /quitquitquit`    | Task should initiate graceful shutdown. |
+| `POST /abortabortabort` | Final warning task is being killed.     |
+
+Please see the
+[configuration reference](/documentation/0.5.0-incubating/configuration-reference/#healthcheckconfig-objects) for
+configuration options for this feature.
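+
+As a minimal sketch of the task side of this protocol (not part of Aurora itself, and assuming the
+health checker treats an HTTP 200 response as healthy), a task might serve these endpoints like so:
+
+```python
+import os
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+class ControlHandler(BaseHTTPRequestHandler):
+  def _reply(self, code, body):
+    self.send_response(code)
+    self.send_header('Content-Length', str(len(body)))
+    self.end_headers()
+    self.wfile.write(body)
+
+  def do_GET(self):
+    if self.path == '/health':
+      self._reply(200, b'ok')            # assumption: an "ok" body signals a healthy task
+    else:
+      self._reply(404, b'not found')
+
+  def do_POST(self):
+    if self.path in ('/quitquitquit', '/abortabortabort'):
+      self._reply(200, b'shutting down')
+      # ... begin graceful shutdown of the task here ...
+    else:
+      self._reply(404, b'not found')
+
+# In an Aurora config the port would come from {{thermos.ports[health]}}.
+port = int(os.environ.get('HEALTH_PORT', 8080))
+HTTPServer(('0.0.0.0', port), ControlHandler).serve_forever()
+```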
+
+#### Tearing a task down
+
+The Executor follows an escalation sequence when killing a running task:
+
+  1. If `health` port is not present, skip to (5)
+  2. POST /quitquitquit
+  3. wait 5 seconds
+  4. POST /abortabortabort
+  5. Send SIGTERM (`kill`)
+  6. Send SIGKILL (`kill -9`)
+
+If the Executor notices that all Processes in a Task have aborted during this sequence, it will
+not proceed with subsequent steps.  Note that graceful shutdown is best-effort, and due to the many
+inevitable realities of distributed systems, it may not be performed.
+
+### Giving Priority to Production Tasks: PREEMPTING
+
+Sometimes a Task needs to be interrupted, such as when a non-production
+Task's resources are needed by a higher priority production Task. This
+type of interruption is called a *pre-emption*. When this happens in
+Aurora, the non-production Task is killed and moved into
+the `PREEMPTING` state when both of the following are true:
+
+- The task being killed is a non-production task.
+- The other task is a `PENDING` production task that hasn't been
+  scheduled due to a lack of resources.
+
+Since production tasks are much more important, Aurora kills off the
+non-production task to free up resources for the production task. The
+scheduler UI shows the non-production task was preempted in favor of the
+production task. At some point, tasks in `PREEMPTING` move to `KILLED`.
+
+Note that non-production tasks consuming many resources are likely to be
+preempted in favor of production tasks.
+
+### Natural Termination: FINISHED, FAILED
+
+A `RUNNING` `Task` can terminate without direct user interaction. For
+example, it may be a finite computation that finishes, even something as
+simple as `echo hello world`. Or it could be an exceptional condition in
+a long-lived service. If the `Task` is successful (its underlying
+processes have succeeded with exit status `0` or finished without
+reaching failure limits) it moves into `FINISHED` state. If it finished
+after reaching a set of failure limits, it goes into `FAILED` state.
+
+### Forceful Termination: KILLING, RESTARTING
+
+You can terminate a `Task` by issuing an `aurora kill` command, which
+moves it into `KILLING` state. The scheduler then sends the slave  a
+request to terminate the `Task`. If the scheduler receives a successful
+response, it moves the Task into `KILLED` state and never restarts it.
+
+The scheduler has access to a non-public `RESTARTING` state. If a `Task`
+is forced into the `RESTARTING` state, the scheduler kills the
+underlying task but in parallel schedules an identical replacement for
+it.
+
+Configuration
+-------------
+
+You define and configure your Jobs (and their Tasks and Processes) in
+Aurora configuration files. Their filenames end with the `.aurora`
+suffix, and you write them in Python making use of the Pystachio
+templating language, along
+with specific Aurora, Mesos, and Thermos commands and methods. See the
+[Configuration Guide and Reference](/documentation/0.5.0-incubating/configuration-reference/) and
+[Configuration Tutorial](/documentation/0.5.0-incubating/configuration-tutorial/).
+
+Creating Jobs
+-------------
+
+You create and manipulate Aurora Jobs with the Aurora client, which starts all its
+command line commands with
+`aurora`. See [Aurora Client Commands](/documentation/0.5.0-incubating/client-commands/) for details
+about the Aurora Client.
+
+Interacting With Jobs
+---------------------
+
+You interact with Aurora jobs either via:
+
+- Read-only Web UIs
+
+  Part of the output from creating a new Job is a URL for the Job's scheduler UI page.
+
+  For example:
+
+      vagrant@precise64:~$ aurora create example/www-data/prod/hello \
+      /vagrant/examples/jobs/hello_world.aurora
+      INFO] Creating job hello
+      INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+      INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+  The "Job url" goes to the Job's scheduler UI page. To go to the overall scheduler UI page,
+  stop at the "scheduler" part of the URL, in this case, `http://precise64:8081/scheduler`
+
+  You can also reach the scheduler UI page via the Client command `aurora open`:
+
+      aurora open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+  If only the cluster is specified, it goes directly to that cluster's scheduler main page.
+  If the role is specified, it goes to the top-level role page. If the full job key is specified,
+  it goes directly to the job page where you can inspect individual tasks.
+
+  Once you click through to a role page, you see Jobs arranged separately by pending jobs, active
+  jobs, and finished jobs. Jobs are arranged by role, typically a service account for production
+  jobs and user accounts for test or development jobs.
+
+- The Aurora Client's command line interface
+
+  Several Client commands have a `-o` option that automatically opens a window to
+  the specified Job's scheduler UI URL. And, as described above, the `open` command also takes
+  you there.
+
+  For a complete list of Aurora Client commands, use `aurora help` and, for specific
+  command help, `aurora help [command]`. **Note**: `aurora help open`
+  returns `"subcommand open not found"` due to our reflection tricks not
+  working on words that are also builtin Python function names. Or see the
+  [Aurora Client Commands](/documentation/0.5.0-incubating/client-commands/) document.
diff --git a/source/documentation/0.5.0-incubating/vagrant.md b/source/documentation/0.5.0-incubating/vagrant.md
new file mode 100644
index 0000000..b66ff67
--- /dev/null
+++ b/source/documentation/0.5.0-incubating/vagrant.md
@@ -0,0 +1,51 @@
+Getting Started
+===============
+To replicate a real cluster environment as closely as possible, we use
+[Vagrant](http://www.vagrantup.com/) to launch a complete Aurora cluster in a virtual machine.
+
+Prerequisites
+-------------
+  * [VirtualBox](https://www.virtualbox.org/)
+  * [Vagrant](http://www.vagrantup.com/)
+  * A clone of the Aurora repository, or source distribution.
+
+You can start a local cluster by running:
+
+    vagrant up
+
+Once started, several services should be running:
+
+  * scheduler is listening on http://192.168.33.7:8081
+  * observer is listening on http://192.168.33.7:1338
+  * master is listening on http://192.168.33.7:5050
+  * slave is listening on http://192.168.33.7:5051
+
+You can SSH into the machine with `vagrant ssh` and execute aurora client commands using the
+`aurora` command.  A pre-installed `clusters.json` file refers to your local cluster as
+`devcluster`, which you will use in client commands.
+
+Deleting your local cluster
+===========================
+Once you are finished with your local cluster, or if you would otherwise like to start from scratch,
+you can use the command `vagrant destroy` to turn off and delete the virtual file system.
+
+
+Rebuilding components
+=====================
+If you are changing Aurora code and would like to rebuild a component, you can use the `aurorabuild`
+command on your vagrant machine to build and restart a component.  This is considerably faster than
+destroying and rebuilding your VM.
+
+`aurorabuild` accepts a list of components to build and update.  You may invoke the command with
+no arguments to get a list of supported components.
+
+     vagrant ssh -c 'sudo aurorabuild client'
+
+
+Troubleshooting
+===============
+Most vagrant-related problems can be fixed by the following steps:
+
+* Destroying the vagrant environment with `vagrant destroy`
+* Killing any orphaned VMs (see AURORA-499) with `virtualbox` UI or `VBoxManage` command line tool
+* Cleaning the repository of build artifacts and other intermediate output with `git clean -fdx`
+* Bringing up the vagrant environment with `vagrant up`
diff --git a/source/documentation/latest/client-cluster-configuration.md b/source/documentation/0.6.0-incubating/client-cluster-configuration.md
similarity index 100%
copy from source/documentation/latest/client-cluster-configuration.md
copy to source/documentation/0.6.0-incubating/client-cluster-configuration.md
diff --git a/source/documentation/0.6.0-incubating/client-commands.md b/source/documentation/0.6.0-incubating/client-commands.md
new file mode 100644
index 0000000..0ba6ec4
--- /dev/null
+++ b/source/documentation/0.6.0-incubating/client-commands.md
@@ -0,0 +1,516 @@
+Aurora Client Commands
+======================
+
+The most up-to-date reference is in the client itself: use the
+`aurora help` subcommand (for example, `aurora help` or
+`aurora help create`) to find the latest information on parameters and
+functionality. Note that `aurora help open` does not work, due to underlying issues with
+reflection.
+
+- [Introduction](#introduction)
+- [Cluster Configuration](#cluster-configuration)
+- [Job Keys](#job-keys)
+- [Modifying Aurora Client Commands](#modifying-aurora-client-commands)
+- [Regular Jobs](#regular-jobs)
+    - [Creating and Running a Job](#creating-and-running-a-job)
+    - [Running a Command On a Running Job](#running-a-command-on-a-running-job)
+    - [Killing a Job](#killing-a-job)
+    - [Updating a Job](#updating-a-job)
+        - [Asynchronous job updates (beta)](#asynchronous-job-updates-beta)
+    - [Renaming a Job](#renaming-a-job)
+    - [Restarting Jobs](#restarting-jobs)
+- [Cron Jobs](#cron-jobs)
+- [Comparing Jobs](#comparing-jobs)
+- [Viewing/Examining Jobs](#viewingexamining-jobs)
+    - [Listing Jobs](#listing-jobs)
+    - [Inspecting a Job](#inspecting-a-job)
+    - [Versions](#versions)
+    - [Checking Your Quota](#checking-your-quota)
+    - [Finding a Job on Web UI](#finding-a-job-on-web-ui)
+    - [Getting Job Status](#getting-job-status)
+    - [Opening the Web UI](#opening-the-web-ui)
+    - [SSHing to a Specific Task Machine](#sshing-to-a-specific-task-machine)
+    - [Templating Command Arguments](#templating-command-arguments)
+
+Introduction
+------------
+
+Once you have written an `.aurora` configuration file that describes
+your Job and its parameters and functionality, you interact with Aurora
+using Aurora Client commands. This document describes all of these commands
+and how and when to use them. All Aurora Client commands start with
+`aurora`, followed by the name of the specific command and its
+arguments.
+
+*Job keys* are a very common argument to Aurora commands, as well as the
+gateway to useful information about a Job. Before using Aurora, you
+should read the next section which describes them in detail. The section
+after that briefly describes how you can modify the behavior of certain
+Aurora Client commands, linking to a detailed document about how to do
+that.
+
+This is followed by the Regular Jobs section, which describes the basic
+Client commands for creating, running, and manipulating Aurora Jobs.
+After that are sections on Comparing Jobs and Viewing/Examining Jobs. In
+other words, various commands for getting information and metadata about
+Aurora Jobs.
+
+Cluster Configuration
+---------------------
+
+The client must be able to find a configuration file that specifies available clusters. This file
+declares shorthand names for clusters, which are in turn referenced by job configuration files
+and client commands.
+
+The client will load at most two configuration files, making both of their defined clusters
+available. The first is intended to be a system-installed cluster, using the path specified in
+the environment variable `AURORA_CONFIG_ROOT`, defaulting to `/etc/aurora/clusters.json` if the
+environment variable is not set. The second is a user-installed file, located at
+`~/.aurora/clusters.json`.
+
+A cluster configuration is formatted as JSON.  The simplest cluster configuration is one that
+communicates with a single (non-leader-elected) scheduler.  For example:
+
+```javascript
+[{
+  "name": "example",
+  "scheduler_uri": "localhost:55555",
+}]
+```
+
+A configuration for a leader-elected scheduler would contain something like:
+
+```javascript
+[{
+  "name": "example",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler"
+}]
+```
+
+For more details on cluster configuration see the
+[Client Cluster Configuration](/documentation/0.6.0-incubating/client-cluster-configuration/) documentation.
+
+Job Keys
+--------
+
+A job key is a unique system-wide identifier for an Aurora-managed
+Job, for example `cluster1/web-team/test/experiment204`. It is a 4-tuple
+consisting of, in order, *cluster*, *role*, *environment*, and
+*jobname*, separated by /s. Cluster is the name of an Aurora
+cluster. Role is the Unix service account under which the Job
+runs. Environment is a namespace component like `devel`, `test`,
+`prod`, or `stagingN`. Jobname is the Job's name.
+
+The combination of all four values uniquely specifies the Job. If any
+one value is different from that of another job key, the two job keys
+refer to different Jobs. For example, job key
+`cluster1/tyg/prod/workhorse` is different from
+`cluster1/tyg/prod/workcamel` is different from
+`cluster2/tyg/prod/workhorse` is different from
+`cluster2/foo/prod/workhorse` is different from
+`cluster1/tyg/test/workhorse`.
+
+Role names are user accounts existing on the slave machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
+Modifying Aurora Client Commands
+--------------------------------
+
+For certain Aurora Client commands, you can define hook methods that run
+either before or after an action that takes place during the command's
+execution, as well as based on whether the action finished successfully or failed
+during execution. Basically, a hook is code that lets you extend the
+command's actions. The hook executes on the client side, specifically on
+the machine executing Aurora commands.
+
+Hooks can be associated with these Aurora Client commands:
+
+  - `cancel_update`
+  - `create`
+  - `kill`
+  - `restart`
+  - `update`
+
+The process for writing and activating them is complex enough
+that we explain it in a dedicated document, [Hooks for Aurora Client API](/documentation/0.6.0-incubating/hooks/).
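+
+As a rough illustration only (the method names and the activation mechanism below are assumptions;
+the hooks document linked above is the authoritative reference), a hook might look like:
+
+```python
+# Hypothetical sketch: assumed convention of pre_/post_<command> methods and a
+# `hooks` list in the .aurora config file. Consult the hooks document for the real API.
+class Logger(object):
+  def pre_create_job(self, job_config):
+    print('About to create a job')
+    return True                      # assumption: a False return vetoes the command
+
+  def post_create_job(self, job_config, result):
+    print('Job creation finished')
+
+hooks = [Logger()]
+```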
+
+Regular Jobs
+------------
+
+This section covers Aurora commands related to running, killing,
+renaming, updating, and restarting a basic Aurora Job.
+
+### Creating and Running a Job
+
+    aurora create <job key> <configuration file>
+
+Creates and then runs a Job with the specified job key based on a `.aurora` configuration file.
+The configuration file may also contain and activate hook definitions.
+
+`create` can take four named parameters:
+
+- `-E NAME=VALUE` Bind a Thermos mustache variable name to a
+  value. Multiple flags specify multiple values. Defaults to `[]`.
+- `-o, --open_browser` Open a browser window to the scheduler UI Job
+  page after a job changing operation happens. When `False`, the Job
+  URL prints on the console and the user has to copy/paste it
+  manually. Defaults to `False`. Does not work when running in Vagrant.
+- `-j, --json` If specified, the configuration argument is read as a
+  string in JSON format. Defaults to `False`.
+- `--wait_until=STATE` Block the client until all the Tasks have
+  transitioned into the requested state. Possible values are: `PENDING`,
+  `RUNNING`, `FINISHED`. Default: `PENDING`.
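+
+For example, to create the tutorial job and block until its tasks reach `RUNNING`, something like:
+
+    aurora create --wait_until=RUNNING devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora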
+
+### Running a Command On a Running Job
+
+    aurora run <job_key> <cmd>
+
+Runs a shell command on all machines currently hosting shards of a
+single Job.
+
+`run` supports the same command line wildcards used to populate a Job's
+commands; i.e. anything in the `{{mesos.*}}` and `{{thermos.*}}`
+namespaces.
+
+`run` can take three named parameters:
+
+- `-t NUM_THREADS`, `--threads=NUM_THREADS` The number of threads to
+  use, defaulting to `1`.
+- `--user=SSH_USER` ssh as this user instead of the given role value.
+  Defaults to None.
+- `-e, --executor_sandbox`  Run the command in the executor sandbox
+  instead of the Task sandbox. Defaults to False.
+
+### Killing a Job
+
+    aurora kill <job key> <configuration file>
+
+Kills all Tasks associated with the specified Job, blocking until all
+are terminated. Defaults to killing all shards in the Job.
+
+The `<configuration file>` argument for `kill` is optional. Use it only
+if it contains hook definitions and activations that affect the
+kill command.
+
+`kill` can take two named parameters:
+
+- `-o, --open_browser` Open a browser window to the scheduler UI Job
+  page after a job changing operation happens. When `False`, the Job
+  URL prints on the console and the user has to copy/paste it
+  manually. Defaults to `False`. Does not work when running in Vagrant.
+- `--shards=SHARDS` A list of shard ids to act on. Can either be a
+  comma-separated list (e.g. 0,1,2) or a range (e.g. 0-2) or  any
+  combination of the two (e.g. 0-2,5,7-9). Defaults to acting on all
+  shards.
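+
+For example, to kill only the first three shards and shard 7 of the tutorial job:
+
+    aurora kill --shards=0-2,7 devcluster/www-data/devel/hello_world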
+
+### Updating a Job
+
+    aurora update [--shards=ids] <job key> <configuration file>
+    aurora cancel_update <job key> <configuration file>
+
+Given a running job, does a rolling update to reflect a new
+configuration version. Only updates Tasks in the Job with a changed
+configuration. You can further restrict the operated on Tasks by
+using `--shards` and specifying a comma-separated list of job shard ids.
+
+You may want to run `aurora diff` beforehand to validate which Tasks
+have different configurations.
+
+Jobs are locked while updating to be sure the update finishes without
+disruption. If the update terminates abnormally, the lock may stay
+around and cause failure of subsequent update attempts.
+`aurora cancel_update` unlocks the Job specified by
+its `job_key` argument. Be sure you don't issue `cancel_update` when
+another user is working with the specified Job.
+
+The `<configuration file>` argument for `cancel_update` is optional. Use
+it only if it contains hook definitions and activations that affect the
+`cancel_update` command. The `<configuration file>` argument for
+`update` is required, but in addition to a new configuration it can be
+used to define and activate hooks for `update`.
+
+`update` can take four named parameters:
+
+- `--shards=SHARDS` A list of shard ids to update. Can either be a
+  comma-separated list (e.g. 0,1,2) or a range (e.g. 0-2) or  any
+  combination of the two (e.g. 0-2,5,7-9). If not  set, all shards are
+  acted on. Defaults to None.
+- `-E NAME=VALUE` Binds a Thermos mustache variable name to a value.
+  Use multiple flags to specify multiple values. Defaults to `[]`.
+- `-j, --json` If specified, configuration is read in JSON format.
+  Defaults to `False`.
+- `--updater_health_check_interval_seconds=HEALTH_CHECK_INTERVAL_SECONDS`
+  Time interval between subsequent shard status checks. Defaults to `3`.
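+
+For example, to update only the first two shards of the tutorial job after editing its configuration:
+
+    aurora update --shards=0-1 devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora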
+
+#### Asynchronous job updates (beta)
+
+As of 0.6.0, Aurora will coordinate updates (and rollbacks) within the
+scheduler. Performing updates this way also allows the scheduler to display
+update progress and job update history in the browser.
+
+There are several sub-commands to manage job updates:
+
+    aurora2 beta-update start <job key> <configuration file>
+    aurora2 beta-update status <job key>
+    aurora2 beta-update pause <job key>
+    aurora2 beta-update resume <job key>
+    aurora2 beta-update abort <job key>
+    aurora2 beta-update list <cluster>
+
+When you `start` a job update, the command will return once it has sent the
+instructions to the scheduler.  At that point, you may view detailed
+progress for the update with the `status` subcommand, in addition to viewing
+graphical progress in the web browser.  You may also get a full listing of
+in-progress updates in a cluster with `list`.
+
+Once an update has been started, you can `pause` to keep the update but halt
+progress.  This can be useful for doing things like debugging a partially-updated
+job to determine whether you would like to proceed.  You can `resume` to
+proceed.
+
+You may `abort` a job update regardless of the state it is in. This will
+instruct the scheduler to completely abandon the job update and leave the job
+in the current (possibly partially-updated) state.
+
+### Renaming a Job
+
+Renaming is a tricky operation as downstream clients must be informed of
+the new name. A conservative approach
+to renaming suitable for production services is:
+
+1.  Modify the Aurora configuration file to change the role,
+    environment, and/or name as appropriate to the standardized naming
+    scheme.
+2.  Check that only these naming components have changed
+    with `aurora diff`.
+
+        aurora diff <job_key> <job_configuration>
+
+3.  Create the (identical) job at the new key. You may need to request a
+    temporary quota increase.
+
+        aurora create <new_job_key> <job_configuration>
+
+4.  Migrate all clients over to the new job key. Update all links and
+    dashboards. Ensure that both job keys run identical versions of the
+    code while in this state.
+5.  After verifying that all clients have successfully moved over, kill
+    the old job.
+
+        aurora kill <old_job_key>
+
+6.  If you received a temporary quota increase, be sure to let the
+    powers that be know you no longer need the additional capacity.
+
+### Restarting Jobs
+
+`restart` restarts all shards of the Job identified by the given job key:
+
+    aurora restart <job_key> <configuration file>
+
+Restarts are controlled on the client side, so aborting
+the `restart` command halts the restart operation.
+
+`restart` does a rolling restart. You almost always want to do this, but
+not if all shards of a service are misbehaving and are
+completely dysfunctional. To not do a rolling restart, use
+the `--shards` option described below.
+
+**Note**: `restart` only applies its command line arguments; it neither
+uses nor is affected by `update.config`. Restarting
+does ***not*** involve a configuration change. To update the
+configuration, use `update.config`.
+
+The `<configuration file>` argument for restart is optional. Use it only
+if it contains hook definitions and activations that affect the
+`restart` command.
+
+In addition to the required job key argument, there are eight
+`restart` specific optional arguments:
+
+- `--updater_health_check_interval_seconds`: Defaults to `3`, the time
+  interval between subsequent shard status checks.
+- `--shards=SHARDS`: Defaults to None, which restarts all shards.
+  Otherwise, only the specified-by-id shards restart. They can be
+  comma-separated `(0, 8, 9)`, a range `(3-5)` or a
+  combination `(0, 3-5, 8, 9-11)`.
+- `--batch_size`: Defaults to `1`, the number of shards to be started
+  in one iteration. So, for example, for value 3, it tries to restart
+  the first three shards specified by `--shards` simultaneously, then
+  the next three, and so on.
+- `--max_per_shard_failures=MAX_PER_SHARD_FAILURES`: Defaults to `0`,
+  the maximum number of restarts per shard during restart. When
+  exceeded, it increments the total failure count.
+- `--max_total_failures=MAX_TOTAL_FAILURES`: Defaults to `0`, the
+  maximum total number of shard failures tolerated during restart.
+- `-o, --open_browser` Open a browser window to the scheduler UI Job
+  page after a job changing operation happens. When `False`, the Job
+  url prints on the console and the user has to copy/paste it
+  manually. Defaults to `False`. Does not work when running in Vagrant.
+- `--restart_threshold`: Defaults to `60`, the maximum number of
+  seconds a shard may take to move into the `RUNNING` state before
+  it's considered a failure.
+- `--watch_secs`: Defaults to `45`, the minimum number of seconds a
+  shard must remain in the `RUNNING` state before it's considered a success.
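+
+For example, to do a rolling restart of shards 3 through 5, two at a time:
+
+    aurora restart --shards=3-5 --batch_size=2 devcluster/www-data/devel/hello_world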
+
+Cron Jobs
+---------
+
+You will see various commands and options relating to cron jobs in
+`aurora help` and similar. Ignore them, as they're not yet implemented.
+You might be able to use them without causing an error, but nothing happens
+if you do.
+
+Comparing Jobs
+--------------
+
+    aurora diff <job_key> config
+
+Compares a job configuration against a running job. By default the diff
+is determined using `diff`, though you may choose an alternate diff
+program by specifying the `DIFF_VIEWER` environment variable.
+
+There are two named parameters:
+
+- `-E NAME=VALUE` Bind a Thermos mustache variable name to a
+  value. Multiple flags may be used to specify multiple values.
+  Defaults to `[]`.
+- `-j, --json` Read the configuration argument in JSON format.
+  Defaults to `False`.
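+
+For example, a hypothetical invocation that binds a template variable before
+diffing (the job key, variable, and file name are illustrative only) might be:
+
+    aurora diff -E package_version=live devcluster/www-data/prod/hello hello_world.aurora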
+
+Viewing/Examining Jobs
+----------------------
+
+Above we discussed creating, killing, and updating Jobs. Here we discuss
+how to view and examine Jobs.
+
+### Listing Jobs
+
+    aurora list_jobs
+    Usage: aurora list_jobs cluster/role
+
+Lists all Jobs registered with the Aurora scheduler in the named cluster for the named role.
+
+It has two named parameters:
+
+- `--pretty`: Displays job information in pretty-printed format.
+  Defaults to `False`.
+- `-c`, `--show-cron`: Shows cron schedule for jobs. Defaults to
+  `False`. Do not use, as it's not yet implemented.
+
+### Inspecting a Job
+
+    aurora inspect <job_key> config
+
+`inspect` verifies that its specified job can be parsed from a
+configuration file, and displays the parsed configuration. It has four
+named parameters:
+
+- `--local`: Inspect the configuration that the `spawn` command would
+  create, defaulting to `False`.
+- `--raw`: Shows the raw configuration. Defaults to `False`.
+- `-j`, `--json`: If specified, configuration is read in JSON format.
+  Defaults to `False`.
+- `-E NAME=VALUE`: Bind a Thermos Mustache variable name to a value.
+  You can use multiple flags to specify multiple values. Defaults
+  to `[]`.
+
+### Versions
+
+    aurora version
+
+Lists client build information and what Aurora API version it supports.
+
+### Checking Your Quota
+
+    aurora get_quota --cluster=CLUSTER role
+
+Prints the production quota allocated to the role in the given
+cluster.
+
+### Finding a Job on Web UI
+
+When you create a job, part of the output response contains a URL that goes
+to the job's scheduler UI page. For example:
+
+    vagrant@precise64:~$ aurora create devcluster/www-data/prod/hello /vagrant/examples/jobs/hello_world.aurora
+    INFO] Creating job hello
+    INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+    INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+You can go to the scheduler UI page for this job via `http://precise64:8081/scheduler/www-data/prod/hello`.
+You can go to the overall scheduler UI page by going to the part of that URL that ends at `scheduler`: `http://precise64:8081/scheduler`.
+
+Once you click through to a role page, you see Jobs arranged
+separately by pending jobs, active jobs and finished jobs.
+Jobs are arranged by role, typically a service account for
+production jobs and user accounts for test or development jobs.
+
+### Getting Job Status
+
+    aurora status <job_key>
+
+Returns the status of recent tasks associated with the Job specified by
+`job_key` in its supplied cluster. Typically this includes
+a mix of active tasks (running or assigned) and inactive tasks
+(successful, failed, and lost.)
+
+### Opening the Web UI
+
+Use the Job's web UI scheduler URL or the `aurora status` command to find out on which
+machines individual tasks are scheduled. You can open the web UI via the
+`open` command line command if invoked from your machine:
+
+    aurora open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+If only the cluster is specified, it goes directly to that cluster's
+scheduler main page. If the role is specified, it goes to the top-level
+role page. If the full job key is specified, it goes directly to the job
+page where you can inspect individual tasks.
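+
+For example, the following hypothetical invocation would open the role page for
+`www-data` on the `devcluster` cluster:
+
+    aurora open devcluster/www-data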
+
+### SSHing to a Specific Task Machine
+
+    aurora ssh <job_key> <shard number>
+
+You can have the Aurora client ssh directly to the machine that has been
+assigned a particular Job/shard number. This may be useful for quickly
+diagnosing issues such as performance issues or abnormal behavior on a
+particular machine.
+
+It can take three named parameters:
+
+- `-e`, `--executor_sandbox`: Run `ssh` in the executor sandbox
+  instead of the task sandbox. Defaults to `False`.
+- `--user=SSH_USER`: `ssh` as the given user instead of as the role in
+  the `job_key` argument. Defaults to none.
+- `-L PORT:NAME`: Add tunnel from local port `PORT` to the remote
+  named port `NAME`. Defaults to `[]`.
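+
+As an illustrative sketch (the job key and port name are hypothetical), the
+following would ssh to the machine running shard 0 and tunnel local port 8080
+to that task's `http` named port:
+
+    aurora ssh -L 8080:http devcluster/www-data/prod/hello 0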
+
+### Templating Command Arguments
+
+    aurora run [-e] [-t THREADS] <job_key> -- <<command-line>>
+
+Given a job specification, run the supplied command on all hosts and
+return the output. You may use the standard Mustache templating rules:
+
+- `{{thermos.ports[name]}}` substitutes the specific named port of the
+  task assigned to this machine
+- `{{mesos.instance}}` substitutes the shard id of the job's task
+  assigned to this machine
+- `{{thermos.task_id}}` substitutes the task id of the job's task
+  assigned to this machine
+
+For example, the following type of pattern can be a powerful diagnostic
+tool:
+
+    aurora run -t5 cluster1/tyg/devel/seizure -- \
+      'curl -s -m1 localhost:{{thermos.ports[http]}}/vars | grep uptime'
+
+By default, the command runs in the Task's sandbox. The `-e` option can
+run the command in the executor's sandbox. This is mostly useful for
+Aurora administrators.
+
+You can parallelize the runs by using the `-t` option.
diff --git a/source/documentation/0.6.0-incubating/clientv2.md b/source/documentation/0.6.0-incubating/clientv2.md
new file mode 100644
index 0000000..6e69af3
--- /dev/null
+++ b/source/documentation/0.6.0-incubating/clientv2.md
@@ -0,0 +1,405 @@
+Aurora Client v2
+=================
+
+Overview
+-----------
+
+Our goal is to replace the current Aurora command-line client. The
+current client suffers from an early monolithic structure, and a long
+development history of rapid unplanned evolution.
+
+In addition to its internal problems, the current Aurora client is
+confusing for users. There are several different kinds of objects
+manipulated by the Aurora command line, and the difference between
+them is often not clear. (What's the difference between a job and a
+configuration?) For each type of object, there are different commands,
+and it's hard to remember which command should be used for which kind
+of object.
+
+Instead of continuing to let the Aurora client develop and evolve
+randomly, it's time to take a principled look at the Aurora command
+line, and figure out how to make our command line processing make
+sense. At the same time, the code needs to be cleaned up, and divided
+into small comprehensible units based on a plugin architecture.
+
+Doing this now will give us a more intuitive, consistent, and easy to
+use client, as well as a sound platform for future development.
+
+Goals
+-------
+
+* A command line tool for interacting with Aurora that is easy for
+  users to understand.
+* A noun/verb command model.
+* A modular source-code architecture.
+* Non-disruptive transition for users.
+
+Non-Goals
+----------
+
+* The most important non-goal is that we're not trying to redesign the
+  Aurora scheduler, the Aurora executor, or any of the peripheral tools
+  that the Aurora command line interacts with; we only want to create a
+  better command line client.
+* We do not want to change thermos, mesos, hadoop, etc.
+* We do not want to create new objects that users will work with to
+  interact with Mesos or Aurora.
+* We do not want to change Aurora job configuration files or file formats.
+* We do not want to change the Aurora API.
+* We don't want to boil the ocean: there are many things that we could
+  include in the scope of this project, but we don't want to be
+  distracted by re-implementing all of twitter.commons in order to
+  create a perfect Aurora client.
+
+
+Background
+-----------
+
+Aurora is a system that's used to run and manage services and
+service-like jobs running in a datacenter. Aurora takes care of
+allocating resources in order to schedule and run jobs without
+requiring teams to manage dedicated hardware. The heart of Aurora is
+called the scheduler, and is responsible for finding and assigning
+resources to tasks.
+
+The Aurora scheduler provides a thrift API. The scheduler API is
+low-level and difficult to interact with. Users do not interact
+directly with the Aurora API; instead, they use a command-line tool,
+which provides a collection of easy-to-use commands. This command-line
+tool, in turn, talks to the scheduler API to launch and manage jobs in
+datacenter clusters. The command-line tool is called the Aurora
+client.
+
+The current implementation of the Aurora client is haphazard,
+and really needs to be cleaned up:
+
+- The code is monolithic and hard to maintain. It's implemented using
+  `twitter.common.app`, which assumes that all of the command code lives
+  in a single source file. To work around this, and allow some
+  subdivision, it uses a hack of `twitter.common.app` to force
+  registration of commands from multiple modules. It's hard to
+  understand, and hard to modify.
+- The current code is very difficult to test. Because of the way it's
+  built, there is no consistent way of passing key application data
+  around. As a result, each unit test of client operations needs a
+  difficult-to-assemble custom setup of mock objects.
+- The current code handles errors poorly, and it is difficult to
+  fix. Many common errors produce unacceptable results. For example,
+  issuing an unknown command generates an error message "main takes 0
+  parameters but received 1"; passing an invalid parameter to other
+  commands frequently produces a stack trace.
+- The current command line is confusing for users. There are several
+  different kinds of objects manipulated by the Aurora command line,
+  and the difference between them is often not entirely clear. (What's
+  the difference between a job and a configuration?)
+  For each type of object, there are different
+  commands, and it's frequently not clear just which command should be
+  used for which object.
+
+
+Instead of continuing to let it develop and evolve randomly, it's time
+to take a principled look at the Aurora command line, and figure out
+how to make command line processing make sense. At the same time, the
+code needs to be cleaned up, and divided into small comprehensible
+units based on a plugin architecture.
+
+Requirements
+-------------
+
+Aurora is aimed at engineers who run jobs and services in a
+datacenter. As a result, the requirements for the aurora client are
+all engineering focused:
+
+* __Consistency__: commands should follow a consistent structure, so that
+  users can apply knowledge and intuition gained from working with
+  some aurora commands to new commands. This means that when commands
+  can re-use the same options, they should; that objects should be
+  referred to by consistent syntax throughout the tool.
+* __Helpfulness__: commands should be structured so that the system can
+  generate helpful error messages. If a user just runs "aurora", they
+  should get a basic usage message. If they try to run an invalid
+  command, they should get a message that the command is invalid, not
+  a stack dump or "command main() takes 0 parameters but received
+  2". Commands should not generate extraneous output that obscures the
+  key facts that the user needs to know, and the default behavior of
+  commands should not generate outputs that will be routinely ignored
+  by users.
+* __Extensibility__: it should be easy to plug in new commands,
+  including custom commands, to adapt the Aurora client to new
+  environments.
+* __Script-friendly command output__: every command should at least include
+  an option that generates output that's script-friendly. Scripts should be
+  able to work with command-output without needing to do screen scraping.
+* __Scalability__: the tools should be usable for any foreseeable size
+  of Aurora datacenters and machine clusters.
+
+Design Overview
+-----------------
+
+The Aurora client will be reimplemented using a noun-verb model,
+similar to the cmdlet model used by Monad/Windows Powershell. Users
+will work by providing a noun for the type of object being operated
+on, and a verb for the specific operation being performed on the
+object, followed by parameters. For example, to create a job, the user
+would execute: "`aurora job create smfd/mchucarroll/devel/jobname
+job.aurora`". The noun is `job` and the verb is `create`.
+
+The client will be implemented following that noun-verb
+convention. Each noun will be a separate component, which can be
+registered into the command-line framework. Each verb will be
+implemented by a class that registers with the appropriate noun. Nouns
+and verbs will each provide methods that add their command line
+options and parameters to the options parser, using the Python
+argparse library.
+
+Detailed Design
+-----------------
+
+### Interface
+
+In this section, we'll walk through the types of objects that the
+client can manipulate, and the operations that need to be provided for
+each object. These form the primary interface that engineers will use
+to interact with Aurora.
+
+In the command-line, each of the object types will have an Aurora
+subcommand. The commands to manipulate the object type will follow the
+type. For example, here are several commands in the old syntax
+contrasted against the new noun/verb syntax.
+
+* Get quota for a role:
+   * Noun/Verb syntax:  `aurora quota get west/www-data`
+   * Old syntax: `aurora get_quota --cluster=smf1 www-data`
+* Create job:
+   * Noun/Verb syntax: `aurora job create west/www-data/test/job job.aurora`
+   * Old syntax: `aurora create west/www-data/test/job job.aurora`
+* Schedule a job to run at a specific interval:
+   * Noun/verb: `aurora cron schedule east/www-data/test/job job.aurora`
+   * Old: `aurora create east/www-data/test/job job.aurora`
+
+As you can see in these examples, the new syntax is more consistent:
+you always specify the cluster where a command executes as part of an
+identifier, where in the old syntax, it was sometimes part of the
+jobkey and sometimes specified with a "--cluster" option.
+
+The new syntax is also more clear and explicit: even without knowing
+much about Aurora, it's clear what objects each command is acting on,
+where in the old syntax, commands like "create" are unclear.
+
+### The Job Noun
+
+A job is a configured program ready to run in Aurora. A job is,
+conceptually, a task factory: when a job is submitted to the Aurora
+scheduler, it creates a collection of tasks. The job contains a
+complete description of everything it needs to create a collection of
+tasks. (Note that this subsumes "service" commands. A service is just
+a task whose configuration sets the is_service flag, so we don't have
+separate commands for working with services.) Jobs are specified using
+`cluster/role/env/name` jobkey syntax.
+
+* `aurora job create *jobkey* *config*`:  submits a job to a cluster, launching the task(s) specified by the job config.
+* `aurora job status *jobkey*`: query job status. Prints information about the job,
+  whether it's running, etc., to standard out. If jobkey includes
+  globs, it should list all jobs that match the glob
+* `aurora job kill *jobkey*/*instanceids*`: kill/stop some of a job's instances. This stops a job's tasks; if the job
+  has service tasks, they'll be disabled, so that they won't restart.
+* `aurora job killall *jobkey*`: kill all of the instances of a job. This
+  is distinct from the *kill* command as a safety measure: omitting the
+  instances from a kill command shouldn't result in destroying the entire job.
+* `aurora job restart *jobkey*`: conceptually, this will kill a job, and then
+  launch it again. If the job does not exist, then fail with an error
+  message.  In fact, the underlying implementation does the
+  kill/relaunch on a rolling basis - so it's not an immediate kill of
+  all shards/instances, followed by a delay as all instances relaunch,
+  but rather a controlled gradual process.
+* `aurora job list *jobkey*`: list all jobs that match the jobkey spec that are
+  registered with the scheduler. This will include both jobs that are
+  currently running, and jobs that are scheduled to run at a later
+  time. The job key can be partial: if it specifies cluster, all jobs
+  on the cluster will be listed; cluster/role, all jobs running on the cluster under the role will be listed, etc.
+
+The Schedule Noun (Cron)
+--------------------------
+
+Note (3/21/2014): The "cron" noun is _not_ implemented yet.
+
+Cron is a scheduler adjunct that periodically runs a job on a
+schedule. The cron commands all manipulate cron schedule entries. The
+schedules are specified as a part of the job configuration.
+
+* `aurora cron schedule jobkey config`: schedule a job to run by cron.
+* `aurora cron deschedule jobkey`: removes a job's entry from the cron schedule.
+* `aurora cron status jobkey`: query for a scheduled job's status.
+
+The Quota Noun
+---------------
+
+A quota is a data object maintained by the scheduler that specifies the maximum
+resources that may be consumed by jobs owned by a particular role. In the future,
+we may add new quota types. At some point, we'll also probably add an
+administrator command to set quotas.
+
+* `aurora quota get *cluster/role*`
+
+
+Implementation
+---------------
+
+The current command line is monolithic. Every command on an Aurora
+object is a top-level command in the Aurora client. In the
+restructured command line, each of the primary object types
+manipulated by Aurora should have its own sub-command.
+
+* Advantages of this approach:
+   * Easier to detangle the command-line processing. The top-level
+     command-processing will be a small set of subcommand
+     processors. Option processing for each subcommand can be offloaded
+     to a separate module.
+   * The aurora top-level help command will be much more
+     comprehensible. Instead of giving a huge list of every possible
+     command, it will present the list of top-level object types, and
+     then users can request help on the commands for a specific type
+     of object.
+   * The sub-commands can be separated into distinct command-line
+     tools when appropriate.
+
+### Command Structure and Options Processing
+
+The implementation will follow closely on Pants goals. Pants goals use
+a static registration system to add new subcommands. In pants, each
+goal command is an implementation of a command interface, and provides
+implementations of methods to register options and parameters, and to
+actually execute the command. In this design, commands are modular and
+easy to implement, debug, and combine in different ways.
+
+For the Aurora client, we plan to use a two-level variation of the
+basic concept from pants. At the top-level we will have nouns. A noun
+will define some common command-line parameters required by all of its
+verbs, and will provide a registration hook for attaching verbs. Nouns
+will be implemented as a subclass of a basic Noun type.
+
+Each verb will, similarly, be implemented as a subclass of Verb. Verbs
+will be able to specify command-line options and parameters.
+
+Both `Noun` and `Verb` will be subclasses of a common base-class `AuroraCommand`:
+
+    class AuroraCommand(object):
+      def get_options(self):
+      """Gets the set of command-line options objects for this command.
+      The result is a list of CommandOption objects.
+       """
+        pass
+
+      @property
+      def help(self):
+        """Returns the help message for this command"""
+
+      @property
+      def usage(self):
+        """Returns a short usage description of the command"""
+
+      @property
+      def name(self):
+        """Returns the command name"""
+
+
+A command-line tool will be implemented as an instance of a `CommandLine`:
+
+    class CommandLine(object):
+      """The top-level object implementing a command-line application."""
+
+      @property
+      def name(self):
+        """Returns the name of this command-line tool"""
+
+      def print_out(self, str):
+        print(str)
+
+      def print_err(self, str):
+        print(str, file=sys.stderr)
+
+      def register_noun(self, noun):
+        """Adds a noun to the application"""
+
+      def register_plugin(self, plugin):
+        """Adds a configuration plugin to the system"""
+
+
+Nouns are registered into a command-line using the `register_noun`
+method. They are weakly coupled to the application, making it easy to
+use a single noun in several different command-line tools. Nouns allow
+the registration of verbs using the `register_verb` method.
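+
+As a rough sketch of how this fits together (the `Job` and `CreateJob` classes
+below are hypothetical illustrations, not the client's actual implementation):
+
+    class CreateJob(Verb):
+      @property
+      def name(self):
+        return 'create'
+
+      def execute(self, context):
+        # The real verb would talk to the scheduler via the context.
+        context.print_out('creating job...')
+
+    class Job(Noun):
+      @property
+      def name(self):
+        return 'job'
+
+      def __init__(self):
+        super(Job, self).__init__()
+        # Attach the verbs that operate on the "job" noun.
+        self.register_verb(CreateJob())
+
+    cmd = CommandLine()
+    cmd.register_noun(Job())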
+
+When commands execute, they're given an instance of a *context object*.
+The context object must be an instance of a subclass of `AuroraCommandContext`.
+Options, parameters, and IO are all accessed using the context object. The context
+is created dynamically by the noun object owning the verb being executed. Developers
+are strongly encouraged to implement custom contexts for their nouns, and move functionality
+shared by the noun's verbs into the context object. The context interface is:
+
+    class Context(object):
+      class Error(Exception): pass
+
+      class ArgumentException(Error): pass
+
+      class CommandError(Error): pass
+
+      @classmethod
+      def exit(cls, code, msg):
+        """Exit the application with an error message"""
+        raise cls.CommandError(code, msg)
+
+      def print_out(self, msg, indent=0):
+        """Prints a message to standard out, with an indent"""
+
+      def print_err(self, msg, indent=0):
+        """Prints a message to standard err, with an indent"""
+
+
+In addition to nouns and verbs, there's one more kind of registerable
+component, called a *configuration plugin*. These objects add a set of
+command-line options that can be passed to *all* of the commands
+implemented in the tool. Before the command is executed, the
+configuration plugin will be invoked, and will process its
+command-line arguments. This is useful for general configuration
+changes, like establishing a secure tunnel to talk to machines in a
+datacenter. (A useful way to think of a plugin is as something like an
+aspect that can be woven in to aurora to provide environment-specific
+configuration.) A configuration plugin is implemented as an instance
+of class `ConfigurationPlugin`, and registered with the
+`register_plugin` method of the `CommandLine` object. The interface of
+a plugin is:
+
+    class ConfigurationPlugin(object):
+      """A component that can be plugged in to a command-line."""
+
+      @abstractmethod
+      def get_options(self):
+        """Return the set of options processed by this plugin"""
+
+      @abstractmethod
+      def execute(self, context):
+        """Run the context/command line initialization code for this plugin."""
+
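+As a hedged illustration (the option flag, the `CommandOption` arguments, and the
+`open_tunnel` helper are assumptions made for the example, not part of the real
+client), a plugin that adds a global tunneling flag might look like:
+
+    class TunnelPlugin(ConfigurationPlugin):
+      """Hypothetical plugin that opens a tunnel before any command runs."""
+
+      def get_options(self):
+        # Illustrative only: expose one extra option to every command.
+        return [CommandOption('--tunnel', default=None,
+                              help='Gateway host to tunnel through')]
+
+      def execute(self, context):
+        if context.options.tunnel:
+          open_tunnel(context.options.tunnel)  # hypothetical helper
+
+    cmd.register_plugin(TunnelPlugin())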
+
+### Command Execution
+
+Options processing and command execution are built as a facade over Python's
+standard argparse. All of the actual argument processing is done by the
+argparse library.
+
+Once the options are processed, the framework will start to execute the command. Command execution consists of:
+
+1. Create a context object. The framework will use the argparse options to identify
+   which noun is being invoked, and will call that noun's `create_context` method.
+   The argparse options object will be stored in the context.
+2. Execute any configuration plugins. Before any command is invoked, the framework
+   will first iterate over all of the registered configuration plugins. For each
+   plugin, it will invoke the `execute` method.
+3. The noun will use the context to find out what verb is being invoked, and it will
+   then call that verb's `execute` method.
+4. The command will exit. Its return code will be whatever was returned by the verb's
+   `execute` method.
+
+Commands are expected to return a code from a list of standard exit codes,
+which can be found in `src/main/python/apache/aurora/client/cli/__init__.py`.
diff --git a/source/documentation/0.6.0-incubating/committers.md b/source/documentation/0.6.0-incubating/committers.md
new file mode 100644
index 0000000..6e80f68
--- /dev/null
+++ b/source/documentation/0.6.0-incubating/committers.md
@@ -0,0 +1,79 @@
+Setting up your email account
+-----------------------------
+Once your Apache ID has been set up you can configure your account and add ssh keys and setup an
+email forwarding address at
+
+  http://id.apache.org
+
+Additional instructions for setting up your new committer email can be found at
+
+  http://www.apache.org/dev/user-email.html
+
+The recommended setup is to configure all services (mailing lists, JIRA, ReviewBoard) to send
+emails to your @apache.org email address.
+
+
+Creating a gpg key for releases
+-------------------------------
+In order to create a release candidate you will need a gpg key published to an external key server
+and that key will need to be added to our KEYS file as well.
+
+1. Create a key:
+
+               gpg --gen-key
+
+2. Add your gpg key to the Apache Aurora KEYS file:
+
+               git clone https://gitbox.apache.org/repos/asf/incubator-aurora.git
+               (gpg --list-sigs <KEY ID> && gpg --armor --export <KEY ID>) >> KEYS
+               git add KEYS && git commit -m "Adding gpg key for <APACHE ID>"
+               ./rbt post -o -g
+
+3. Publish the key to an external key server:
+
+               gpg --keyserver pgp.mit.edu --send-keys <KEY ID>
+
+4. Publish the updated KEYS file to the Apache Aurora svn dist locations listed below:
+
+               https://dist.apache.org/repos/dist/dev/incubator/aurora/KEYS
+               https://dist.apache.org/repos/dist/release/incubator/aurora/KEYS
+
+5. Add your key to git config for use with the release scripts:
+
+               git config --global user.signingkey <KEY ID>
+
+
+Creating a release
+------------------
+The following will guide you through the steps to create a release candidate, vote, and finally an
+official Apache Aurora release. Before starting, your gpg key should be in the KEYS file and you
+must have access to commit to the dist.a.o repositories.
+
+1. Ensure that all issues resolved for this release candidate are tagged with the correct Fix
+Version in Jira, the changelog script will use this to generate the CHANGELOG in step #2.
+
+2. Create a release candidate. This will automatically update the CHANGELOG and commit it, create a
+branch and update the current version within the trunk. To create a minor version update and publish
+it run
+
+               ./build-support/release/release-candidate -l m -p
+
+3. Update, if necessary, the draft email created from the `release-candidate` script in step #2 and
+send the [VOTE] email to the dev@ and private@ mailing lists. You can verify the release signature
+and checksums by running
+
+               ./build-support/release/verify-release-candidate
+
+4. Wait for the vote to complete. If the vote fails, address any issues, go back to step #1, and
+run again; this time use the -r flag to increment the release candidate version. This will
+automatically clean up the release candidate rc0 branch and source distribution.
+
+               ./build-support/release/release-candidate -l m -r 1 -p
+
+5. Once the vote has successfully passed, create the release
+
+               ./build-support/release/release
+
+6. Update the draft email created from the `release` script in step #5 to include the Apache IDs for
+all binding votes and send the [RESULT][VOTE] email to the dev@ and private@ mailing lists.
+
diff --git a/source/documentation/0.6.0-incubating/configuration-reference.md b/source/documentation/0.6.0-incubating/configuration-reference.md
new file mode 100644
index 0000000..2e02328
--- /dev/null
+++ b/source/documentation/0.6.0-incubating/configuration-reference.md
@@ -0,0 +1,538 @@
+Aurora + Thermos Configuration Reference
+========================================
+
+- [Aurora + Thermos Configuration Reference](#aurora--thermos-configuration-reference)
+- [Introduction](#introduction)
+- [Process Schema](#process-schema)
+    - [Process Objects](#process-objects)
+      - [name](#name)
+      - [cmdline](#cmdline)
+      - [max_failures](#max_failures)
+      - [daemon](#daemon)
+      - [ephemeral](#ephemeral)
+      - [min_duration](#min_duration)
+      - [final](#final)
+- [Task Schema](#task-schema)
+    - [Task Object](#task-object)
+      - [name](#name-1)
+      - [processes](#processes)
+        - [constraints](#constraints)
+      - [resources](#resources)
+      - [max_failures](#max_failures-1)
+      - [max_concurrency](#max_concurrency)
+      - [finalization_wait](#finalization_wait)
+    - [Constraint Object](#constraint-object)
+    - [Resource Object](#resource-object)
+- [Job Schema](#job-schema)
+    - [Job Objects](#job-objects)
+    - [Services](#services)
+    - [UpdateConfig Objects](#updateconfig-objects)
+    - [HealthCheckConfig Objects](#healthcheckconfig-objects)
+    - [Announcer Objects](#announcer-objects)
+- [Specifying Scheduling Constraints](#specifying-scheduling-constraints)
+- [Template Namespaces](#template-namespaces)
+    - [mesos Namespace](#mesos-namespace)
+    - [thermos Namespace](#thermos-namespace)
+- [Basic Examples](#basic-examples)
+    - [hello_world.aurora](#hello_worldaurora)
+    - [Environment Tailoring](#environment-tailoring)
+      - [hello_world_productionized.aurora](#hello_world_productionizedaurora)
+
+Introduction
+============
+
+Don't know where to start? The Aurora configuration schema is very
+powerful, and configurations can become quite complex for advanced use
+cases.
+
+For examples of simple configurations to get something up and running
+quickly, check out the [Tutorial](/documentation/0.6.0-incubating/tutorial/). When you feel comfortable with the basics, move
+on to the [Configuration Tutorial](/documentation/0.6.0-incubating/configuration-tutorial/) for more in-depth coverage of
+configuration design.
+
+For additional basic configuration examples, see [the end of this document](#BasicExamples).
+
+Process Schema
+==============
+
+Process objects consist of required `name` and `cmdline` attributes. You can customize Process
+behavior with its optional attributes. Remember, Processes are handled by Thermos.
+
+### Process Objects
+
+  **Attribute Name**  | **Type**    | **Description**
+  ------------------- | :---------: | ---------------------------------
+   **name**           | String      | Process name (Required)
+   **cmdline**        | String      | Command line (Required)
+   **max_failures**   | Integer     | Maximum process failures (Default: 1)
+   **daemon**         | Boolean     | When True, this is a daemon process. (Default: False)
+   **ephemeral**      | Boolean     | When True, this is an ephemeral process. (Default: False)
+   **min_duration**   | Integer     | Minimum duration between process restarts in seconds. (Default: 15)
+   **final**          | Boolean     | When True, this process is a finalizing one that should run last. (Default: False)
+
+#### name
+
+The name is any valid UNIX filename string (specifically no
+slashes, NULLs or leading periods). Within a Task object, each Process name
+must be unique.
+
+#### cmdline
+
+The command line run by the process. The command line is invoked in a bash
+subshell, so can involve fully-blown bash scripts. However, nothing is
+supplied for command-line arguments so `$*` is unspecified.
+
+#### max_failures
+
+The maximum number of failures (non-zero exit statuses) this process can
+have before being marked permanently failed and not retried. If a
+process permanently fails, Thermos looks at the failure limit of the task
+containing the process (usually 1) to determine if the task has
+failed as well.
+
+Setting `max_failures` to 0 makes the process retry
+indefinitely until it achieves a successful (zero) exit status.
+It retries at most once every `min_duration` seconds to prevent
+an effective denial of service attack on the coordinating Thermos scheduler.
+
+#### daemon
+
+By default, Thermos processes are non-daemon. If `daemon` is set to True, a
+successful (zero) exit status does not prevent future process runs.
+Instead, the process reinvokes after `min_duration` seconds.
+However, the maximum failure limit still applies. A combination of
+`daemon=True` and `max_failures=0` causes a process to retry
+indefinitely regardless of exit status. This should be avoided
+for very short-lived processes because of the accumulation of
+checkpointed state for each process run. When running in Mesos
+specifically, `max_failures` is capped at 100.
+
+#### ephemeral
+
+By default, Thermos processes are non-ephemeral. If `ephemeral` is set to
+True, the process' status is not used to determine if its containing task
+has completed. For example, consider a task with a non-ephemeral
+webserver process and an ephemeral logsaver process
+that periodically checkpoints its log files to a centralized data store.
+The task is considered finished once the webserver process has
+completed, regardless of the logsaver's current status.
+
+#### min_duration
+
+Processes may succeed or fail multiple times during a single task's
+duration. Each of these is called a *process run*. `min_duration` is
+the minimum number of seconds the scheduler waits before running the
+same process.
+
+#### final
+
+Processes can be grouped into two classes: ordinary processes and
+finalizing processes. By default, Thermos processes are ordinary. They
+run as long as the task is considered healthy (i.e., no failure
+limits have been reached.) But once all regular Thermos processes
+finish or the task reaches a certain failure threshold, it
+moves into a "finalization" stage and runs all finalizing
+processes. These are typically processes necessary for cleaning up the
+task, such as log checkpointers, or perhaps e-mail notifications that
+the task completed.
+
+Finalizing processes may not depend upon ordinary processes or
+vice-versa, however finalizing processes may depend upon other
+finalizing processes and otherwise run as a typical process
+schedule.
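+
+For example, a hypothetical finalizing process that archives logs once the task
+winds down might be declared like this (the archive command is illustrative):
+
+    log_archiver = Process(
+      name = 'log_archiver',
+      cmdline = 'tar czf /var/tmp/task_logs.tar.gz .logs',  # illustrative command
+      final = True)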
+
+Task Schema
+===========
+
+Tasks fundamentally consist of a `name` and a list of Process objects stored as the
+value of the `processes` attribute. Processes can be further constrained with
+`constraints`. By default, `name`'s value inherits from the first Process in the
+`processes` list, so for simple `Task` objects with one Process, `name`
+can be omitted. In Mesos, `resources` is also required.
+
+### Task Object
+
+   **param**               | **type**                         | **description**
+   ---------               | :---------:                      | ---------------
+   ```name```              | String                           | Task name (Required) (Default: ```processes[0].name```)
+   ```processes```         | List of ```Process``` objects    | List of ```Process``` objects bound to this task. (Required)
+   ```constraints```       | List of ```Constraint``` objects | List of ```Constraint``` objects constraining processes.
+   ```resources```         | ```Resource``` object            | Resource footprint. (Required)
+   ```max_failures```      | Integer                          | Maximum process failures before being considered failed (Default: 1)
+   ```max_concurrency```   | Integer                          | Maximum number of concurrent processes (Default: 0, unlimited concurrency.)
+   ```finalization_wait``` | Integer                          | Amount of time allocated for finalizing processes, in seconds. (Default: 30)
+
+#### name
+`name` is a string denoting the name of this task. It defaults to the name of the first Process in
+the list of Processes associated with the `processes` attribute.
+
+#### processes
+
+`processes` is an unordered list of `Process` objects. To constrain the order
+in which they run, use `constraints`.
+
+##### constraints
+
+A list of `Constraint` objects. Currently it supports only one type,
+the `order` constraint. `order` is a list of process names
+that should run in the order given. For example,
+
+        process = Process(cmdline = "echo hello {{name}}")
+        task = Task(name = "echoes",
+                    processes = [process(name = "jim"), process(name = "bob")],
+                    constraints = [Constraint(order = ["jim", "bob"])])
+
+Constraints can be supplied ad-hoc and in duplicate. Not all
+Processes need be constrained, however Tasks with cycles are
+rejected by the Thermos scheduler.
+
+Use the `order` function as shorthand to generate `Constraint` lists.
+The following:
+
+        order(process1, process2)
+
+is shorthand for
+
+        [Constraint(order = [process1.name(), process2.name()])]
+
+#### resources
+
+Takes a `Resource` object, which specifies the amounts of CPU, memory, and disk space resources
+to allocate to the Task.
+
+#### max_failures
+
+`max_failures` is the number of times processes that are part of this
+Task can fail before the entire Task is marked for failure.
+
+For example:
+
+        template = Process(max_failures=10)
+        task = Task(
+          name = "fail",
+          processes = [
+             template(name = "failing", cmdline = "exit 1"),
+             template(name = "succeeding", cmdline = "exit 0")
+          ],
+          max_failures=2)
+
+The `failing` Process could fail 10 times before being marked as
+permanently failed, and the `succeeding` Process would succeed on the
+first run. The task would succeed despite only allowing for two failed
+processes. To be more specific, there would be 10 failed process runs
+yet 1 failed process.
+
+#### max_concurrency
+
+For Tasks with a number of expensive but otherwise independent
+processes, you may want to limit the amount of concurrency
+the Thermos scheduler provides rather than artificially constraining
+it via `order` constraints. For example, a test framework may
+generate a task with 100 test run processes, but wants to run it on
+a machine with only 4 cores. You can limit the amount of parallelism to
+4 by setting `max_concurrency=4` in your task configuration.
+
+For example, the following task spawns 180 Processes ("mappers")
+to compute individual elements of a 180 degree sine table, all dependent
+upon one final Process ("reducer") to tabulate the results:
+
+    def make_mapper(id):
+      return Process(
+        name = "mapper%03d" % id,
+        cmdline = "echo 'scale=50;s(%d\*4\*a(1)/180)' | bc -l >
+                   temp.sine_table.%03d" % (id, id))
+
+    def make_reducer():
+      return Process(name = "reducer", cmdline = "cat temp.\* | nl \> sine\_table.txt
+                     && rm -f temp.\*")
+
+    processes = map(make_mapper, range(180))
+
+    task = Task(
+      name = "mapreduce",
+      processes = processes + [make_reducer()],
+      constraints = [Constraint(order = [mapper.name(), 'reducer']) for mapper
+                     in processes],
+      max_concurrency = 8)
+
+#### finalization_wait
+
+Tasks have three active stages: `ACTIVE`, `CLEANING`, and `FINALIZING`. The
+`ACTIVE` stage is when ordinary processes run. This stage lasts as
+long as Processes are running and the Task is healthy. The moment either
+all Processes have finished successfully or the Task has reached a
+maximum Process failure limit, it goes into the `CLEANING` stage and sends
+SIGTERMs to all currently running Processes and their process trees.
+Once all Processes have terminated, the Task goes into `FINALIZING` stage
+and invokes the schedule of all Processes with the "final" attribute set to True.
+
+This whole process from the end of `ACTIVE` stage to the end of `FINALIZING`
+must happen within `finalization_wait` seconds. If it does not
+finish during that time, all remaining Processes are sent SIGKILLs
+(or if they depend upon uncompleted Processes, are
+never invoked.)
+
+Client applications with higher priority may force a shorter
+finalization wait (e.g. through parameters to `thermos kill`), so this
+is mostly a best-effort signal.
+
+### Constraint Object
+
+Current constraint objects only support a single ordering constraint, `order`,
+which specifies its processes run sequentially in the order given. By
+default, all processes run in parallel when bound to a `Task` without
+ordering constraints.
+
+   param | type           | description
+   ----- | :----:         | -----------
+   order | List of String | List of processes by name (String) that should be run serially.
+
+### Resource Object
+
+Specifies the amount of CPU, Ram, and disk resources the task needs. See the
+[Resource Isolation document](/documentation/0.6.0-incubating/resource-isolation/) for suggested values and to understand how
+resources are allocated.
+
+  param      | type    | description
+  -----      | :----:  | -----------
+  ```cpu```  | Float   | Fractional number of cores required by the task.
+  ```ram```  | Integer | Bytes of RAM required by the task.
+  ```disk``` | Integer | Bytes of disk required by the task.
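+
+For example, a task footprint of one core, 4 GiB of RAM, and 8 GiB of disk
+could be expressed as follows (the values are illustrative):
+
+    resources = Resources(cpu = 1.0, ram = 4*GB, disk = 8*GB)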
+
+Job Schema
+==========
+
+### Job Objects
+
+   name | type | description
+   ------ | :-------: | -------
+  ```task``` | Task | The Task object to bind to this job. Required.
+  ```name``` | String | Job name. (Default: inherited from the task attribute's name)
+  ```role``` | String | Job role account. Required.
+  ```cluster``` | String | Cluster in which this job is scheduled. Required.
+   ```environment``` | String | Job environment, default ```devel```. Must be one of ```prod```, ```devel```, ```test``` or ```staging<number>```.
+  ```contact``` | String | Best email address to reach the owner of the job. For production jobs, this is usually a team mailing list.
+  ```instances```| Integer | Number of instances (sometimes referred to as replicas or shards) of the task to create. (Default: 1)
+   ```cron_schedule``` | String | Cron schedule in cron format. May only be used with non-service jobs. See [Cron Jobs](/documentation/0.6.0-incubating/cron-jobs/) for more information. Default: None (not a cron job.)
+  ```cron_collision_policy``` | String | Policy to use when a cron job is triggered while a previous run is still active. KILL_EXISTING: kill the previous run and schedule the new run. CANCEL_NEW: let the previous run continue and cancel the new run. (Default: KILL_EXISTING)
+  ```update_config``` | ```update_config``` object | Parameters for controlling the rate and policy of rolling updates.
+  ```constraints``` | dict | Scheduling constraints for the tasks. See the section on the [constraint specification language](#Specifying-Scheduling-Constraints)
+  ```service``` | Boolean | If True, restart tasks regardless of success or failure. (Default: False)
+  ```max_task_failures``` | Integer | Maximum number of failures after which the task is considered to have failed (Default: 1) Set to -1 to allow for infinite failures
+  ```priority``` | Integer | Preemption priority to give the task (Default 0). Tasks with higher priorities may preempt tasks at lower priorities.
+  ```production``` | Boolean |  Whether or not this is a production task backed by quota (Default: False). Production jobs may preempt any non-production job, and may only be preempted by production jobs in the same role and of higher priority. To run jobs at this level, the job role must have the appropriate quota.
+  ```health_check_config``` | ```health_check_config``` object | Parameters for controlling a task's health checks via HTTP. Only used if a health port was assigned with a command line wildcard.
+
+### Services
+
+Jobs with the `service` flag set to True are called Services. The `Service`
+alias can be used as shorthand for `Job` with `service=True`.
+Services are differentiated from non-service Jobs in that tasks
+always restart on completion, whether successful or unsuccessful.
+Jobs without the service bit set only restart up to
+`max_task_failures` times and only if they terminated unsuccessfully
+either due to human error or machine failure.
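+
+For instance, a minimal sketch of a Service (assuming a `hello_world_task` Task
+defined elsewhere; all values are illustrative):
+
+    hello_world_service = Service(
+      cluster = 'cluster1',
+      role = 'www-data',
+      environment = 'prod',
+      name = 'hello_world_service',
+      instances = 2,
+      task = hello_world_task)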
+
+### UpdateConfig Objects
+
+Parameters for controlling the rate and policy of rolling updates.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+| ```restart_threshold```      | Integer  | Maximum number of seconds before a shard must move into the ```RUNNING``` state before considered a failure (Default: 60)
+| ```watch_secs```             | Integer  | Minimum number of seconds a shard must remain in ```RUNNING``` state before considered a success (Default: 45)
+| ```max_per_shard_failures``` | Integer  | Maximum number of restarts per shard during update. Increments total failure count when this limit is exceeded. (Default: 0)
+| ```max_total_failures```     | Integer  | Maximum number of shard failures to be tolerated in total during an update. Cannot be greater than or equal to the total number of tasks in a job. (Default: 0)
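+
+For example, a job could slow its rolling updates down by attaching an
+`UpdateConfig` such as the following (values are illustrative) via the Job's
+`update_config` attribute:
+
+    update_config = UpdateConfig(
+      batch_size = 3,
+      restart_threshold = 120,
+      watch_secs = 90,
+      max_per_shard_failures = 1)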
+
+### HealthCheckConfig Objects
+
+Parameters for controlling a task's health checks via HTTP.
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```initial_interval_secs```    | Integer   | Initial delay for performing an HTTP health check. (Default: 15)
+| ```interval_secs```            | Integer   | Interval on which to check the task's health via HTTP. (Default: 10)
+| ```timeout_secs```             | Integer   | HTTP request timeout. (Default: 1)
+| ```max_consecutive_failures``` | Integer   | Maximum number of consecutive failures that are tolerated before considering a task unhealthy (Default: 0)
+
+### Announcer Objects
+
+If the `announce` field in the Job configuration is set, each task will be
+registered in the ServerSet `/aurora/role/environment/jobname` in the
+zookeeper ensemble configured by the executor.  If no Announcer object is specified,
+no announcement will take place.  For more information about ServerSets, see the [User Guide](/documentation/0.6.0-incubating/user-guide/).
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```primary_port```             | String    | Which named port to register as the primary endpoint in the ServerSet (Default: `http`)
+| ```portmap```                  | dict      | A mapping of additional endpoints to be announced in the ServerSet (Default: `{ 'aurora': '{{primary_port}}' }`)
+
+### Port aliasing with the Announcer `portmap`
+
+The primary endpoint registered in the ServerSet is the one allocated to the port
+specified by the `primary_port` in the `Announcer` object, by default
+the `http` port.  This port can be referenced from anywhere within a configuration
+as `{{thermos.ports[http]}}`.
+
+Without the port map, each named port would be allocated a unique port number.
+The `portmap` allows two different named ports to be aliased together.  The default
+`portmap` aliases the `aurora` port (i.e. `{{thermos.ports[aurora]}}`) to
+the `http` port.  Even though the two ports can be referenced independently,
+only one port is allocated by Mesos.  Any port referenced in a `Process` object
+but which is not in the portmap will be allocated dynamically by Mesos and announced as well.
+
+It is possible to use the portmap to alias names to static port numbers, e.g.
+`{'http': 80, 'https': 443, 'aurora': 'http'}`.  In this case, referencing
+`{{thermos.ports[aurora]}}` would look up `{{thermos.ports[http]}}` then
+find a static port 80.  No port would be requested of or allocated by Mesos.
+
+Static ports should be used cautiously as Aurora does nothing to prevent two
+tasks with the same static port allocations from being co-scheduled.
+External constraints such as slave attributes should be used to enforce such
+guarantees should they be needed.
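+
+As an illustrative sketch, a job that wants the default `http` primary endpoint
+plus a static HTTPS alias could set the following on its `announce` field
+(values are hypothetical):
+
+    announce = Announcer(
+      primary_port = 'http',
+      portmap = {'https': 443, 'aurora': 'http'})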
+
+Specifying Scheduling Constraints
+=================================
+
+Most users will not need to specify constraints explicitly, as the
+scheduler automatically inserts reasonable defaults that attempt to
+ensure reliability without impacting schedulability. For example, the
+scheduler inserts a `host: limit:1` constraint, ensuring
+that your shards run on different physical machines. Please do not
+set this field unless you are sure of what you are doing.
+
+In the `Job` object there is a map `constraints` from String to String
+allowing the user to tailor the schedulability of tasks within the job.
+
+Each slave in the cluster is assigned a set of string-valued
+key/value pairs called attributes. For example, consider the host
+`cluster1-aaa-03-sr2` and its following attributes (given in key:value
+format): `host:cluster1-aaa-03-sr2` and `rack:aaa`.
+
+The constraint map's key value is the attribute name in which we
+constrain Tasks within our Job. The value is how we constrain them.
+There are two types of constraints: *limit constraints* and *value
+constraints*.
+
+| constraint    | description
+| ------------- | --------------
+| Limit         | A string that specifies a limit for a constraint. Starts with <code>'limit:</code> followed by an Integer and closing single quote, such as ```'limit:1'```.
+| Value         | A string that specifies a value for a constraint. To include a list of values, separate the values using commas. To negate the values of a constraint, start with a ```!```.
+
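+For example, a hypothetical value constraint that restricts a job's tasks to
+two specific racks, and another that keeps them off a particular host, might
+look like:
+
+    constraints = {
+      'rack': 'aaa,bbb',                # value constraint: only racks aaa or bbb
+      'host': '!cluster1-aaa-03-sr2',   # negated value constraint
+    }
+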
+You can also control machine diversity using constraints. The below
+constraint ensures that no more than two instances of your job may run
+on a single host. Think of this as a "group by" limit.
+
+    constraints = {
+      'host': 'limit:2',
+    }
+
+Likewise, you can use constraints to control rack diversity, e.g. at
+most one task per rack:
+
+    constraints = {
+      'rack': 'limit:1',
+    }
+
+Use these constraints sparingly as they can dramatically reduce Tasks' schedulability.
+
+Template Namespaces
+===================
+
+Currently, a few Pystachio namespaces have special semantics. Using them
+in your configuration allows you to tailor application behavior
+through environment introspection or interact in special ways with the
+Aurora client or Aurora-provided services.
+
+### mesos Namespace
+
+The `mesos` namespace contains the `instance` variable that can be used
+to distinguish between Task replicas.
+
+| variable name     | type       | description
+| --------------- | :--------: | -------------
+| ```instance```    | Integer    | The instance number of the created task. A job with 5 replicas has instance numbers 0, 1, 2, 3, and 4.
+
+### thermos Namespace
+
+The `thermos` namespace contains variables that work directly on the
+Thermos platform in addition to Aurora. This namespace is fully
+compatible with Tasks invoked via the `thermos` CLI.
+
+| variable      | type                     | description                        |
+| :----------:  | ---------                | ------------                       |
+| ```ports```   | map of string to Integer | A map of names to port numbers     |
+| ```task_id``` | string                   | The task ID assigned to this task. |
+
+The `thermos.ports` namespace is automatically populated by Aurora when
+invoking tasks on Mesos. When running the `thermos` command directly,
+these ports must be explicitly mapped with the `-P` option.
+
+For example, if '{{`thermos.ports[http]`}}' is specified in a `Process`
+configuration, it is automatically extracted and auto-populated by
+Aurora, but must be specified with, for example, `thermos -P http:12345`
+to map `http` to port 12345 when running via the CLI.
+
+Basic Examples
+==============
+
+These are provided to give a basic understanding of simple Aurora jobs.
+
+### hello_world.aurora
+
+Put the following in a file named `hello_world.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    import os
+    hello_world_process = Process(name = 'hello_world', cmdline = 'echo hello world')
+
+    hello_world_task = Task(
+      resources = Resources(cpu = 0.1, ram = 16 * MB, disk = 16 * MB),
+      processes = [hello_world_process])
+
+    hello_world_job = Job(
+      cluster = 'cluster1',
+      role = os.getenv('USER'),
+      task = hello_world_task)
+
+    jobs = [hello_world_job]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora create cluster1/$USER/test/hello_world hello_world.aurora
+
+    aurora kill cluster1/$USER/test/hello_world
+
+### Environment Tailoring
+
+#### hello_world_productionized.aurora
+
+Put the following in a file named `hello_world_productionized.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    include('hello_world.aurora')
+
+    production_resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 2 * GB)
+    staging_resources = Resources(cpu = 0.1, ram = 32 * MB, disk = 512 * MB)
+    hello_world_template = hello_world(
+        name = "hello_world-{{cluster}}"
+        task = hello_world(resources=production_resources))
+
+    jobs = [
+      # production jobs
+      hello_world_template(cluster = 'cluster1', instances = 25),
+      hello_world_template(cluster = 'cluster2', instances = 15),
+
+      # staging jobs
+      hello_world_template(
+        cluster = 'local',
+        instances = 1,
+        task = hello_world(resources=staging_resources)),
+    ]
+
+Then issue the following commands to create and kill the job, using your own values for the job key
+
+    aurora create cluster1/$USER/test/hello_world-cluster1 hello_world_productionized.aurora
+
+    aurora kill cluster1/$USER/test/hello_world-cluster1
diff --git a/source/documentation/0.6.0-incubating/configuration-tutorial.md b/source/documentation/0.6.0-incubating/configuration-tutorial.md
new file mode 100644
index 0000000..afc965e
--- /dev/null
+++ b/source/documentation/0.6.0-incubating/configuration-tutorial.md
@@ -0,0 +1,1147 @@
+Aurora Configuration Tutorial
+=============================
+
+How to write Aurora configuration files, including feature descriptions
+and best practices. When writing a configuration file, make use of
+`aurora inspect`. It takes the same job key and configuration file
+arguments as `aurora create` or `aurora update`. It first ensures the
+configuration parses, then outputs it in human-readable form.
+
+You should read this after going through the general [Aurora Tutorial](/documentation/0.6.0-incubating/tutorial/).
+
+- [Aurora Configuration Tutorial](#aurora-configuration-tutorial)
+	- [The Basics](#the-basics)
+		- [Use Bottom-To-Top Object Ordering](#use-bottom-to-top-object-ordering)
+	- [An Example Configuration File](#an-example-configuration-file)
+	- [Defining Process Objects](#defining-process-objects)
+	- [Getting Your Code Into The Sandbox](#getting-your-code-into-the-sandbox)
+	- [Defining Task Objects](#defining-task-objects)
+		- [SequentialTask: Running Processes in Parallel or Sequentially](#sequentialtask-running-processes-in-parallel-or-sequentially)
+		- [SimpleTask](#simpletask)
+		- [Combining tasks](#combining-tasks)
+	- [Defining Job Objects](#defining-job-objects)
+	- [The jobs List](#the-jobs-list)
+	- [Templating](#templating)
+		- [Templating 1: Binding in Pystachio](#templating-1-binding-in-pystachio)
+		- [Structurals in Pystachio / Aurora](#structurals-in-pystachio--aurora)
+			- [Mustaches Within Structurals](#mustaches-within-structurals)
+		- [Templating 2: Structurals Are Factories](#templating-2-structurals-are-factories)
+			- [A Second Way of Templating](#a-second-way-of-templating)
+		- [Advanced Binding](#advanced-binding)
+			- [Bind Syntax](#bind-syntax)
+			- [Binding Complex Objects](#binding-complex-objects)
+				- [Lists](#lists)
+				- [Maps](#maps)
+				- [Structurals](#structurals)
+		- [Structural Binding](#structural-binding)
+	- [Configuration File Writing Tips And Best Practices](#configuration-file-writing-tips-and-best-practices)
+		- [Use As Few .aurora Files As Possible](#use-as-few-aurora-files-as-possible)
+		- [Avoid Boilerplate](#avoid-boilerplate)
+		- [Thermos Uses bash, But Thermos Is Not bash](#thermos-uses-bash-but-thermos-is-not-bash)
+			- [Bad](#bad)
+			- [Good](#good)
+		- [Rarely Use Functions In Your Configurations](#rarely-use-functions-in-your-configurations)
+			- [Bad](#bad-1)
+			- [Good](#good-1)
+
+The Basics
+----------
+
+To run a job on Aurora, you must specify a configuration file that tells
+Aurora what it needs to know to schedule the job, what Mesos needs to
+run the tasks the job is made up of, and what Thermos needs to run the
+processes that make up the tasks. This file must have
+a `.aurora` suffix.
+
+A configuration file defines a collection of objects, along with parameter
+values for their attributes. An Aurora configuration file contains the
+following three types of objects:
+
+- Job
+- Task
+- Process
+
+A configuration also specifies a list of `Job` objects assigned
+to the variable `jobs`.
+
+- jobs (list of defined Jobs to run)
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a type-checked dictionary
+templating library called *Pystachio*, a powerful tool for
+configuration specification and reuse. Pystachio objects are tailored
+via templates surrounded by `{{}}` markers.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the [*Aurora+Thermos Configuration
+Reference*](/documentation/0.6.0-incubating/configuration-reference/), without `import` statements - the
+Aurora config loader injects them automatically. Other than that, an `.aurora`
+file works like any other Python script.
+
+[*Aurora+Thermos Configuration Reference*](/documentation/0.6.0-incubating/configuration-reference/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+### Use Bottom-To-Top Object Ordering
+
+A well-structured configuration starts with structural templates (if
+any). Structural templates encapsulate in their attributes all the
+differences between Jobs in the configuration that are not directly
+manipulated at the `Job` level, but typically at the `Process` or `Task`
+level. For example, if certain processes are invoked with slightly
+different settings or input.
+
+After structural templates, define, in order, `Process`es, `Task`s, and
+`Job`s.
+
+Structural template names should be *UpperCamelCased* and their
+instantiations are typically *UPPER\_SNAKE\_CASED*. `Process`, `Task`,
+and `Job` names are typically *lower\_snake\_cased*. Indentation is typically 2
+spaces.
+
+An Example Configuration File
+-----------------------------
+
+The following is a typical configuration file. Don't worry if there are
+parts you don't understand yet, but you may want to refer back to this
+as you read about its individual parts. Note that names surrounded by
+curly braces {{}} are template variables, which the system replaces with
+bound values for the variables.
+
+    # --- templates here ---
+	class Profile(Struct):
+	  package_version = Default(String, 'live')
+	  java_binary = Default(String, '/usr/lib/jvm/java-1.7.0-openjdk/bin/java')
+	  extra_jvm_options = Default(String, '')
+	  parent_environment = Default(String, 'prod')
+	  parent_serverset = Default(String,
+                                 '/foocorp/service/bird/{{parent_environment}}/bird')
+
+	# --- processes here ---
+	main = Process(
+	  name = 'application',
+	  cmdline = '{{profile.java_binary}} -server -Xmx1792m '
+	            '{{profile.extra_jvm_options}} '
+	            '-jar application.jar '
+	            '-upstreamService {{profile.parent_serverset}}'
+	)
+
+	# --- tasks ---
+	base_task = SequentialTask(
+	  name = 'application',
+	  processes = [
+	    Process(
+	      name = 'fetch',
+	      cmdline = 'curl -O '
+	                'https://packages.foocorp.com/{{profile.package_version}}/application.jar'),
+	  ]
+	)
+
+        # not always necessary but often useful to have separate task
+        # resource classes
+        staging_task = base_task(resources =
+                         Resources(cpu = 1.0,
+                                   ram = 2048*MB,
+                                   disk = 1*GB))
+	production_task = base_task(resources =
+                            Resources(cpu = 4.0,
+                                      ram = 2560*MB,
+                                      disk = 10*GB))
+
+	# --- job template ---
+	job_template = Job(
+	  name = 'application',
+	  role = 'myteam',
+	  contact = 'myteam-team@foocorp.com',
+	  instances = 20,
+	  service = True,
+	  task = production_task
+	)
+
+	# -- profile instantiations (if any) ---
+	PRODUCTION = Profile()
+	STAGING = Profile(
+	  extra_jvm_options = '-Xloggc:gc.log',
+	  parent_environment = 'staging'
+	)
+
+	# -- job instantiations --
+	jobs = [
+          job_template(cluster = 'cluster1', environment = 'prod')
+	               .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster2', environment = 'prod')
+	                .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster1',
+                        environment = 'staging',
+			service = False,
+			task = staging_task,
+			instances = 2)
+			.bind(profile = STAGING),
+	]
+
+## Defining Process Objects
+
+Processes are handled by the Thermos system. A process is a single
+executable step run as a part of an Aurora task, which consists of a
+bash-executable statement.
+
+The key (and required) `Process` attributes are:
+
+-   `name`: Any string which is a valid Unix filename (no slashes,
+    NULLs, or leading periods). The `name` value must be unique relative
+    to other Processes in a `Task`.
+-   `cmdline`: A command line run in a bash subshell, so you can use
+    bash scripts. Nothing is supplied for command-line arguments,
+    so `$*` is unspecified.
+
+Many tiny processes make managing configurations more difficult. For
+example, the following is a bad way to define processes.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip'
+    )
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+Since `cmdline` runs in a bash subshell, you can chain commands
+with `&&` or `||`.
+
+When defining a `Task` that is just a list of Processes run in a
+particular order, use `SequentialTask`, as described in the [*Defining*
+`Task` *Objects*](#defining-task-objects) section. The following simplifies and combines the
+above multiple `Process` definitions into just two.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip && '
+                'unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+`Process` also has five optional attributes, each with a default value
+if one isn't specified in the configuration:
+
+-   `max_failures`: Defaulting to `1`, the maximum number of failures
+    (non-zero exit statuses) before this `Process` is marked permanently
+    failed and not retried. If a `Process` permanently fails, Thermos
+    checks the `Process` object's containing `Task` for the task's
+    failure limit (usually 1) to determine whether or not the `Task`
+    should be failed. Setting `max_failures`to `0` means that this
+    should be failed. Setting `max_failures` to `0` means that this
+    achieved. Retries happen at most once every `min_duration` seconds
+    to prevent effectively mounting a denial of service attack against
+    the coordinating scheduler.
+
+-   `daemon`: Defaulting to `False`, if `daemon` is set to `True`, a
+    successful (zero) exit status does not prevent future process runs.
+    Instead, the `Process` reinvokes after `min_duration` seconds.
+    However, the maximum failure limit (`max_failures`) still
+    applies. A combination of `daemon=True` and `max_failures=0` retries
+    a `Process` indefinitely regardless of exit status. This should
+    generally be avoided for very short-lived processes because of the
+    accumulation of checkpointed state for each process run. When
+    running in Aurora, `max_failures` is capped at
+    100.
+
+-   `ephemeral`: Defaulting to `False`, if `ephemeral` is `True`, the
+    `Process`' status is not used to determine if its bound `Task` has
+    completed. For example, consider a `Task` with a
+    non-ephemeral webserver process and an ephemeral logsaver process
+    that periodically checkpoints its log files to a centralized data
+    store. The `Task` is considered finished once the webserver process
+    finishes, regardless of the logsaver's current status.
+
+-   `min_duration`: Defaults to `15`. Processes may succeed or fail
+    multiple times during a single Task. Each result is called a
+    *process run* and this value is the minimum number of seconds the
+    scheduler waits before re-running the same process.
+
+-   `final`: Defaulting to `False`, this is a finalizing `Process` that
+    should run last. Processes can be grouped into two classes:
+    *ordinary* and *finalizing*. By default, Thermos Processes are
+    ordinary. They run as long as the `Task` is considered
+    healthy (i.e. hasn't reached a failure limit). But once all regular
+    Thermos Processes have either finished or the `Task` has reached a
+    certain failure threshold, Thermos moves into a *finalization* stage
+    and runs all finalizing Processes. These are typically necessary for
+    cleaning up after the `Task`, such as log checkpointers, or perhaps
+    e-mail notifications of a completed Task. Finalizing processes may
+    not depend upon ordinary processes or vice-versa, however finalizing
+    processes may depend upon other finalizing processes and will
+    otherwise run as a typical process schedule.
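+
+The sketch below combines several of these optional attributes in one place. The process
+names, command lines, and the `{{log_dir}}` variable are hypothetical and only meant to
+illustrate how the attributes fit together:
+
+    logrotate = Process(
+      name = 'logrotate',
+      cmdline = './logrotate conf/logrotate.conf',
+      daemon = True,        # reinvoked after each successful exit
+      max_failures = 0,     # keep retrying on failure (capped at 100 under Aurora)
+      ephemeral = True)     # does not block Task completion
+
+    checkpoint_logs = Process(
+      name = 'checkpoint_logs',
+      cmdline = 'hadoop fs -put .logs {{log_dir}}',
+      final = True)         # runs during the finalization stage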
+
+## Getting Your Code Into The Sandbox
+
+When using Aurora, you need to get your executable code into its "sandbox", specifically
+the Task sandbox where the code executes for the Processes that make up that Task.
+
+Each Task has a sandbox created when the Task starts and garbage
+collected when it finishes. All of a Task's processes run in its
+sandbox, so processes can share state by using a shared current
+working directory.
+
+Typically, you save this code somewhere. You then need to define a Process
+in your `.aurora` configuration file that fetches the code from that somewhere
+to where the slave can see it. For a public cloud, that can be anywhere public on
+the Internet, such as S3. For a private cloud using internal storage, you need to put it
+on an accessible HDFS cluster or similar storage.
+
+The template for this Process is:
+
+    <name> = Process(
+      name = '<name>',
+      cmdline = '<command to copy and extract code archive into current working directory>'
+    )
+
+Note: Be sure the extracted code archive has an executable.
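+
+A concrete instance of this template might fetch a tarball over HTTP and unpack it into the
+sandbox; the package URL below is purely illustrative:
+
+    fetch_package = Process(
+      name = 'fetch_package',
+      cmdline = 'curl -O https://packages.example.com/myapp/app.tar.gz && tar xzf app.tar.gz')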
+
+## Defining Task Objects
+
+Tasks are handled by Mesos. A task is a collection of processes that
+runs in a shared sandbox. It's the fundamental unit Aurora uses to
+schedule the datacenter; essentially what Aurora does is find places
+in the cluster to run tasks.
+
+The key (and required) parts of a Task are:
+
+-   `name`: A string giving the Task's name. By default, if a Task is
+    not given a name, it inherits the first name in its Process list.
+
+-   `processes`: An unordered list of Process objects bound to the Task.
+    The value of the optional `constraints` attribute affects the
+    contents as a whole. Currently, the only constraint, `order`, determines if
+    the processes run in parallel or sequentially.
+
+-   `resources`: A `Resource` object defining the Task's resource
+    footprint. A `Resource` object has three attributes:
+    -   `cpu`: A Float, the fractional number of cores the Task requires.
+    -   `ram`: An Integer, RAM bytes the Task requires.
+    -   `disk`: An Integer, disk bytes the Task requires.
+
+A basic Task definition looks like:
+
+    Task(
+        name="hello_world",
+        processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+        resources=Resources(cpu = 1.0,
+                            ram = 1*GB,
+                            disk = 1*GB))
+
+There are four optional Task attributes:
+
+-   `constraints`: A list of `Constraint` objects that constrain the
+    Task's processes. Currently there is only one type, the `order`
+    constraint. For example the following requires that the processes
+    run in the order `foo`, then `bar`.
+
+        constraints = [Constraint(order=['foo', 'bar'])]
+
+    There is an `order()` function that takes `order('foo', 'bar', 'baz')`
+    and converts it into `[Constraint(order=['foo', 'bar', 'baz'])]`.
+    `order()` accepts Process name strings `('foo', 'bar')` or the processes
+    themselves, e.g. `foo=Process(name='foo', ...)`, `bar=Process(name='bar', ...)`,
+    `constraints=order(foo, bar)`
+
+    Note that Thermos rejects tasks with process cycles.
+
+-   `max_failures`: Defaulting to `1`, the number of failed processes
+    needed for the `Task` to be marked as failed. Note how this
+    interacts with individual Processes' `max_failures` values. Assume a
+    Task has two Processes and a `max_failures` value of `2`. So both
+    Processes must fail for the Task to fail. Now, assume each of the
+    Task's Processes has its own `max_failures` value of `10`. If
+    Process "A" fails 5 times before succeeding, and Process "B" fails
+    10 times and is then marked as failing, their parent Task succeeds.
+    Even though there were 15 individual failures by its Processes, only
+    1 of its Processes was finally marked as failing. Since 1 is less
+    than the 2 that is the Task's `max_failures` value, the Task does
+    not fail.
+
+-   `max_concurrency`: Defaulting to `0`, the maximum number of
+    concurrent processes in the Task. `0` specifies unlimited
+    concurrency. For Tasks with many expensive but otherwise independent
+    processes, you can limit the amount of concurrency Thermos schedules
+    instead of artificially constraining them through `order`
+    constraints. For example, a test framework may generate a Task with
+    100 test run processes, but run it with only
+    `resources.cpus=4`. Limit the amount of parallelism to 4 by setting
+    `max_concurrency=4`.
+
+-   `finalization_wait`: Defaulting to `30`, the number of seconds
+    allocated for finalizing the Task's processes. A Task starts in
+    `ACTIVE` state when Processes run and stays there as long as the Task
+    is healthy and Processes run. When all Processes finish successfully
+    or the Task reaches its maximum process failure limit, it goes into
+    `CLEANING` state. In `CLEANING`, it sends `SIGTERM`s to any still running
+    Processes. When all Processes terminate, the Task goes into
+    `FINALIZING` state and invokes the schedule of all processes whose
+    final attribute has a True value. Everything from the end of `ACTIVE`
+    to the end of `FINALIZING` must happen within `finalization_wait`
+    number of seconds. If not, all still running Processes are sent
+    `SIGKILL`s (or if dependent on yet to be completed Processes, are
+    never invoked).
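+
+As a sketch of how these optional attributes combine, assuming `test_processes` is a list of
+independent test-run Processes defined earlier (the numbers are illustrative):
+
+    test_task = Task(
+      name = 'run_tests',
+      processes = test_processes,
+      resources = Resources(cpu = 4.0, ram = 4*GB, disk = 8*GB),
+      max_failures = 2,          # two permanently failed processes fail the Task
+      max_concurrency = 4,       # run at most 4 processes at a time
+      finalization_wait = 60)    # allow 60 seconds for finalizing processes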
+
+### SequentialTask: Running Processes in Parallel or Sequentially
+
+By default, a Task with several Processes runs them in parallel. There
+are two ways to run Processes sequentially:
+
+-   Include an `order` constraint in the Task definition's `constraints`
+    attribute whose arguments specify the processes' run order:
+
+        Task( ... processes=[process1, process2, process3],
+	          constraints = order(process1, process2, process3), ...)
+
+-   Use `SequentialTask` instead of `Task`; it automatically runs
+    processes in the order specified in the `processes` attribute. No
+    `constraint` parameter is needed:
+
+        SequentialTask( ... processes=[process1, process2, process3] ...)
+
+### SimpleTask
+
+For quickly creating simple tasks, use the `SimpleTask` helper. It
+creates a basic task from a provided name and command line using a
+default set of resources. For example, in a `.aurora` configuration
+file:
+
+    SimpleTask(name="hello_world", command="echo hello world")
+
+is equivalent to
+
+    Task(name="hello_world",
+         processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+         resources=Resources(cpu = 1.0,
+                             ram = 1*GB,
+                             disk = 1*GB))
+
+The simplest idiomatic Job configuration thus becomes:
+
+    import os
+    hello_world_job = Job(
+      task=SimpleTask(name="hello_world", command="echo hello world"),
+      role=os.getenv('USER'),
+      cluster="cluster1")
+
+When written to `hello_world.aurora`, you invoke it with a simple
+`aurora create cluster1/$USER/test/hello_world hello_world.aurora`.
+
+### Combining tasks
+
+`Tasks.concat` (synonym: `concat_tasks`) and
+`Tasks.combine` (synonym: `combine_tasks`) merge multiple Task definitions
+into a single Task. It may be easier to define complex Jobs
+as smaller constituent Tasks. But since a Job only includes a single
+Task, the subtasks must be combined before using them in a Job.
+Smaller Tasks can also be reused between Jobs, instead of having to
+repeat their definition for multiple Jobs.
+
+With both methods, the merged Task takes the first Task's name. The
+difference between the two is the result Task's process ordering.
+
+-   `Tasks.combine` runs its subtasks' processes in no particular order.
+    The new Task's resource consumption is the sum of all its subtasks'
+    consumption.
+
+-   `Tasks.concat` runs its subtasks in the order supplied, with each
+    subtask's processes run serially between tasks. It is analogous to
+    the `order` constraint helper, except at the Task level instead of
+    the Process level. The new Task's resource consumption is the
+    maximum value specified by any subtask for each Resource attribute
+    (cpu, ram and disk).
+
+For example, given the following:
+
+    setup_task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper],
+      # It is important to note that {{Tasks.concat}} has
+      # no effect on the ordering of the processes within a task;
+      # hence the necessity of the {{order}} statement below
+      # (otherwise, the order in which {{download_interpreter}}
+      # and {{update_zookeeper}} run will be non-deterministic)
+      constraints=order(download_interpreter, update_zookeeper),
+      ...
+    )
+
+    run_task = SequentialTask(
+      ...
+      processes=[download_application, start_application],
+      ...
+    )
+
+    combined_task = Tasks.concat(setup_task, run_task)
+
+The `Tasks.concat` command merges the two Tasks into a single Task and
+ensures all processes in `setup_task` run before the processes
+in `run_task`. Conceptually, the task is reduced to:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper,
+                        download_application, start_application),
+      ...
+    )
+
+In the case of `Tasks.combine`, the two schedules run in parallel:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper) +
+                        order(download_application, start_application),
+      ...
+    )
+
+In the latter case, each of the two sequences may operate in parallel.
+Of course, this may not be the intended behavior (for example, if
+the `start_application` Process implicitly relies
+upon `download_interpreter`). Make sure you understand the difference
+between using one or the other.
+
+## Defining Job Objects
+
+A job is a group of identical tasks that Aurora can run in a Mesos cluster.
+
+A `Job` object is defined by the values of several attributes, some
+required and some optional. The required attributes are:
+
+-   `task`: Task object to bind to this job. Note that a Job can
+    only take a single Task.
+
+-   `role`: Job's role account; in other words, the user account to run
+    the job as on a Mesos cluster machine. A common value is
+    `os.getenv('USER')`; using a Python command to get the user who
+    submits the job request. The other common value is the service
+    account that runs the job, e.g. `www-data`.
+
+-   `environment`: Job's environment, typical values
+    are `devel`, `test`, or `prod`.
+
+-   `cluster`: Aurora cluster to schedule the job in, defined in
+    `/etc/aurora/clusters.json` or `~/.clusters.json`. You can specify
+    jobs where the only difference is the `cluster`, then at run time
+    only run the Job whose job key includes your desired cluster's name.
+
+You usually see a `name` parameter. By default, `name` inherits its
+value from the Job's associated Task object, but you can override this
+default. For these four parameters, a Job definition might look like:
+
+    foo_job = Job( name = 'foo', cluster = 'cluster1',
+              role = os.getenv('USER'), environment = 'prod',
+              task = foo_task)
+
+In addition to the required attributes, there are several optional
+attributes. The first (strongly recommended) optional attribute is:
+
+-   `contact`: An email address for the Job's owner. For production
+    jobs, it is usually a team mailing list.
+
+Two more attributes deal with how to handle failure of the Job's Task:
+
+-   `max_task_failures`: An integer, defaulting to `1`, of the maximum
+    number of Task failures after which the Job is considered failed.
+    `-1` allows for infinite failures.
+
+-   `service`: A boolean, defaulting to `False`, which if `True`
+    restarts tasks regardless of whether they succeeded or failed. In
+    other words, if `True`, after the Job's Task completes, it
+    automatically starts again. This is for Jobs you want to run
+    continuously, rather than doing a single run.
+
+Three attributes deal with configuring the Job's Task:
+
+-   `instances`: Defaulting to `1`, the number of
+    instances/replicas/shards of the Job's Task to create.
+
+-   `priority`: Defaulting to `0`, the Job's Task's preemption priority,
+    for which higher values may preempt Tasks from Jobs with lower
+    values.
+
+-   `production`: a Boolean, defaulting to `False`, specifying that this
+    is a production job backed by quota. Tasks from production Jobs may
+    preempt tasks from any non-production job, and may only be preempted
+    by tasks from production jobs in the same role with higher
+    priority. **WARNING**: To run Jobs at this level, the Job role must
+    have the appropriate quota.
+
+The final three Job attributes each take an object as their value.
+
+-   `update_config`: An `UpdateConfig`
+    object provides parameters for controlling the rate and policy of
+    rolling updates. The `UpdateConfig` parameters are:
+    -   `batch_size`: An integer, defaulting to `1`, specifying the
+        maximum number of shards to update in one iteration.
+    -   `restart_threshold`: An integer, defaulting to `60`, specifying
+        the maximum number of seconds a shard has to move into the
+        `RUNNING` state before it is considered a failure.
+    -   `watch_secs`: An integer, defaulting to `45`, specifying the
+        minimum number of seconds a shard must remain in the `RUNNING`
+        state before it is considered a success.
+    -   `max_per_shard_failures`: An integer, defaulting to `0`,
+        specifying the maximum number of restarts per shard during an
+        update. When the limit is exceeded, it increments the total
+        failure count.
+    -   `max_total_failures`: An integer, defaulting to `0`, specifying
+        the maximum number of shard failures tolerated during an update.
+        Cannot be equal to or greater than the job's total number of
+        tasks.
+-   `health_check_config`: A `HealthCheckConfig` object that provides
+    parameters for controlling a Task's health checks via HTTP. Only
+    used if a health port was assigned with a command line wildcard. The
+    `HealthCheckConfig` parameters are:
+    -   `initial_interval_secs`: An integer, defaulting to `15`,
+        specifying the initial delay for doing an HTTP health check.
+    -   `interval_secs`: An integer, defaulting to `10`, specifying the
+        number of seconds in the interval between checking the Task's
+        health.
+    -   `timeout_secs`: An integer, defaulting to `1`, specifying the
+        number of seconds the application has to respond to an HTTP health
+        check with `OK` before the check is considered a failure.
+    -   `max_consecutive_failures`: An integer, defaulting to `0`,
+        specifying the maximum number of consecutive failures before a
+        task is unhealthy.
+-   `constraints`: A `dict` Python object, specifying Task scheduling
+    constraints. Most users will not need to specify constraints, as the
+    scheduler automatically inserts reasonable defaults. Please do not
+    set this field unless you are sure of what you are doing. See the
+    section in the Aurora + Thermos Reference manual on [Specifying
+    Scheduling Constraints](/documentation/0.6.0-incubating/configuration-reference/) for more information.
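+
+Putting several of these attributes together, a sketch of a service Job with explicit update
+and health check settings (the values are illustrative and `web_task` is assumed to be
+defined earlier in the file):
+
+    web_job = Job(
+      name = 'web',
+      role = 'www-data',
+      cluster = 'cluster1',
+      environment = 'prod',
+      contact = 'web-team@foocorp.com',
+      instances = 20,
+      service = True,
+      task = web_task,
+      update_config = UpdateConfig(batch_size = 5, watch_secs = 60),
+      health_check_config = HealthCheckConfig(initial_interval_secs = 30, interval_secs = 15))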
+
+## The jobs List
+
+At the end of your `.aurora` file, you need to specify a list of the
+file's defined Jobs to run in the order listed. For example, the
+following runs first `job1`, then `job2`, then `job3`.
+
+    jobs = [job1, job2, job3]
+
+Templating
+----------
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a templating library called
+*Pystachio*, a powerful tool for configuration specification and reuse.
+
+[Aurora+Thermos Configuration Reference](/documentation/0.6.0-incubating/configuration-reference/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the *Aurora+Thermos Configuration
+Reference* without `import` statements - the Aurora config loader
+injects them automatically. Other than that, the `.aurora` format
+works like any other Python script.
+
+### Templating 1: Binding in Pystachio
+
+Pystachio uses the visually distinctive {{}} to indicate template
+variables. These are often called "mustache variables" after the
+similarly appearing variables in the Mustache templating system and
+because the curly braces resemble mustaches.
+
+If you are familiar with the Mustache system, templates in Pystachio
+have significant differences. They have no nesting, joining, or
+inheritance semantics. On the other hand, when evaluated, templates
+are evaluated iteratively, so this affords some level of indirection.
+
+Let's start with the simplest template; text with one
+variable, in this case `name`;
+
+    Hello {{name}}
+
+If we evaluate this as is, we'd get back:
+
+    Hello
+
+If a template variable doesn't have a value, when evaluated it's
+replaced with nothing. If we add a binding to give it a value:
+
+    { "name" : "Tom" }
+
+We'd get back:
+
+    Hello Tom
+
+We can also use {{}} variables as sectional variables. Let's say we
+have:
+
+    {{#x}} Testing... {{/x}}
+
+If `x` evaluates to `True`, the text between the sectional tags is
+shown. If there is no value for `x` or it evaluates to `False`, the
+text between the tags is not shown. So, at a basic level, a sectional
+variable acts as a conditional.
+
+However, if the sectional variable evaluates to a list, array, etc. it
+acts as a `foreach`. For example,
+
+    {{#x}} {{name}} {{/x}}
+
+with
+
+    { "x": [ { "name" : "tic" }, { "name" : "tac" }, { "name" : "toe" } ] }
+
+evaluates to
+
+    tic tac toe
+
+Every Pystachio object has an associated `.bind` method that can bind
+values to {{}} variables. Bindings are not immediately evaluated.
+Instead, they are evaluated only when the interpolated value of the
+object is necessary, e.g. for performing equality or serializing a
+message over the wire.
+
+Objects with and without mustache templated variables behave
+differently:
+
+    >>> Float(1.5)
+    Float(1.5)
+
+    >>> Float('{{x}}.5')
+    Float({{x}}.5)
+
+    >>> Float('{{x}}.5').bind(x = 1)
+    Float(1.5)
+
+    >>> Float('{{x}}.5').bind(x = 1) == Float(1.5)
+    True
+
+    >>> contextual_object = String('{{metavar{{number}}}}').bind(
+    ... metavar1 = "first", metavar2 = "second")
+
+    >>> contextual_object
+    String({{metavar{{number}}}})
+
+    >>> contextual_object.bind(number = 1)
+    String(first)
+
+    >>> contextual_object.bind(number = 2)
+    String(second)
+
+You usually bind simple key to value pairs, but you can also bind three
+other objects: lists, dictionaries, and structurals. These will be
+described in detail later.
+
+### Structurals in Pystachio / Aurora
+
+Most Aurora/Thermos users don't ever (knowingly) interact with `String`,
+`Float`, or `Integer` Pystachio objects directly. Instead they interact
+with derived structural (`Struct`) objects that are collections of
+fundamental and structural objects. The structural object components are
+called *attributes*. Aurora's most used structural objects are `Job`,
+`Task`, and `Process`:
+
+    class Process(Struct):
+      cmdline = Required(String)
+      name = Required(String)
+      max_failures = Default(Integer, 1)
+      daemon = Default(Boolean, False)
+      ephemeral = Default(Boolean, False)
+      min_duration = Default(Integer, 5)
+      final = Default(Boolean, False)
+
+Construct default objects by following the object's type with (). If you
+want an attribute to have a value different from its default, include
+the attribute name and value inside the parentheses.
+
+    >>> Process()
+    Process(daemon=False, max_failures=1, ephemeral=False,
+      min_duration=5, final=False)
+
+Attribute values can be template variables, which then receive specific
+values when creating the object.
+
+    >>> Process(cmdline = 'echo {{message}}')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo {{message}}, final=False)
+
+    >>> Process(cmdline = 'echo {{message}}').bind(message = 'hello world')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+A powerful binding property is that all of an object's children inherit its
+bindings:
+
+    >>> List(Process)([
+    ... Process(name = '{{prefix}}_one'),
+    ... Process(name = '{{prefix}}_two')
+    ... ]).bind(prefix = 'hello')
+    ProcessList(
+      Process(daemon=False, name=hello_one, max_failures=1, ephemeral=False, min_duration=5, final=False),
+      Process(daemon=False, name=hello_two, max_failures=1, ephemeral=False, min_duration=5, final=False)
+      )
+
+Remember that an Aurora Job contains Tasks which contain Processes. A
+Job level binding is inherited by its Tasks and all their Processes.
+Similarly a Task level binding is available to that Task and its
+Processes but is *not* visible at the Job level (inheritance is a
+one-way street.)
+
+#### Mustaches Within Structurals
+
+When you define a `Struct` schema, one powerful, but confusing, feature
+is that all of that structure's attributes are Mustache variables within
+the enclosing scope *once they have been populated*.
+
+For example, when `Process` is defined above, all its attributes such as
+{{`name`}}, {{`cmdline`}}, {{`max_failures`}} etc., are all immediately
+defined as Mustache variables, implicitly bound into the `Process`, and
+inherited by all child objects once they are defined.
+
+Thus, you can do the following:
+
+    >>> Process(name = "installer", cmdline = "echo {{name}} is running")
+    Process(daemon=False, name=installer, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo installer is running, final=False)
+
+WARNING: This binding only takes place in one direction. For example,
+the following does NOT work and does not set the `Process` `name`
+attribute's value.
+
+    >>> Process().bind(name = "installer")
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5, final=False)
+
+The following is also not possible and results in an infinite loop that
+attempts to resolve `Process.name`.
+
+    >>> Process(name = '{{name}}').bind(name = 'installer')
+
+Do not confuse Structural attributes with bound Mustache variables.
+Attributes are implicitly converted to Mustache variables but not vice
+versa.
+
+### Templating 2: Structurals Are Factories
+
+#### A Second Way of Templating
+
+A second templating method is both as powerful as the aforementioned and
+often confused with it. This method is due to automatic conversion of
+Struct attributes to Mustache variables as described above.
+
+Suppose you create a Process object:
+
+    >>> p = Process(name = "process_one", cmdline = "echo hello world")
+
+    >>> p
+    Process(daemon=False, name=process_one, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+This `Process` object, "`p`", can be used wherever a `Process` object is
+needed. It can also be reused by changing the value(s) of its
+attribute(s). Here we change its `name` attribute from `process_one` to
+`process_two`.
+
+    >>> p(name = "process_two")
+    Process(daemon=False, name=process_two, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+Template creation is a common use for this technique:
+
+    >>> Daemon = Process(daemon = True)
+    >>> logrotate = Daemon(name = 'logrotate', cmdline = './logrotate conf/logrotate.conf')
+    >>> mysql = Daemon(name = 'mysql', cmdline = 'bin/mysqld --safe-mode')
+
+### Advanced Binding
+
+As described above, `.bind()` binds simple strings or numbers to
+Mustache variables. In addition to Structural types formed by combining
+atomic types, Pystachio has two container types; `List` and `Map` which
+can also be bound via `.bind()`.
+
+#### Bind Syntax
+
+The `bind()` function can take Python dictionaries or `kwargs`
+interchangeably (when "`kwargs`" is in a function definition, `kwargs`
+receives a Python dictionary containing all keyword arguments after the
+formal parameter list).
+
+    >>> String('{{foo}}').bind(foo = 'bar') == String('{{foo}}').bind({'foo': 'bar'})
+    True
+
+Bindings done "closer" to the object in question take precedence:
+
+    >>> p = Process(name = '{{context}}_process')
+    >>> t = Task().bind(context = 'global')
+    >>> t(processes = [p, p.bind(context = 'local')])
+    Task(processes=ProcessList(
+      Process(daemon=False, name=global_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5),
+      Process(daemon=False, name=local_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5)
+    ))
+
+#### Binding Complex Objects
+
+##### Lists
+
+    >>> fibonacci = List(Integer)([1, 1, 2, 3, 5, 8, 13])
+    >>> String('{{fib[4]}}').bind(fib = fibonacci)
+    String(5)
+
+##### Maps
+
+    >>> first_names = Map(String, String)({'Kent': 'Clark', 'Wayne': 'Bruce', 'Prince': 'Diana'})
+    >>> String('{{first[Kent]}}').bind(first = first_names)
+    String(Clark)
+
+##### Structurals
+
+    >>> String('{{p.cmdline}}').bind(p = Process(cmdline = "echo hello world"))
+    String(echo hello world)
+
+### Structural Binding
+
+Use structural templates when binding more than two or three individual
+values at the Job or Task level. For fewer than two or three, standard
+key to string binding is sufficient.
+
+Structural binding is a very powerful pattern and is most useful in
+Aurora/Thermos for doing Structural configuration. For example, you can
+define a job profile. The following profile uses `HDFS`, the Hadoop
+Distributed File System, to designate a file's location. `HDFS` does
+not come with Aurora, so you'll need to either install it separately
+or change the way the dataset is designated.
+
+    class Profile(Struct):
+      version = Required(String)
+      environment = Required(String)
+      dataset = Default(String, 'hdfs://home/aurora/data/{{environment}}')
+
+    PRODUCTION = Profile(version = 'live', environment = 'prod')
+    DEVEL = Profile(version = 'latest',
+                    environment = 'devel',
+                    dataset = 'hdfs://home/aurora/data/test')
+    TEST = Profile(version = 'latest', environment = 'test')
+
+    JOB_TEMPLATE = Job(
+      name = 'application',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = '{{profile.environment}}',
+      task = SequentialTask(
+        name = 'task',
+        resources = Resources(cpu = 2, ram = 4*GB, disk = 8*GB),
+        processes = [
+	  Process(name = 'main',
+	          cmdline = 'java -jar application.jar -hdfsPath {{profile.dataset}}')
+        ]
+       )
+     )
+
+    jobs = [
+      JOB_TEMPLATE(instances = 100).bind(profile = PRODUCTION),
+      JOB_TEMPLATE.bind(profile = DEVEL),
+      JOB_TEMPLATE.bind(profile = TEST),
+     ]
+
+In this case, a custom structural "Profile" is created to self-document
+the configuration to some degree. This also allows some schema
+"type-checking", and for default self-substitution, e.g. in
+`Profile.dataset` above.
+
+So rather than a `.bind()` with a half-dozen substituted variables, you
+can bind a single object that has sensible defaults stored in a single
+place.
+
+Configuration File Writing Tips And Best Practices
+--------------------------------------------------
+
+### Use As Few .aurora Files As Possible
+
+When creating your `.aurora` configuration, try to keep all versions of
+a particular job within the same `.aurora` file. For example, if you
+have separate jobs for `cluster1`, `cluster1` staging, `cluster1`
+testing, and `cluster2`, keep them as close together as possible.
+
+Constructs shared across multiple jobs owned by your team (e.g.
+team-level defaults or structural templates) can be split into separate
+`.aurora` files and included via the `include` directive.
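+
+For example, a team might keep shared defaults in one file and pull them in where needed.
+The file and variable names below are hypothetical:
+
+    # defaults.aurora
+    TEAM_RESOURCES = Resources(cpu = 1.0, ram = 1*GB, disk = 1*GB)
+
+    # my_service.aurora
+    include('defaults.aurora')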
+
+### Avoid Boilerplate
+
+If you see repetition or find yourself copy and pasting any parts of
+your configuration, it's likely an opportunity for templating. Take the
+example below:
+
+`redundant.aurora` contains:
+
+    download = Process(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'rm -rf Python-2.7.3 && tar xjf Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    build = Process(
+      name = 'build',
+      cmdline = 'pushd Python-2.7.3 && ./configure && make && popd',
+      max_failures = 1)
+
+    email = Process(
+      name = 'email',
+      cmdline = 'echo Success | mail feynman@tmc.com',
+      max_failures = 5,
+      min_duration = 1)
+
+    build_python = Task(
+      name = 'build_python',
+      processes = [download, unpack, build, email],
+      constraints = [Constraint(order = ['download', 'unpack', 'build', 'email'])])
+
+As you'll notice, there's a lot of repetition in the `Process`
+definitions. For example, almost every process sets a `max_failures`
+limit to 5 and a `min_duration` to 1. This is an opportunity for factoring
+into a common process template.
+
+Furthermore, the Python version is repeated everywhere. This can be
+bound via structural templating as described in the [Advanced Binding](#advanced-binding)
+section.
+
+`less_redundant.aurora` contains:
+
+    class Python(Struct):
+      version = Required(String)
+      base = Default(String, 'Python-{{version}}')
+      package = Default(String, '{{base}}.tar.bz2')
+
+    ReliableProcess = Process(
+      max_failures = 5,
+      min_duration = 1)
+
+    download = ReliableProcess(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/{{python.version}}/{{python.package}}')
+
+    unpack = ReliableProcess(
+      name = 'unpack',
+      cmdline = 'rm -rf {{python.base}} && tar xjf {{python.package}}')
+
+    build = ReliableProcess(
+      name = 'build',
+      cmdline = 'pushd {{python.base}} && ./configure && make && popd',
+      max_failures = 1)
+
+    email = ReliableProcess(
+      name = 'email',
+      cmdline = 'echo Success | mail {{role}}@foocorp.com')
+
+    build_python = SequentialTask(
+      name = 'build_python',
+      processes = [download, unpack, build, email]).bind(python = Python(version = "2.7.3"))
+
+### Thermos Uses bash, But Thermos Is Not bash
+
+#### Bad
+
+Many tiny Processes make for harder-to-manage configurations.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'rcp user@my_machine:my_application .'
+    )
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+#### Good
+
+Each `cmdline` runs in a bash subshell, so you have the full power of
+bash. Chaining commands with `&&` or `||` is almost always the right
+thing to do.
+
+Also for Tasks that are simply a list of processes that run one after
+another, consider using the `SequentialTask` helper which applies a
+linear ordering constraint for you.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'rcp user@my_machine:my_application . && unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+### Rarely Use Functions In Your Configurations
+
+90% of the time you define a function in a `.aurora` file, you're
+probably Doing It Wrong(TM).
+
+#### Bad
+
+    def get_my_task(name, user, cpu, ram, disk):
+      return Task(
+        name = name,
+        user = user,
+        processes = [STAGE_PROCESS, RUN_PROCESS],
+        constraints = order(STAGE_PROCESS, RUN_PROCESS),
+        resources = Resources(cpu = cpu, ram = ram, disk = disk)
+      )
+
+    task_one = get_my_task('task_one', 'feynman', 1.0, 32*MB, 1*GB)
+    task_two = get_my_task('task_two', 'feynman', 2.0, 64*MB, 1*GB)
+
+#### Good
+
+This one is more idiomatic. Forced keyword arguments prevent accidents,
+e.g. constructing a task with `32*MB` when you mean 32 MB of RAM and not
+disk. Less proliferation of task-construction techniques makes for an
+easier-to-read, quicker-to-understand, and more composable
+configuration.
+
+    TASK_TEMPLATE = SequentialTask(
+      user = 'wickman',
+      processes = [STAGE_PROCESS, RUN_PROCESS],
+    )
+
+    task_one = TASK_TEMPLATE(
+      name = 'task_one',
+      resources = Resources(cpu = 1.0, ram = 32*MB, disk = 1*GB))
+
+    task_two = TASK_TEMPLATE(
+      name = 'task_two',
+      resources = Resources(cpu = 2.0, ram = 64*MB, disk = 1*GB)
+    )
diff --git a/source/documentation/0.6.0-incubating/contributing.md b/source/documentation/0.6.0-incubating/contributing.md
new file mode 100644
index 0000000..7ebd30e
--- /dev/null
+++ b/source/documentation/0.6.0-incubating/contributing.md
@@ -0,0 +1,68 @@
+Get the Source Code
+-------------------
+First things first, you'll need the source! The Aurora source is available from Apache git:
+
+    git clone https://gitbox.apache.org/repos/asf/incubator-aurora
+
+Find Something to Do
+--------------------
+There are issues in [Jira](https://issues.apache.org/jira/browse/AURORA) with the
+["newbie" tag](https://issues.apache.org/jira/browse/AURORA-189?jql=project%20%3D%20AURORA%20AND%20resolution%20%3D%20Unresolved%20AND%20labels%20%3D%20newbie%20ORDER%20BY%20priority%20DESC)
+that are good starting places for new Aurora contributors; pick one of these and dive in! Once
+you've got a patch, the next step is to post a review.
+
+Getting your ReviewBoard Account
+--------------------------------
+Go to https://reviews.apache.org and create an account.
+
+Setting up your ReviewBoard Environment
+---------------------------------------
+Run `./rbt status`. The first time this runs it will bootstrap and you will be asked to log in.
+Subsequent runs will cache your login credentials.
+
+Submitting a Patch for Review
+-----------------------------
+Post a review with `rbt`, fill out the fields in your browser and hit Publish.
+
+    ./rbt post -o
+
+Once you've done this, you probably want to mark the associated Jira issue as Reviewable.
+
+Updating an Existing Review
+---------------------------
+Incorporate review feedback, make some more commits, update your existing review, fill out the
+fields in your browser and hit Publish.
+
+    ./rbt post -o -r <RB_ID>
+
+Merging Your Own Review (Committers)
+------------------------------------
+Once you have shipits from the right committers, merge your changes in a single commit and mark
+the review as submitted. The typical workflow is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>  # Verify the automatically-generated commit message looks sane,
+                            # editing if necessary.
+    git show master         # Verify everything looks sane
+    git push origin master
+    ./rbt close <RB_ID>
+
+Note that even if you're developing using feature branches you will not use `git merge` - each
+commit will be an atomic change accompanied by a ReviewBoard entry.
+
+Merging Someone Else's Review
+-----------------------------
+Sometimes you'll need to merge someone else's RB. The typical workflow for this is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>
+    git show master  # Verify everything looks sane, author is correct
+    git push origin master
+
+Cleaning Up
+-----------
+Your patch has landed, congratulations! The last thing you'll want to do before moving on to your
+next fix is to clean up your Jira issue and ReviewBoard review. The former should be marked as
+"Resolved" and the latter as "Submitted".
diff --git a/source/documentation/0.6.0-incubating/cron-jobs.md b/source/documentation/0.6.0-incubating/cron-jobs.md
new file mode 100644
index 0000000..ddc0d12
--- /dev/null
+++ b/source/documentation/0.6.0-incubating/cron-jobs.md
@@ -0,0 +1,130 @@
+# Cron Jobs
+
+Aurora supports execution of scheduled jobs on a Mesos cluster using cron-style syntax.
+
+- [Overview](#overview)
+- [Collision Policies](#collision-policies)
+	- [KILL_EXISTING](#kill_existing)
+	- [CANCEL_NEW](#cancel_new)
+- [Failure recovery](#failure-recovery)
+- [Interacting with cron jobs via the Aurora CLI](#interacting-with-cron-jobs-via-the-aurora-cli)
+	- [cron schedule](#cron-schedule)
+	- [cron deschedule](#cron-deschedule)
+	- [cron start](#cron-start)
+	- [job killall, job restart, job kill](#job-killall-job-restart-job-kill)
+- [Technical Note About Syntax](#technical-note-about-syntax)
+- [Caveats](#caveats)
+	- [Failovers](#failovers)
+	- [Collision policy is best-effort](#collision-policy-is-best-effort)
+	- [Timezone Configuration](#timezone-configuration)
+
+## Overview
+
+A job is identified as a cron job by the presence of a
+`cron_schedule` attribute containing a cron-style schedule in the
+[`Job`](/documentation/0.6.0-incubating/configuration-reference/#job-objects) object. Examples of cron schedules
+include "every 5 minutes" (`*/5 * * * *`), "Fridays at 17:00" (`* 17 * * FRI`), and
+"the 1st and 15th day of the month at 03:00" (`0 3 1,15 *`).
+
+Example (available in the [Vagrant environment](/documentation/0.6.0-incubating/vagrant/)):
+
+    $ cat /vagrant/examples/job/cron_hello_world.aurora
+    # cron_hello_world.aurora
+    # A cron job that runs every 5 minutes.
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]
+
+## Collision Policies
+
+The `cron_collision_policy` field specifies the scheduler's behavior when a new cron job is
+triggered while an older run hasn't finished. The scheduler has two policies available,
+[KILL_EXISTING](#kill_existing) and [CANCEL_NEW](#cancel_new).
+
+### KILL_EXISTING
+
+The default policy - on a collision the old instances are killed and instances with the current
+configuration are started.
+
+### CANCEL_NEW
+
+On a collision the new run is cancelled.
+
+Note that the use of this policy is likely a code smell - interrupted cron jobs should be able
+to recover their progress on a subsequent invocation, otherwise they risk having their work queue
+grow faster than they can process it.
+
+## Failure recovery
+
+Unlike with services, which Aurora will always re-execute regardless of exit status, instances of
+cron jobs retry according to the `max_task_failures` attribute of the
+[Task](/documentation/0.6.0-incubating/configuration-reference/#task-objects) object. To get "run-until-failure" semantics,
+set `max_task_failures` to `-1`.
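+
+For example, a sketch of a cron job whose task is retried on failure without limit; the job
+name and command are illustrative:
+
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'periodic_import',
+        cron_schedule = '*/5 * * * *',
+        max_task_failures = -1,
+        task = SimpleTask('periodic_import', './run_import.sh'),
+      ),
+    ]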
+
+## Interacting with cron jobs via the Aurora CLI
+
+Most interaction with cron jobs takes place using the `cron` subcommand. See `aurora2 help cron`
+for up-to-date usage instructions.
+
+### cron schedule
+Schedules a new cron job on the Aurora cluster for later runs, or updates an existing job.
+
+    $ aurora2 cron schedule devcluster/www-data/test/cron_hello_world /vagrant/examples/jobs/cron_hello_world.aurora
+
+### cron deschedule
+Deschedules a cron job, preventing future runs but allowing current runs to complete.
+
+    $ aurora2 cron deschedule devcluster/www-data/test/cron_hello_world
+
+### cron start
+Start a cron job immediately, outside of its normal cron schedule.
+
+    $ aurora2 cron start devcluster/www-data/test/cron_hello_world
+
+### job killall, job restart, job kill
+Cron jobs create instances running on the cluster that you can interact with like normal Aurora
+tasks with `job kill` and `job restart`.
+
+## Technical Note About Syntax
+
+`cron_schedule` uses a restricted subset of BSD crontab syntax. While the
+execution engine currently uses Quartz, the schedule parsing is custom, a subset of FreeBSD
+[crontab(5)](http://www.freebsd.org/cgi/man.cgi?crontab(5)) syntax. See
+[the source](https://github.com/apache/incubator-aurora/blob/master/src/main/java/org/apache/aurora/scheduler/cron/CrontabEntry.java#L106-L124)
+for details.
+
+## Caveats
+
+### Failovers
+No failover recovery. Aurora does not record the latest minute it fired
+triggers for across failovers. Therefore it's possible to miss triggers
+on failover. Note that this behavior may change in the future.
+
+It's necessary to sync time between schedulers with something like `ntpd`.
+Clock skew could cause double or missed triggers in the case of a failover.
+
+### Collision policy is best-effort
+Aurora aims to always have *at least one copy* of a given instance running at a time - it's
+an AP system, meaning it chooses Availability and Partition Tolerance at the expense of
+Consistency.
+
+If your collision policy is `CANCEL_NEW` and a task has terminated but
+Aurora has not yet noticed this, Aurora will go ahead and create your new
+task.
+
+If your collision policy is `KILL_EXISTING` and a task was marked `LOST`
+but not yet GCed, Aurora will go ahead and create your new task without
+attempting to kill the old one (outside the GC interval).
+
+### Timezone Configuration
+Cron timezone is configured independently of JVM timezone with the `-cron_timezone` flag and
+defaults to UTC.
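+
+For example, to run cron schedules in US Pacific time rather than UTC (the timezone value is
+illustrative):
+
+    -cron_timezone=America/Los_Angeles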
diff --git a/source/documentation/0.6.0-incubating/deploying-aurora-scheduler.md b/source/documentation/0.6.0-incubating/deploying-aurora-scheduler.md
new file mode 100644
index 0000000..5e9f41a
--- /dev/null
+++ b/source/documentation/0.6.0-incubating/deploying-aurora-scheduler.md
@@ -0,0 +1,259 @@
+# Deploying the Aurora Scheduler
+
+When setting up your cluster, you will install the scheduler on a small number (usually 3 or 5) of
+machines.  This guide helps you get the scheduler set up and troubleshoot some common hurdles.
+
+- [Installing Aurora](#installing-aurora)
+  - [Creating the Distribution .zip File (Optional)](#creating-the-distribution-zip-file-optional)
+  - [Installing Aurora](#installing-aurora-1)
+- [Configuring Aurora](#configuring-aurora)
+  - [A Note on Configuration](#a-note-on-configuration)
+  - [Replicated Log Configuration](#replicated-log-configuration)
+  - [Initializing the Replicated Log](#initializing-the-replicated-log)
+  - [Storage Performance Considerations](#storage-performance-considerations)
+  - [Network considerations](#network-considerations)
+- [Running Aurora](#running-aurora)
+  - [Maintaining an Aurora Installation](#maintaining-an-aurora-installation)
+  - [Monitoring](#monitoring)
+  - [Running stateful services](#running-stateful-services)
+    - [Dedicated attribute](#dedicated-attribute)
+      - [Syntax](#syntax)
+      - [Example](#example)
+- [Common problems](#common-problems)
+  - [Replicated log not initialized](#replicated-log-not-initialized)
+    - [Symptoms](#symptoms)
+    - [Solution](#solution)
+  - [Scheduler not registered](#scheduler-not-registered)
+    - [Symptoms](#symptoms-1)
+    - [Solution](#solution-1)
+  - [Tasks are stuck in PENDING forever](#tasks-are-stuck-in-pending-forever)
+    - [Symptoms](#symptoms-2)
+    - [Solution](#solution-2)
+
+## Installing Aurora
+The Aurora scheduler is a standalone Java server. As part of the build process it creates a bundle
+of all its dependencies, with the notable exceptions of the JVM and libmesos. Each target server
+should have a JVM (Java 7 or higher) and libmesos (0.20.1) installed.
+
+### Creating the Distribution .zip File (Optional)
+To create a distribution for installation you will need build tools installed. On Ubuntu this can be
+done with `sudo apt-get install build-essential default-jdk`.
+
+    git clone http://gitbox.apache.org/repos/asf/incubator-aurora.git
+    cd incubator-aurora
+    ./gradlew distZip
+
+Copy the generated `dist/distributions/aurora-scheduler-*.zip` to each node that will run a scheduler.
+
+### Installing Aurora
+Extract the aurora-scheduler zip file. The example configurations assume it is extracted to
+`/usr/local/aurora-scheduler`.
+
+    sudo unzip dist/distributions/aurora-scheduler-*.zip -d /usr/local
+    sudo ln -nfs "$(ls -dt /usr/local/aurora-scheduler-* | head -1)" /usr/local/aurora-scheduler
+
+## Configuring Aurora
+
+### A Note on Configuration
+Like Mesos, Aurora uses command-line flags for runtime configuration. As such the Aurora
+"configuration file" is typically a `scheduler.sh` shell script of the form.
+
+    #!/bin/bash
+    AURORA_HOME=/usr/local/aurora-scheduler
+
+    # Flags controlling the JVM.
+    JAVA_OPTS=(
+      -Xmx2g
+      -Xms2g
+      # GC tuning, etc.
+    )
+
+    # Flags controlling the scheduler.
+    AURORA_FLAGS=(
+      -http_port=8081
+      # Log configuration, etc.
+    )
+
+    # Environment variables controlling libmesos
+    export JAVA_HOME=...
+    export GLOG_v=1
+    export LIBPROCESS_PORT=8083
+
+    JAVA_OPTS="${JAVA_OPTS[*]}" exec "$AURORA_HOME/bin/aurora-scheduler" "${AURORA_FLAGS[@]}"
+
+That way Aurora's current flags are visible in `ps` and in the `/vars` admin endpoint.
+
+Examples are available under `examples/scheduler/`. For a list of available Aurora flags and their
+documentation run
+
+    /usr/local/aurora-scheduler/bin/aurora-scheduler -help
+
+### Replicated Log Configuration
+All Aurora state is persisted to a replicated log. This includes all jobs Aurora is running,
+including where in the cluster they are being run and the configuration for running them, as
+well as other information such as metadata needed to reconnect to the Mesos master, resource
+quotas, and any other locks in place.
+
+Aurora schedulers use ZooKeeper to discover log replicas and elect a leader. Only one scheduler is
+leader at a given time - the other schedulers follow log writes and prepare to take over as leader
+but do not communicate with the Mesos master. Either 3 or 5 schedulers are recommended in a
+production deployment depending on failure tolerance and they must have persistent storage.
+
+In a cluster with `N` schedulers, the flag `-native_log_quorum_size` should be set to
+`floor(N/2) + 1`. So in a cluster with 1 scheduler it should be set to `1`, in a cluster with 3 it
+should be set to `2`, and in a cluster of 5 it should be set to `3`.
+
+  Number of schedulers (N) | ```-native_log_quorum_size``` setting (```floor(N/2) + 1```)
+  ------------------------ | -------------------------------------------------------------
+  1                        | 1
+  3                        | 2
+  5                        | 3
+  7                        | 4
+
+*Incorrectly setting this flag will cause data corruption to occur!*
+
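+As a quick sanity check, the formula can be computed directly; the tiny Python sketch below is
+illustrative only and not part of Aurora:
+
+    def native_log_quorum_size(num_schedulers):
+        """Return the -native_log_quorum_size value for a cluster of N schedulers."""
+        return num_schedulers // 2 + 1
+
+    # Matches the table above.
+    assert [native_log_quorum_size(n) for n in (1, 3, 5, 7)] == [1, 2, 3, 4]
+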
+See [this document](/documentation/0.6.0-incubating/storage-config/#scheduler-storage-configuration-flags) for more replicated
+log and storage configuration options.
+
+## Initializing the Replicated Log
+Before you start Aurora you will also need to initialize the log on the first master.
+
+    mesos-log initialize --path="$AURORA_HOME/scheduler/db"
+
+Failing to do this will result in the following message when you try to start the scheduler.
+
+    Replica in EMPTY status received a broadcasted recover request
+
+### Storage Performance Considerations
+
+See [this document](/documentation/0.6.0-incubating/scheduler-storage/) for scheduler storage performance considerations.
+
+### Network considerations
+The Aurora scheduler listens on 2 ports - an HTTP port used for client RPCs and a web UI,
+and a libprocess (HTTP+Protobuf) port used to communicate with the Mesos master and for the log
+replication protocol. These can be left unconfigured (the scheduler publishes all selected ports
+to ZooKeeper) or explicitly set in the startup script as follows:
+
+    # ...
+    AURORA_FLAGS=(
+      # ...
+      -http_port=8081
+      # ...
+    )
+    # ...
+    export LIBPROCESS_PORT=8083
+    # ...
+
+## Running Aurora
+Configure a supervisor like [Monit](http://mmonit.com/monit/) or
+[supervisord](http://supervisord.org/) to run the created `scheduler.sh` file and restart it
+whenever it fails. Aurora expects to be restarted by an external process when it fails. Aurora
+supports an active health checking protocol on its admin HTTP interface - if a `GET /health` times
+out or returns anything other than `200 OK` the scheduler process is unhealthy and should be
+restarted.
+
+For example, monit can be configured with
+
+    if failed port 8081 send "GET /health HTTP/1.0\r\n" expect "OK\n" with timeout 2 seconds for 10 cycles then restart
+
+assuming you set `-http_port=8081`.
+
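+If you prefer a custom watchdog over monit or supervisord, the same check can be scripted. Below is
+a minimal Python (2.x) sketch, assuming the default admin port and a two second timeout; adjust for
+your deployment:
+
+    import urllib2
+
+    def scheduler_is_healthy(host='localhost', port=8081, timeout=2):
+        """Return True if GET /health answers 200 OK within the timeout."""
+        try:
+            response = urllib2.urlopen('http://%s:%d/health' % (host, port), timeout=timeout)
+            return response.getcode() == 200 and response.read().strip() == 'OK'
+        except (urllib2.URLError, IOError):
+            return False
+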
+### Maintaining an Aurora Installation
+
+### Monitoring
+Please see our dedicated [monitoring guide](/documentation/0.6.0-incubating/monitoring/) for in-depth discussion on monitoring.
+
+### Running stateful services
+Aurora is best suited to run stateless applications, but it also accommodates stateful services
+such as databases, or services that otherwise need to always run on the same machines.
+
+#### Dedicated attribute
+The Mesos slave has the `--attributes` command line argument which can be used to mark a slave with
+static attributes (not to be confused with `--resources`, which are dynamic and accounted).
+
+Aurora makes these attributes available for matching with scheduling
+[constraints](/documentation/0.6.0-incubating/configuration-reference/#specifying-scheduling-constraints).  Most of these
+constraints are arbitrary and available for custom use.  There is one exception, though: the
+`dedicated` attribute.  Aurora treats this attribute specially: only jobs whose constraints match
+may run on those machines, and such jobs will be scheduled only onto those machines.
+
+##### Syntax
+The dedicated attribute has semantic meaning. The format is `$role(/.*)?`. When a job is created,
+the scheduler requires that the `$role` component matches the `role` field in the job
+configuration, and will reject the job creation otherwise.  The remainder of the attribute is
+free-form. We've developed the idiom of formatting this attribute as `$role/$job`, but do not
+enforce this.
+
+##### Example
+Consider the following slave command line:
+
+    mesos-slave --attributes="host:$HOST;rack:$RACK;dedicated:db_team/redis" ...
+
+And this job configuration:
+
+    Service(
+      name = 'redis',
+      role = 'db_team',
+      constraints = {
+        'dedicated': 'db_team/redis'
+      },
+      ...
+    )
+
+The job configuration indicates that it should only be scheduled on slaves with the attribute
+`dedicated:db_team/redis`.  Additionally, Aurora will prevent any tasks that do _not_ have that
+constraint from running on those slaves.
+
+
+## Common problems
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions here to help get you moving.
+
+### Replicated log not initialized
+
+#### Symptoms
+- Scheduler RPCs and web interface claim `Storage is not READY`
+- Scheduler log repeatedly prints messages like
+
+  ```
+  I1016 16:12:27.234133 26081 replica.cpp:638] Replica in EMPTY status
+  received a broadcasted recover request
+  I1016 16:12:27.234256 26084 recover.cpp:188] Received a recover response
+  from a replica in EMPTY status
+  ```
+
+#### Solution
+When you create a new cluster, you need to inform a quorum of schedulers that they are safe to
+consider their database to be empty by [initializing](#initializing-the-replicated-log) the
+replicated log. This is done to prevent the scheduler from modifying the cluster state in the event
+of multiple simultaneous disk failures or, more likely, misconfiguration of the replicated log path.
+
+### Scheduler not registered
+
+#### Symptoms
+Scheduler log contains
+
+    Framework has not been registered within the tolerated delay.
+
+#### Solution
+Double-check that the scheduler is configured correctly to reach the master. If you are registering
+the master in ZooKeeper, make sure the command line argument to the master:
+
+    --zk=zk://$ZK_HOST:2181/mesos/master
+
+is the same as the one on the scheduler:
+
+    -mesos_master_address=zk://$ZK_HOST:2181/mesos/master
+
+### Tasks are stuck in `PENDING` forever
+
+#### Symptoms
+The scheduler is registered, and [receiving offers](/documentation/0.6.0-incubating/monitoring/#scheduler_resource_offers),
+but tasks are perpetually shown as `PENDING - Constraint not satisfied: host`.
+
+#### Solution
+Check that your slaves are configured with `host` and `rack` attributes.  Aurora requires that
+slaves are tagged with these two common failure domains to ensure that it can safely place tasks
+such that jobs are resilient to failure.
+
+See our [vagrant example](examples/vagrant/upstart/mesos-slave.conf) for details.
diff --git a/source/documentation/0.6.0-incubating/design/command-hooks.md b/source/documentation/0.6.0-incubating/design/command-hooks.md
new file mode 100644
index 0000000..cc56218
--- /dev/null
+++ b/source/documentation/0.6.0-incubating/design/command-hooks.md
@@ -0,0 +1,263 @@
+# Command Hooks for the Aurora Client
+
+## Introduction/Motivation
+
+We've got hooks in the client that surround API calls. These are
+pretty awkward, because they don't correlate with user actions. For
+example, suppose we wanted a policy that said users weren't allowed to
+kill all instances of a production job at once.
+
+Right now, all that we could hook would be the "killJob" api call. But
+kill (at least in newer versions of the client) normally runs in
+batches. If a user called killall, what we would see on the API level
+is a series of "killJob" calls, each of which specified a batch of
+instances. We wouldn't be able to distinguish between really killing
+all instances of a job (which is forbidden under this policy), and
+carefully killing in batches (which is permitted). In each case, the
+hook would just see a series of API calls, and couldn't find out what
+the actual command being executed was!
+
+For most policy enforcement, what we really want to be able to do is
+look at and vet the commands that a user is performing, not the API
+calls that the client uses to implement those commands.
+
+So I propose that we add a new kind of hooks, which surround noun/verb
+commands. A hook will register itself to handle a collection of (noun,
+verb) pairs. Whenever any of those noun/verb commands are invoked, the
+hook's methods will be called around the execution of the verb. A
+pre-hook will have the ability to reject a command, preventing the
+verb from being executed.
+
+## Registering Hooks
+
+These hooks will be registered two ways:
+* Project hooks file. If a file named `AuroraHooks` is in the project directory
+  where an aurora command is being executed, that file will be read,
+  and its hooks will be registered.
+* Configuration plugins. A configuration plugin can register hooks using an API.
+  Hooks registered this way are, effectively, hardwired into the client executable.
+
+The order of execution of hooks is unspecified: they may be called in
+any order. There is no way to guarantee that one hook will execute
+before some other hook.
+
+
+### Global Hooks
+
+Commands registered by the python call are called _global_ hooks,
+because they will run for all configurations, whether or not they
+specify any hooks in the configuration file.
+
+In the implementation, hooks are registered in the module
+`apache.aurora.client.cli.command_hooks`, using the class `GlobalCommandHookRegistry`.  A
+global hook can be registered by calling `GlobalCommandHookRegistry.register_command_hook`
+in a configuration plugin.
+
+### Hook Files
+
+A hook file is a file containing Python source code. It will be
+dynamically loaded by the Aurora command line executable. After
+loading, the client will check the module for a global variable named
+"hooks", which contains a list of hook objects, which will be added to
+the hook registry.
+
+A project hooks file will be named `AuroraHooks`,
+and will be located in either the directory where the command is being
+executed, or one of its parent directories, up to the nearest git/mercurial
+repository base.
+
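+For illustration, a minimal `AuroraHooks` file might look like the sketch below. The `hooks`
+variable is the part the client looks for; the `LoggingHook` class itself is hypothetical and only
+implements two of the methods described in the API section that follows.
+
+    # Example contents of an AuroraHooks file (hypothetical).
+    class LoggingHook(object):
+      def pre_command(self, noun, verb, context, commandline):
+        print('About to run: aurora %s %s' % (noun, verb))
+        return True  # Allow the command to proceed.
+
+      def post_command(self, noun, verb, context, commandline, result):
+        print('Finished: aurora %s %s (result=%s)' % (noun, verb, result))
+
+    # The client looks for a module-level variable named "hooks".
+    hooks = [LoggingHook()]
+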
+### The API
+
+    class CommandHook(object):
+      @property
+      def name(self):
+        """Returns a name for the hook."
+
+      def get_nouns(self):
+        """Return the nouns that have verbs that should invoke this hook."""
+
+      def get_verbs(self, noun):
+        """Return the verbs for a particular noun that should invoke his hook."""
+
+      @abstractmethod
+      def pre_command(self, noun, verb, context, commandline):
+        """Execute a hook before invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook.
+        * commandline: the original argv collection used to invoke the client.
+        Returns: True if the command should be allowed to proceed; False if the command
+        should be rejected.
+        """
+
+      def post_command(self, noun, verb, context, commandline, result):
+        """Execute a hook after invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook.
+        * commandline: the original argv collection used to invoke the client.
+        * result: the result code returned by the verb.
+        Returns: nothing
+        """
+
+    class GlobalCommandHookRegistry(object):
+      @classmethod
+      def register_command_hook(cls, hook):
+        pass
+
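+As a concrete illustration, the policy from the introduction - rejecting an attempt to kill every
+instance of a production job in one shot - might look roughly like the sketch below. It assumes the
+proposed `GlobalCommandHookRegistry` module described above; the hook class, the `prod` environment
+naming, and the job-key parsing are hypothetical.
+
+    from apache.aurora.client.cli.command_hooks import GlobalCommandHookRegistry
+
+    class NoKillAllProdHook(object):
+      """Rejects 'aurora job killall' when the target looks like a production job."""
+
+      @property
+      def name(self):
+        return 'no_killall_prod'
+
+      def get_nouns(self):
+        return ['job']
+
+      def get_verbs(self, noun):
+        return ['killall'] if noun == 'job' else []
+
+      def pre_command(self, noun, verb, context, commandline):
+        # Job keys have the form cluster/role/env/name; treat 'prod' environments as protected.
+        job_keys = [arg for arg in commandline if arg.count('/') == 3]
+        return not any(key.split('/')[2] == 'prod' for key in job_keys)
+
+      def post_command(self, noun, verb, context, commandline, result):
+        pass
+
+    # Hardwiring the hook into the client from a configuration plugin:
+    GlobalCommandHookRegistry.register_command_hook(NoKillAllProdHook())
+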
+## Skipping Hooks
+
+In a perfect world, hooks would represent a global property or policy
+that should always be enforced. Unfortunately, we don't live in a
+perfect world, which means that sometimes, every rule needs to get
+broken.
+
+For example, an organization could decide that every configuration
+must be checked in to source control before it could be
+deployed. That's an entirely reasonable policy. It would be easy to
+implement it using a hook. But what if there's a problem, and the
+source repository is down?
+
+The easiest solution is just to allow a user to add a `--skip-hooks`
+flag to the command-line. But doing that means that an organization
+can't actually use hooks to enforce policy, because users can skip
+them whenever they want.
+
+Instead, we'd like to have a system where it's possible to create
+hooks to enforce policy, and then include a way of building policy
+about when hooks can be skipped.
+
+I'm using sudo as a rough model for this. Many organizations need to
+give people the ability to run privileged commands, but they still
+want to have some control. Sudo allows them to specify who is allowed
+to run a privileged command; where they're allowed to run it; and what
+command(s) they're allowed to run.  All of that is specified in a
+special system file located in `/etc/sudoers` on a typical unix
+machine.
+
+In a world of distributed systems, this approach has one grave
+weakness. An aurora client can be located on any machine that has
+network access to a Mesos/Aurora cluster. It can be run by a user in
+pretty much any way they want - from a machine they control, from a
+special chroot they created, etc. Relying on a file being in a special
+location on their machine isn't sufficient - it's too easy to
+maliciously or erroneously run a command in an environment with an
+invalid hooks exceptions file.
+
+Instead, we've got two basic choices: hook exception rules can be
+baked into the client executable, or they can be provided in a
+network location.
+
+### Specifying when hooks can be skipped
+
+#### Hooks File
+
+The module `apache.aurora.client.cli` contains a variable named
+`GLOBAL_HOOK_SKIP_RULES_URL`. In the default distribution of Aurora, this variable contains
+`None`. Users can modify this value for their local environments, providing
+a site specific URL. If users attempt to bypass command hooks, and this
+URL is not `None`, then the client will fetch the contents of that URL, and
+attempt to interpret it as a hooks exception file.
+
+The hooks exception file is written in JSON, with the following structure:
+
+    { "rulename":
+      {
+      "hooks": [ "hook-name ", ... ],
+      "users": [ string, ...],
+      "commands": { "job": ["kill", "killall", ...], ... },
+      "arg-patterns": [ "regexp-str", ... ]
+      },
+	  ...
+    }
+
+* `hooks` is a list of hook identifiers which can be skipped by a user
+  that satisfies this rule. If omitted, then this rule applies to _all hooks_.
+  (Omitting the `hooks` field is equivalent to giving it the value `['*']`.)
+* `users` is a list of user names, or glob expressions that range over user
+  names. This rule gives permission to those users to skip hooks. If omitted,
+  then this rule allows _any user_ to skip hooks that satisfy the rest of this rule.
+  Note that this is _user_ names, not
+  _role_ names: the rules specify users that are allowed to skip commands.
+  Some users that are allowed to work with a role account may be allowed to
+  skip, while others cannot.
+* `commands` is a map from nouns to lists of verbs. If a command `aurora n v`
+  is being executed, this rule allows the hooks to be skipped if
+  `v` is in `commands[n]`. If this is omitted, then it allows hooks to be skipped for all
+  commands that satisfy the rest of the rule.
+* `arg-patterns` is a list of glob patterns ranging over parameters.
+  If any of the command's parameters match a pattern in this list,
+  the hook can be skipped. If omitted, then this applies regardless of arguments.
+
+For example, the following is a hook rules file which allows:
+* The user "root" to skip any hook.
+* Any user to skip hooks for test jobs.
+* A specific group of users to skip hooks for jobs in cluster `east`
+* Another group of users to skip hooks for `job kill` in cluster `west`.
+
+    {
+      "allow-admin": { "users": ["root"] },
+      "allow-test": { "users": ["*"], "arg-patterns": ["*/*/test/*"] },
+      "allow-east-users": { "users": ["john", "mary", "mike", "sue"],
+          "arg-patterns": ["east/*/*/*"] },
+      "allow-west-kills": { "users": ["anne", "bill", "chris"],
+          "commands": { "job": ["kill"] }, "arg-patterns": ["west/*/*/*"] }
+    }
+
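+The exact evaluation logic is up to the client implementation, but conceptually a rule permits a
+skip only when all of the fields it specifies match. A rough Python sketch of that idea (the
+function name and the use of `fnmatch` for globbing are illustrative assumptions):
+
+    import fnmatch
+
+    def rule_allows_skip(rule, user, requested_hooks, noun, verb, args):
+      """Return True if a single rule from the exceptions file permits this skip."""
+      if 'hooks' in rule and not set(requested_hooks) <= set(rule['hooks']):
+        return False
+      if 'users' in rule and not any(fnmatch.fnmatch(user, pat) for pat in rule['users']):
+        return False
+      if 'commands' in rule and verb not in rule['commands'].get(noun, []):
+        return False
+      if 'arg-patterns' in rule and not any(
+          fnmatch.fnmatch(arg, pat) for arg in args for pat in rule['arg-patterns']):
+        return False
+      return True
+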
+#### Programmatic Hooks Exceptions
+
+The `GlobalHooksRegistry` contains the method `add_hooks_exception`, which allows
+users to register local hooks exceptions using the `ConfigurationPlugin` mechanism.
+A hooks exception object implements the following interface:
+
+    class HooksException(object):
+      def allow_exception(self, hooks, role, noun, verb, args):
+        """Params:
+        - hooks: a list of hook-names that the user wants to skip. If this
+          is omitted, then this applies to all hooks.
+        - role: the role requesting that hooks be skipped.
+        - noun, verb: the noun and verb being executed.
+        - args: the other command-line arguments.
+        Returns: True if the user should be allowed to skip the requested hooks.
+        """
+        return False
+
+When a user supplies the `--skip-hooks` argument, `allow_exception` is invoked on
+each of the registered `HooksException` objects. If _any_ of the hooks exception objects
+returns `True`, then the user will be permitted to skip the hooks.
+
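+For example, an organization might hardwire an exception that only lets a small group of operator
+accounts skip hooks. A hypothetical sketch (the class and user names are made up):
+
+    import getpass
+
+    class OpsOnlyHooksException(object):
+      """Allows only designated operator accounts to skip hooks."""
+      OPS_USERS = frozenset(['oncall1', 'oncall2'])
+
+      def allow_exception(self, hooks, role, noun, verb, args):
+        # Ignores which hooks and commands are involved; any request from an operator is allowed.
+        return getpass.getuser() in self.OPS_USERS
+
+Such an object would be registered through `GlobalHooksRegistry.add_hooks_exception` from a
+`ConfigurationPlugin`.
+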
+### Skipping Hooks
+
+To skip a hook, a user uses a command-line option, `--skip-hooks`. The option can either
+specify specific hooks to skip, or "all":
+
+* `aurora --skip-hooks=all job create east/bozo/devel/myjob` will create a job
+  without running any hooks.
+* `aurora --skip-hooks=test,iq job create east/bozo/devel/myjob` will create a job,
+  and will skip only the hooks named "test" and "iq".
+
+
+## Changes
+
+4/30:
+* Rule exceptions are defined in JSON, and they are specified to be loaded from
+  a URL, not from a local file.
+* Rule exceptions specify users, not roles.
+
+4/27:
+Major changes between this and the last version of this proposal.
+* Command hooks can't be declared in a configuration file. There's a simple
+  reason why: hooks run before a command's implementation is invoked.
+  Config files are read during the command's invocation if necessary. If the
+  hook is declared in the config file, by the time you know that it should
+  have been run, it's too late. So I've removed config-hooks from the
+  proposal. (API hooks defined by configs still work.)
+* Skipping hooks. We expect aurora to be used inside of large
+  organizations. One of the primary use-cases of hooks is to create
+  enforceable policies that are specific to an organization. If hooks
+  can just be skipped because a user wants to skip them, it means that
+  the policy can't be enforced, which defeats the purpose of having them.
+  So in this update, I propose a mechanism, loosely based on a `sudo`-like
+  mechanism for defining when hooks can be skipped.
diff --git a/source/documentation/0.6.0-incubating/developing-aurora-client.md b/source/documentation/0.6.0-incubating/developing-aurora-client.md
new file mode 100644
index 0000000..e6be8ad
--- /dev/null
+++ b/source/documentation/0.6.0-incubating/developing-aurora-client.md
@@ -0,0 +1,122 @@
+Getting Started
+===============
+
+Aurora consists of four main pieces: the scheduler (which finds resources in the cluster that can be used to run a job), the executor (which uses the resources assigned by the scheduler to run a job), the command-line client, and the web UI. For information about working on the scheduler or the web UI, see the file "developing-aurora-scheduler.md" in this directory.
+
+If you want to work on the command-line client, this is the place for you!
+
+The client is written in Python, and unlike the server side of things, we build the client using the Pants build tool instead of Gradle. Pants is a tool that was built by Twitter for handling builds of large collaborative systems. You can see a detailed explanation of
+pants [here](http://pantsbuild.github.io/python-readme.html).
+
+To build the client executable, run the following in a command-shell:
+
+    $ ./pants src/main/python/apache/aurora/client/cli:aurora2
+
+This will produce a python executable _pex_ file in `dist/aurora2.pex`. Pex files
+are fully self-contained executables: just copy the pex file into your path, and you'll be able to run it. For example, for a typical installation:
+
+    $ cp dist/aurora2.pex /usr/local/bin/aurora
+
+To run all of the client tests:
+
+    $ ./pants src/test/python/apache/aurora/client/:all
+
+
+Client Configuration
+====================
+
+The client uses a configuration file that specifies available clusters. More information about the
+contents of this file can be found in the
+[Client Cluster Configuration](/documentation/0.6.0-incubating/client-cluster-configuration/) documentation. Information about
+how the client locates this file can be found in the
+[Client Commands](/documentation/0.6.0-incubating/client-commands/#cluster-configuration) documentation.
+
+Client Versions
+===============
+
+There are currently two versions of the aurora client, imaginatively known as v1 and v2. All new development is done entirely in v2, but we continue to support and fix bugs in v1 until we get to the point where v2 is feature-complete and tested, and Aurora users have had some time to adapt and switch their processes to use v2.
+
+Both versions are built on the same underlying API code.
+
+Client v1 was implemented using twitter.common.app. The command-line processing code for v1 can be found in `src/main/python/apache/aurora/client/commands` and
+`src/main/python/apache/aurora/client/bin`.
+
+Client v2 was implemented using its own noun/verb framework. The client v2 code can be found in `src/main/python/apache/aurora/client/cli`, and the noun/verb framework can be
+found in the `__init__.py` file in that directory.
+
+
+Building and Testing the Client
+===============================
+
+Building and testing the client code are both done using Pants. The relevant targets to know about are:
+
+   * Build a client v2 executable: `./pants src/main/python/apache/aurora/client/cli:aurora2`
+   * Test client v2 code: `./pants src/test/python/apache/aurora/client/cli:all`
+   * Build a client v1 executable: `./pants src/main/python/apache/aurora/client/bin:aurora_client`
+   * Test client v1 code: `./pants src/main/python/apache/aurora/client/commands:all`
+   * Test all client code: `./pants src/main/python/apache/aurora/client:all`
+
+
+Overview of the Client Architecture
+===================================
+
+The client is built on a stacked architecture:
+
+   1. At the lowest level, we have a thrift RPC API interface
+    to the aurora scheduler. The interface is declared in thrift, in the file
+    `src/main/thrift/org/apache/aurora/gen/api.thrift`.
+
+  2. On top of the primitive API, we have a client API. The client API
+    takes the primitive operations provided by the scheduler, and uses them
+    to implement client-side behaviors. For example, updating a job on the
+    scheduler is done by a sequence of operations (see the sketch after this list).  The sequence is implemented
+    by the client API `update` method, which does the following using the thrift API:
+     * fetching the state of task instances in the mesos cluster, and figuring out which need
+       to be updated;
+     * For each task to be updated:
+         - killing the old version;
+         - starting the new version;
+         - monitoring the new version to ensure that the update succeeded.
+  3. On top of the API, we have the command-line client itself. The core client, at this level,
+    consists of the interface to the command-line which the user will use to interact with aurora.
+    The client v2 code is found in `src/python/apache/aurora/client/cli`. In the `cli` directory,
+    the rough structure is as follows:
+       * `__init__.py` contains the noun/verb command-line processing framework used by client v2.
+       * `jobs.py` contains the implementation of the core `job` noun, and all of its operations.
+       * `bridge.py` contains the implementation of a component that allows us to ship a
+         combined client that runs both v1 and v2 client commands during the transition period.
+       * `client.py` contains the code that binds the client v2 nouns and verbs into an executable.
+
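+To make the client API layer concrete, the update sequence described in item 2 can be pictured as
+pseudocode along the lines of the sketch below. The `api.*` method names are placeholders, not the
+actual client API:
+
+    def update(api, job_key, new_config):
+      # 1. Fetch the running instances and decide which ones differ from the new configuration.
+      running = api.get_tasks(job_key)
+      stale = [task for task in running if task.config != new_config]
+
+      # 2. For each stale instance: kill the old version, start the new one,
+      #    and watch it until it is confirmed healthy.
+      for task in stale:
+        api.kill(job_key, task.instance_id)
+        api.start(job_key, task.instance_id, new_config)
+        api.watch_until_healthy(job_key, task.instance_id)
+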
+Running/Debugging the Client
+============================
+
+For manually testing client changes against a cluster, we use vagrant. To start a virtual cluster,
+you need to install a working vagrant environment, and then run `vagrant up` from the root of
+the aurora workspace. This will create a vagrant host named "devcluster", with a mesos master,
+a set of mesos slaves, and an aurora scheduler.
+
+To use the devcluster, you need to bring it up by running `vagrant up`, and then connect to the vagrant host using `vagrant ssh`. This will open a bash session on the virtual machine hosting the devcluster. In the home directory, there are two key paths to know about:
+
+   * `~/aurora`: this is a copy of the git workspace in which you launched the vagrant cluster.
+     To test client changes, you'll use this copy.
+   * `/vagrant`: this is a mounted filesystem that's a direct image of your git workspace.
+     This isn't a copy - it is your git workspace. Editing files on your host machine will
+     be immediately visible here, because they are the same files.
+
+Whenever the scheduler is modified, to update your vagrant environment to use the new scheduler,
+you'll need to re-initialize your vagrant images. To do this, you need to run two commands:
+
+   * `vagrant destroy`: this will delete the old devcluster image.
+   * `vagrant up`: this creates a fresh devcluster image based on the current state of your workspace.
+
+You should try to minimize rebuilding vagrant images; it's not horribly slow, but it does take a while.
+
+To test client changes:
+
+   * Make a change in your local workspace, and commit it.
+   * `vagrant ssh` into the devcluster.
+   * `cd aurora`
+   * Pull your changes into the vagrant copy: `git pull /vagrant *branchname*`.
+   * Build the modified client using pants.
+   * Run your command using `aurora2`. (You don't need to do any install; the aurora2 command
+     is a symbolic link to the executable generated by pants.)
diff --git a/source/documentation/latest/developing-aurora-scheduler.md b/source/documentation/0.6.0-incubating/developing-aurora-scheduler.md
similarity index 100%
copy from source/documentation/latest/developing-aurora-scheduler.md
copy to source/documentation/0.6.0-incubating/developing-aurora-scheduler.md
diff --git a/source/documentation/latest/hooks.md b/source/documentation/0.6.0-incubating/hooks.md
similarity index 100%
copy from source/documentation/latest/hooks.md
copy to source/documentation/0.6.0-incubating/hooks.md
diff --git a/publish/documentation/latest/images/CPUavailability.png b/source/documentation/0.6.0-incubating/images/CPUavailability.png
similarity index 100%
copy from publish/documentation/latest/images/CPUavailability.png
copy to source/documentation/0.6.0-incubating/images/CPUavailability.png
Binary files differ
diff --git a/publish/documentation/latest/images/HelloWorldJob.png b/source/documentation/0.6.0-incubating/images/HelloWorldJob.png
similarity index 100%
copy from publish/documentation/latest/images/HelloWorldJob.png
copy to source/documentation/0.6.0-incubating/images/HelloWorldJob.png
Binary files differ
diff --git a/publish/documentation/latest/images/RoleJobs.png b/source/documentation/0.6.0-incubating/images/RoleJobs.png
similarity index 100%
copy from publish/documentation/latest/images/RoleJobs.png
copy to source/documentation/0.6.0-incubating/images/RoleJobs.png
Binary files differ
diff --git a/publish/documentation/latest/images/ScheduledJobs.png b/source/documentation/0.6.0-incubating/images/ScheduledJobs.png
similarity index 100%
copy from publish/documentation/latest/images/ScheduledJobs.png
copy to source/documentation/0.6.0-incubating/images/ScheduledJobs.png
Binary files differ
diff --git a/publish/documentation/latest/images/TaskBreakdown.png b/source/documentation/0.6.0-incubating/images/TaskBreakdown.png
similarity index 100%
copy from publish/documentation/latest/images/TaskBreakdown.png
copy to source/documentation/0.6.0-incubating/images/TaskBreakdown.png
Binary files differ
diff --git a/publish/documentation/latest/images/aurora_hierarchy.png b/source/documentation/0.6.0-incubating/images/aurora_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/aurora_hierarchy.png
copy to source/documentation/0.6.0-incubating/images/aurora_hierarchy.png
Binary files differ
diff --git a/publish/documentation/latest/images/killedtask.png b/source/documentation/0.6.0-incubating/images/killedtask.png
similarity index 100%
copy from publish/documentation/latest/images/killedtask.png
copy to source/documentation/0.6.0-incubating/images/killedtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/lifeofatask.png b/source/documentation/0.6.0-incubating/images/lifeofatask.png
similarity index 100%
copy from publish/documentation/latest/images/lifeofatask.png
copy to source/documentation/0.6.0-incubating/images/lifeofatask.png
Binary files differ
diff --git a/publish/documentation/latest/images/runningtask.png b/source/documentation/0.6.0-incubating/images/runningtask.png
similarity index 100%
copy from publish/documentation/latest/images/runningtask.png
copy to source/documentation/0.6.0-incubating/images/runningtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/stderr.png b/source/documentation/0.6.0-incubating/images/stderr.png
similarity index 100%
copy from publish/documentation/latest/images/stderr.png
copy to source/documentation/0.6.0-incubating/images/stderr.png
Binary files differ
diff --git a/publish/documentation/latest/images/stdout.png b/source/documentation/0.6.0-incubating/images/stdout.png
similarity index 100%
copy from publish/documentation/latest/images/stdout.png
copy to source/documentation/0.6.0-incubating/images/stdout.png
Binary files differ
diff --git a/publish/documentation/latest/images/storage_hierarchy.png b/source/documentation/0.6.0-incubating/images/storage_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/storage_hierarchy.png
copy to source/documentation/0.6.0-incubating/images/storage_hierarchy.png
Binary files differ
diff --git a/source/documentation/0.6.0-incubating/index.html.md b/source/documentation/0.6.0-incubating/index.html.md
new file mode 100644
index 0000000..0a9eba6
--- /dev/null
+++ b/source/documentation/0.6.0-incubating/index.html.md
@@ -0,0 +1,20 @@
+# Overview
+
+*Aurora* is a service scheduler that schedules jobs onto *Mesos*, which runs tasks in a specified cluster. Typical services consist of up to hundreds of task replicas.
+
+Aurora provides a *Job* abstraction consisting of a *Task* template and instructions for creating near-identical replicas of that Task (modulo things like "instance id" or specific port numbers which may differ from machine to machine).
+
+*Terminology Note*: *Replicas* are also referred to as *shards* and *instances*. While there is a general desire to move to using "instances", "shard" is still found in commands and help strings.
+
+Typically a Task is a single *Process* corresponding to a single command line, such as `python2.6 my_script.py`. However, sometimes you must colocate separate Processes within a single Task, which runs within a single container and `chroot`, often referred to as a "sandbox". For example, you might run multiple cooperating agents together, such as `logrotate`, `installer`, and master or slave processes. *Thermos* provides a Process abstraction under the Mesos Tasks.
+
+To use and get up to speed on Aurora, you should read the docs in this directory in this order:
+
+1. How to [deploy Aurora](/documentation/0.6.0-incubating/deploying-aurora-scheduler/) or how to [install Aurora on virtual machines on your private machine](/documentation/0.6.0-incubating/vagrant/) (the Tutorial uses the virtual machine approach).
+2. As a user, get started quickly with a [Tutorial](/documentation/0.6.0-incubating/tutorial/).
+3. For an overview of Aurora's process flow under the hood, see the [User Guide](/documentation/0.6.0-incubating/user-guide/).
+4. To learn how to write a configuration file, look at our [Configuration Tutorial](/documentation/0.6.0-incubating/configuration-tutorial/). From there, look at the [Aurora + Thermos Reference](/documentation/0.6.0-incubating/configuration-reference/).
+5. Then read up on the [Aurora Command Line Client](/documentation/0.6.0-incubating/client-commands/).
+6. Find out general information and useful tips about how Aurora does [Resource Isolation](/documentation/0.6.0-incubating/resource-isolation/).
+
+To contact the Aurora Developer List, email [dev@aurora.incubator.apache.org](mailto:dev@aurora.incubator.apache.org). You may want to read the list [archives](http://mail-archives.apache.org/mod_mbox/incubator-aurora-dev/). You can also use the IRC channel `#aurora` on `irc.freenode.net`.
diff --git a/source/documentation/latest/monitoring.md b/source/documentation/0.6.0-incubating/monitoring.md
similarity index 100%
rename from source/documentation/latest/monitoring.md
rename to source/documentation/0.6.0-incubating/monitoring.md
diff --git a/source/documentation/latest/resource-isolation.md b/source/documentation/0.6.0-incubating/resource-isolation.md
similarity index 100%
copy from source/documentation/latest/resource-isolation.md
copy to source/documentation/0.6.0-incubating/resource-isolation.md
diff --git a/source/documentation/latest/scheduler-storage.md b/source/documentation/0.6.0-incubating/scheduler-storage.md
similarity index 100%
copy from source/documentation/latest/scheduler-storage.md
copy to source/documentation/0.6.0-incubating/scheduler-storage.md
diff --git a/source/documentation/0.6.0-incubating/sla.md b/source/documentation/0.6.0-incubating/sla.md
new file mode 100644
index 0000000..3d56e95
--- /dev/null
+++ b/source/documentation/0.6.0-incubating/sla.md
@@ -0,0 +1,176 @@
+Aurora SLA Measurement
+--------------
+
+- [Overview](#overview)
+- [Metric Details](#metric-details)
+  - [Platform Uptime](#platform-uptime)
+  - [Job Uptime](#job-uptime)
+  - [Median Time To Assigned (MTTA)](#median-time-to-assigned-\(mtta\))
+  - [Median Time To Running (MTTR)](#median-time-to-running-\(mttr\))
+- [Limitations](#limitations)
+
+## Overview
+
+The primary goal of the feature is collection and monitoring of Aurora job SLA (Service Level
+Agreements) metrics that define a contractual relationship between the Aurora/Mesos platform
+and hosted services.
+
+The Aurora SLA feature currently supports stat collection only for service (non-cron)
+production jobs (`"production = True"` in your `.aurora` config).
+
+Counters that track SLA measurements are computed periodically within the scheduler.
+The individual instance metrics are refreshed every minute (configurable via
+`sla_stat_refresh_interval`). The instance counters are subsequently aggregated by
+relevant grouping types before being exported to the scheduler `/vars` endpoint (when using `vagrant`
+that would be `http://192.168.33.7:8081/vars`).
+
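+For example, a quick way to pull just the SLA counters out of `/vars` (a small Python sketch,
+assuming the vagrant address above and the plain `name value` text format of the endpoint):
+
+    import urllib2
+
+    def sla_vars(host='192.168.33.7', port=8081):
+      """Return only the sla_* counters exported by the scheduler."""
+      body = urllib2.urlopen('http://%s:%d/vars' % (host, port)).read()
+      return dict(line.split(' ', 1) for line in body.splitlines()
+                  if line.startswith('sla_') and ' ' in line)
+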
+## Metric Details
+
+### Platform Uptime
+
+*Aggregate amount of time a job spends in a non-runnable state due to platform unavailability
+or scheduling delays. This metric tracks Aurora/Mesos uptime performance and reflects on any
+system-caused downtime events (tasks LOST or DRAINED). Any user-initiated task kills/restarts
+will not degrade this metric.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_platform_uptime_percent`
+* Per cluster - `sla_cluster_platform_uptime_percent`
+
+**Units:** percent
+
+A fault in the task environment may cause Aurora and Mesos to have different views of the task state
+or lose track of the task's existence. In such cases, the service task is marked as LOST and
+rescheduled by Aurora. For example, this may happen when the task stays in ASSIGNED or STARTING
+for too long or the Mesos slave becomes unhealthy (or disappears completely). The time between
+task entering LOST and its replacement reaching RUNNING state is counted towards platform downtime.
+
+Another example of a platform downtime event is the administrator-requested task rescheduling. This
+happens during planned Mesos slave maintenance when all slave tasks are marked as DRAINED and
+rescheduled elsewhere.
+
+To accurately calculate Platform Uptime, we must separate platform incurred downtime from user
+actions that put a service instance in a non-operational state. It is simpler to isolate
+user-incurred downtime and treat all other downtime as platform incurred.
+
+Currently, a user can cause a healthy service (task) downtime in only two ways: via `killTasks`
+or `restartShards` RPCs. For both, their affected tasks leave an audit state transition trail
+relevant to uptime calculations. By applying a special "SLA meaning" to exposed task state
+transition records, we can build a deterministic downtime trace for every given service instance.
+
+A task going through a state transition carries one of three possible SLA meanings
+(see [SlaAlgorithm.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/sla/SlaAlgorithm.java) for
+the SLA-to-task-state mapping):
+
+* Task is UP: starts a period where the task is considered to be up and running from the Aurora
+  platform standpoint.
+
+* Task is DOWN: starts a period where the task cannot reach the UP state for some
+  non-user-related reason. Counts towards instance downtime.
+
+* Task is REMOVED from SLA: starts a period where the task is not expected to be UP due to
+  user initiated action or failure. We ignore this period for the uptime calculation purposes.
+
+This metric is recalculated over the last sampling period (last minute) to account for
+any UP/DOWN/REMOVED events. It ignores any UP/DOWN events not immediately adjacent to the
+sampling interval as well as adjacent REMOVED events.
+
+### Job Uptime
+
+*Percentage of the job instances considered to be in RUNNING state for the specified duration
+relative to request time. This is a purely application-side metric that considers the aggregate
+uptime of all RUNNING instances. Any user- or platform-initiated restarts directly affect
+this metric.*
+
+**Collection scope:** We currently expose job uptime values at 5 pre-defined
+percentiles (50th, 75th, 90th, 95th and 99th):
+
+* `sla_<job_key>_job_uptime_50_00_sec`
+* `sla_<job_key>_job_uptime_75_00_sec`
+* `sla_<job_key>_job_uptime_90_00_sec`
+* `sla_<job_key>_job_uptime_95_00_sec`
+* `sla_<job_key>_job_uptime_99_00_sec`
+
+**Units:** seconds
+
+You can also get customized real-time stats from the aurora client. See `aurora sla -h` for
+more details.
+
+### Median Time To Assigned (MTTA)
+
+*Median time a job spends waiting for its tasks to be assigned to a host. This is a combined
+metric that helps track the dependency of scheduling performance on the requested resources
+(user scope) as well as the internal scheduler bin-packing algorithm efficiency (platform scope).*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtta_ms`
+* Per cluster - `sla_cluster_mtta_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in
+[ResourceAggregates.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java):
+  * By CPU:
+    * `sla_cpu_small_mtta_ms`
+    * `sla_cpu_medium_mtta_ms`
+    * `sla_cpu_large_mtta_ms`
+    * `sla_cpu_xlarge_mtta_ms`
+    * `sla_cpu_xxlarge_mtta_ms`
+  * By RAM:
+    * `sla_ram_small_mtta_ms`
+    * `sla_ram_medium_mtta_ms`
+    * `sla_ram_large_mtta_ms`
+    * `sla_ram_xlarge_mtta_ms`
+    * `sla_ram_xxlarge_mtta_ms`
+  * By DISK:
+    * `sla_disk_small_mtta_ms`
+    * `sla_disk_medium_mtta_ms`
+    * `sla_disk_large_mtta_ms`
+    * `sla_disk_xlarge_mtta_ms`
+    * `sla_disk_xxlarge_mtta_ms`
+
+**Units:** milliseconds
+
+MTTA only considers instances that have already reached ASSIGNED state and ignores those
+that are still PENDING. This ensures straggler instances (e.g. with unreasonable resource
+constraints) do not affect metric curves.
+
+### Median Time To Running (MTTR)
+
+*Median time a job waits for its tasks to reach RUNNING state. This is a comprehensive metric
+reflecting on the overall time it takes for Aurora/Mesos to start executing user content.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mttr_ms`
+* Per cluster - `sla_cluster_mttr_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in
+[ResourceAggregates.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java):
+  * By CPU:
+    * `sla_cpu_small_mttr_ms`
+    * `sla_cpu_medium_mttr_ms`
+    * `sla_cpu_large_mttr_ms`
+    * `sla_cpu_xlarge_mttr_ms`
+    * `sla_cpu_xxlarge_mttr_ms`
+  * By RAM:
+    * `sla_ram_small_mttr_ms`
+    * `sla_ram_medium_mttr_ms`
+    * `sla_ram_large_mttr_ms`
+    * `sla_ram_xlarge_mttr_ms`
+    * `sla_ram_xxlarge_mttr_ms`
+  * By DISK:
+    * `sla_disk_small_mttr_ms`
+    * `sla_disk_medium_mttr_ms`
+    * `sla_disk_large_mttr_ms`
+    * `sla_disk_xlarge_mttr_ms`
+    * `sla_disk_xxlarge_mttr_ms`
+
+**Units:** milliseconds
+
+MTTR only considers instances in RUNNING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+## Limitations
+
+* The availability of Aurora SLA metrics is bound by the scheduler availability.
+
+* All metrics are calculated at a pre-defined interval (currently set at 1 minute).
+  Scheduler restarts may result in missed collections.
\ No newline at end of file
diff --git a/source/documentation/0.6.0-incubating/storage-config.md b/source/documentation/0.6.0-incubating/storage-config.md
new file mode 100644
index 0000000..af66a6d
--- /dev/null
+++ b/source/documentation/0.6.0-incubating/storage-config.md
@@ -0,0 +1,142 @@
+# Storage Configuration And Maintenance
+
+- [Overview](#overview)
+- [Scheduler storage configuration flags](#scheduler-storage-configuration-flags)
+  - [Mesos replicated log configuration flags](#mesos-replicated-log-configuration-flags)
+    - [-native_log_quorum_size](#-native_log_quorum_size)
+    - [-native_log_file_path](#-native_log_file_path)
+    - [-native_log_zk_group_path](#-native_log_zk_group_path)
+  - [Backup configuration flags](#backup-configuration-flags)
+    - [-backup_interval](#-backup_interval)
+    - [-backup_dir](#-backup_dir)
+    - [-max_saved_backups](#-max_saved_backups)
+- [Recovering from a scheduler backup](#recovering-from-a-scheduler-backup)
+  - [Summary](#summary)
+  - [Preparation](#preparation)
+  - [Cleanup and re-initialize Mesos replicated log](#cleanup-and-re-initialize-mesos-replicated-log)
+  - [Restore from backup](#restore-from-backup)
+  - [Cleanup](#cleanup)
+
+## Overview
+
+This document summarizes Aurora storage configuration and maintenance details and is
+intended for use by anyone deploying and/or maintaining Aurora.
+
+For a high level overview of the Aurora storage architecture refer to [this document](/documentation/0.6.0-incubating/storage/).
+
+## Scheduler storage configuration flags
+
+Below is a summary of scheduler storage configuration flags that either don't have default values
+or require attention before deploying in a production environment.
+
+### Mesos replicated log configuration flags
+
+#### -native_log_quorum_size
+Defines the Mesos replicated log quorum size. See
+[the replicated log configuration document](/documentation/0.6.0-incubating/deploying-aurora-scheduler/#replicated-log-configuration)
+on how to choose the right value.
+
+#### -native_log_file_path
+Location of the Mesos replicated log files. Consider allocating a dedicated disk (preferably SSD)
+for Mesos replicated log files to ensure optimal storage performance.
+
+#### -native_log_zk_group_path
+ZooKeeper path used for Mesos replicated log quorum discovery.
+
+See [code](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/log/mesos/MesosLogStreamModule.java) for
+other available Mesos replicated log configuration options and default values.
+
+### Backup configuration flags
+
+Configuration options for the Aurora scheduler backup manager.
+
+#### -backup_interval
+The interval on which the scheduler writes local storage backups.  The default is every hour.
+
+#### -backup_dir
+Directory to write backups to.
+
+#### -max_saved_backups
+Maximum number of backups to retain before deleting the oldest backup(s).
+
+## Recovering from a scheduler backup
+
+- [Summary](#summary)
+- [Preparation](#preparation)
+- [Cleanup and re-initialize Mesos replicated log](#cleanup-and-re-initialize-mesos-replicated-log)
+- [Restore from backup](#restore-from-backup)
+- [Cleanup](#cleanup)
+
+**Be sure to read the entire page before attempting to restore from a backup, as it may have
+unintended consequences.**
+
+### Summary
+
+The restoration procedure replaces the existing (possibly corrupted) Mesos replicated log with an
+earlier, backed up, version and requires all schedulers to be taken down temporarily while
+restoring. Once completed, the scheduler state resets to what it was when the backup was created.
+This means any jobs/tasks created or updated after the backup are unknown to the scheduler and will
+be killed shortly after the cluster restarts. All other tasks continue operating as normal.
+
+Usually, it is a bad idea to restore a backup that is not extremely recent (i.e. older than a few
+hours). This is because the scheduler will expect the cluster to look exactly as the backup does,
+so any tasks that have been rescheduled since the backup was taken will be killed.
+
+### Preparation
+
+Follow these steps to prepare the cluster for restoring from a backup:
+
+* Stop all scheduler instances
+
+* Consider blocking external traffic on a port defined in `-http_port` for all schedulers to
+prevent users from interacting with the scheduler during the restoration process. This will help
+troubleshooting by reducing the scheduler log noise and prevent users from making changes that will
+be erased after the backup snapshot is restored.
+
+* The next steps are required to put the scheduler into a partially disabled state where it would still be
+able to accept storage recovery requests but unable to schedule or change task states. This may be
+accomplished by updating the following scheduler configuration options:
+  * Set `-mesos_master_address` to a non-existent zk address. This will prevent the scheduler from
+    registering with Mesos. E.g.: `-mesos_master_address=zk://localhost:2181`
+  * `-max_registration_delay` - set to a sufficiently long interval to prevent a registration timeout
+    and, as a result, scheduler suicide. E.g.: `-max_registration_delay=360min`
+  * Make sure the `-gc_executor_path` option is not set, to prevent accidental task GC. This is
+    important, as the scheduler will attempt to reconcile the cluster state and will kill all tasks when
+    restarted with an empty Mesos replicated log.
+
+* Restart all schedulers
+
+### Cleanup and re-initialize Mesos replicated log
+
+Get rid of the corrupted files and re-initialize the Mesos replicated log:
+
+* Stop schedulers
+* Delete all files under `-native_log_file_path` on all schedulers
+* Initialize the Mesos replica's log file: `mesos-log initialize --path=<-native_log_file_path>`
+* Restart schedulers
+
+### Restore from backup
+
+At this point the scheduler is ready to rehydrate from the backup:
+
+* Identify the leading scheduler by:
+  * running `aurora_admin get_scheduler <cluster>` - if the scheduler is responsive
+  * examining scheduler logs
+  * or examining Zookeeper registration under the path defined by `-zk_endpoints`
+    and `-serverset_path`
+
+* Locate the desired backup file, copy it to the leading scheduler and stage recovery by running
+the following command on the leader:
+`aurora_admin scheduler_stage_recovery <cluster> scheduler-backup-<yyyy-MM-dd-HH-mm>`
+
+* At this point, the recovery snapshot is staged and available for manual verification/modification
+via `aurora_admin scheduler_print_recovery_tasks` and `scheduler_delete_recovery_tasks` commands.
+See `aurora_admin help <command>` for usage details.
+
+* Commit recovery. This instructs the scheduler to overwrite the existing Mesos replicated log with
+the provided backup snapshot and initiate a mandatory failover:
+`aurora_admin scheduler_commit_recovery <cluster>`
+
+### Cleanup
+Undo any modifications done during the [Preparation](#preparation) sequence.
+
diff --git a/source/documentation/0.6.0-incubating/storage.md b/source/documentation/0.6.0-incubating/storage.md
new file mode 100644
index 0000000..12fa270
--- /dev/null
+++ b/source/documentation/0.6.0-incubating/storage.md
@@ -0,0 +1,88 @@
+# Aurora Scheduler Storage
+
+- [Overview](#overview)
+- [Reads, writes, modifications](#reads-writes-modifications)
+  - [Read lifecycle](#read-lifecycle)
+  - [Write lifecycle](#write-lifecycle)
+- [Atomicity, consistency and isolation](#atomicity-consistency-and-isolation)
+- [Population on restart](#population-on-restart)
+
+## Overview
+
+The Aurora scheduler maintains data that needs to be persisted to survive failovers and restarts.
+For example:
+
+* Task configurations and scheduled task instances
+* Job update configurations and update progress
+* Production resource quotas
+* Mesos resource offer host attributes
+
+Aurora solves its persistence needs by leveraging the Mesos implementation of a Paxos replicated
+log [[1]](https://ramcloud.stanford.edu/~ongaro/userstudy/paxos.pdf)
+[[2]](http://en.wikipedia.org/wiki/State_machine_replication) with a key-value
+[LevelDB](https://github.com/google/leveldb) storage as persistence media.
+
+Conceptually, it can be represented by the following major components:
+
+* Volatile storage: in-memory cache of all available data. Implemented via in-memory
+[H2 Database](http://www.h2database.com/html/main.html) and accessed via
+[MyBatis](http://mybatis.github.io/mybatis-3/).
+* Log manager: interface between Aurora storage and Mesos replicated log. The default schema format
+is [thrift](https://github.com/apache/thrift). Data is stored in serialized binary form.
+* Snapshot manager: all data is periodically persisted to the Mesos replicated log in a single snapshot.
+This helps establish periodic recovery checkpoints and speeds up volatile storage recovery on
+restart.
+* Backup manager: as a precaution, snapshots are periodically written out into backup files.
+This solves a [disaster recovery problem](/documentation/0.6.0-incubating/storage-config/#recovering-from-a-scheduler-backup)
+in case of a complete loss or corruption of Mesos log files.
+
+![Storage hierarchy](images/storage_hierarchy.png)
+
+## Reads, writes, modifications
+
+All services in Aurora access data via a set of predefined store interfaces (aka stores) logically
+grouped by the type of data they serve. Every interface defines a specific set of operations allowed
+on the data, thus abstracting out the storage access and the actual persistence implementation. The
+latter is especially important in view of a general immutability of persisted data. With the Mesos
+replicated log as the underlying persistence solution, data can be read and written easily but not
+modified. All modifications are simulated by saving new versions of modified objects. This feature
+and general performance considerations justify the existence of the volatile in-memory store.
+
+### Read lifecycle
+
+There are two types of reads available in Aurora: consistent and weakly-consistent. The difference
+is explained [below](#atomicity-consistency-and-isolation).
+
+All reads are served from the volatile storage, making reads generally cheap storage operations
+from the performance standpoint. The majority of the volatile stores are represented by the
+in-memory H2 database. This allows for rich schema definitions, queries and relationships that
+key-value storage is unable to match.
+
+### Write lifecycle
+
+Writes are more involved operations since, in addition to updating the volatile store, data has to be
+appended to the replicated log. Data is not available for reads until fully acknowledged by both the
+replicated log and the volatile storage.
+
+## Atomicity, consistency and isolation
+
+Aurora uses [write-ahead logging](http://en.wikipedia.org/wiki/Write-ahead_logging) to ensure
+consistency between replicated and volatile storage. In Aurora, data is first written into the
+replicated log and only then updated in the volatile store.
+
+Aurora storage uses read-write locks to serialize data mutations and provide consistent view of the
+available data. The available `Storage` interface exposes 3 major types of operations:
+
+* `consistentRead` - access is locked using the reader's lock and provides a consistent view on read
+* `weaklyConsistentRead` - access is lock-less. Delivers the best contention performance but may result
+in stale reads
+* `write` - access is fully serialized by using the writer's lock. Operation success requires both
+volatile and replicated writes to succeed.
+
+The consistency of the volatile store is enforced via H2 transactional isolation.
+
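+In Python terms, the three operation types behave roughly like the simplified sketch below. The
+real implementation is Java; this only illustrates the locking and write-ahead semantics described
+above (a plain `RLock` stands in for the reader/writer lock):
+
+    import threading
+
+    class StorageSketch(object):
+      """Illustrative only: serialized writes, consistent and lock-free reads."""
+
+      def __init__(self):
+        self._lock = threading.RLock()   # stand-in for the reader/writer lock
+        self._volatile = {}              # in-memory (volatile) store
+        self._log = []                   # append-only stand-in for the replicated log
+
+      def consistent_read(self, key):
+        with self._lock:                 # locked read: sees all completed writes
+          return self._volatile.get(key)
+
+      def weakly_consistent_read(self, key):
+        return self._volatile.get(key)   # lock-less read: may observe a stale value
+
+      def write(self, key, value):
+        with self._lock:                 # writes are fully serialized
+          self._log.append((key, value)) # write-ahead: append to the log first...
+          self._volatile[key] = value    # ...then update the volatile store
+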
+## Population on restart
+
+Any time a scheduler restarts, it restores its volatile state from the most recent position recorded
+in the replicated log by restoring the snapshot and replaying individual log entries on top to fully
+recover the state up to the last write.
+
diff --git a/source/documentation/0.6.0-incubating/test-resource-generation.md b/source/documentation/0.6.0-incubating/test-resource-generation.md
new file mode 100644
index 0000000..e5b0da2
--- /dev/null
+++ b/source/documentation/0.6.0-incubating/test-resource-generation.md
@@ -0,0 +1,25 @@
+# Generating test resources
+
+## Background
+The Aurora source repository and distributions contain several
+[binary files](https://github.com/apache/aurora/blob/#{git_tag}/src/test/resources/org/apache/thermos/root/checkpoints) to
+qualify the backwards-compatibility of thermos with checkpoint data. Since
+thermos persists state to disk, to be read by other components (the GC executor
+and the thermos observer), it is important that we have tests that prevent
+regressions affecting the ability to parse previously-written data.
+
+## Generating test files
+The files included represent persisted checkpoints that exercise different
+features of thermos. The existing files should not be modified unless
+we are accepting backwards incompatibility, such as with a major release.
+
+It is not practical to write source code to generate these files on the fly,
+as source would be vulnerable to drift (e.g. due to refactoring) in ways
+that would undermine the goal of ensuring backwards compatibility.
+
+The most common reason to add a new checkpoint file would be to provide
+coverage for new thermos features that alter the data format. This is
+accomplished by writing and running a
+[job configuration](/documentation/0.6.0-incubating/configuration-reference/) that exercises the feature, and
+copying the checkpoint file from the sandbox directory; by default this is
+`/var/run/thermos/checkpoints/<aurora task id>`.
diff --git a/source/documentation/0.6.0-incubating/tutorial.md b/source/documentation/0.6.0-incubating/tutorial.md
new file mode 100644
index 0000000..3744585
--- /dev/null
+++ b/source/documentation/0.6.0-incubating/tutorial.md
@@ -0,0 +1,265 @@
+Aurora Tutorial
+---------------
+
+- [Introduction](#introduction)
+- [Setup: Install Aurora](#setup-install-aurora)
+- [The Script](#the-script)
+- [Aurora Configuration](#aurora-configuration)
+- [What's Going On In That Configuration File?](#whats-going-on-in-that-configuration-file)
+- [Creating the Job](#creating-the-job)
+- [Watching the Job Run](#watching-the-job-run)
+- [Cleanup](#cleanup)
+- [Next Steps](#next-steps)
+
+## Introduction
+
+This tutorial shows how to use the Aurora scheduler to run (and
+"`printf-debug`") a hello world program on Mesos. The operational
+hierarchy is:
+
+- Aurora manages and schedules jobs for Mesos to run.
+- Mesos manages the individual tasks that make up a job.
+- Thermos manages the individual processes that make up a task.
+
+This is the recommended first document for new Aurora users to read to start
+getting up to speed on the system.
+
+To get help, email questions to the Aurora Developer List,
+[dev@aurora.incubator.apache.org](mailto:dev@aurora.incubator.apache.org)
+
+## Setup: Install Aurora
+
+You use the Aurora client and web UI to interact with Aurora jobs. To
+install it locally, see [vagrant.md](/documentation/0.6.0-incubating/vagrant/). The remainder of this
+Tutorial assumes you are running Aurora using Vagrant.  Unless otherwise stated,
+all commands are to be run from the root of the aurora repository clone.
+
+## The Script
+
+Our "hello world" application is a simple Python script that loops
+forever, displaying the time every few seconds. Copy the code below and
+put it in a file named `hello_world.py` in the root of your Aurora repository clone (Note:
+this directory is the same as `/vagrant` inside the Vagrant VMs).
+
+The script has an intentional bug, which we will explain later on.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+import sys
+import time
+
+def main(argv):
+  SLEEP_DELAY = 10
+  # Python ninjas - ignore this blatant bug.
+  for i in xrang(100):
+    print("Hello world! The time is now: %s. Sleeping for %d secs" % (
+      time.asctime(), SLEEP_DELAY))
+    sys.stdout.flush()
+    time.sleep(SLEEP_DELAY)
+
+if __name__ == "__main__":
+  main(sys.argv)
+```
+
+## Aurora Configuration
+
+Once we have our script/program, we need to create a *configuration
+file* that tells Aurora how to manage and launch our Job. Save the below
+code in the file `hello_world.aurora`.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+pkg_path = '/vagrant/hello_world.py'
+
+# we use a trick here to make the configuration change with
+# the contents of the file, for simplicity.  in a normal setting, packages would be
+# versioned, and the version number would be changed in the configuration.
+import hashlib
+with open(pkg_path, 'rb') as f:
+  pkg_checksum = hashlib.md5(f.read()).hexdigest()
+
+# copy hello_world.py into the local sandbox
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s . && echo %s && chmod +x hello_world.py' % (pkg_path, pkg_checksum))
+
+# run the script
+hello_world = Process(
+  name = 'hello_world',
+  cmdline = 'python hello_world.py')
+
+# describe the task
+hello_world_task = SequentialTask(
+  processes = [install, hello_world],
+  resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB))
+
+jobs = [
+  Service(cluster = 'devcluster',
+          environment = 'devel',
+          role = 'www-data',
+          name = 'hello_world',
+          task = hello_world_task)
+]
+```
+
+For more about Aurora configuration files, see the [Configuration
+Tutorial](/documentation/0.6.0-incubating/configuration-tutorial/) and the [Aurora + Thermos
+Reference](/documentation/0.6.0-incubating/configuration-reference/) (preferably after finishing this
+tutorial).
+
+## What's Going On In That Configuration File?
+
+More than you might think.
+
+1. From a "big picture" viewpoint, it first defines two
+Processes. Then it defines a Task that runs the two Processes in the
+order specified in the Task definition, as well as specifying what
+computational and memory resources are available for them.  Finally,
+it defines a Job that will schedule the Task on available and suitable
+machines. This Job is the sole member of a list of Jobs; you can
+specify more than one Job in a config file.
+
+2. At the Process level, it specifies how to get your code into the
+local sandbox in which it will run. It then specifies how the code is
+actually run once the second Process starts.
+
+## Creating the Job
+
+We're ready to launch our job! To do so, we use the Aurora Client to
+issue a Job creation request to the Aurora scheduler.
+
+Many Aurora Client commands take a *job key* argument, which uniquely
+identifies a Job. A job key consists of four parts, each separated by a
+"/". The four parts are  `<cluster>/<role>/<environment>/<jobname>`
+in that order. When comparing two job keys, if any of the
+four parts is different from its counterpart in the other key, then the
+two job keys identify two separate jobs. If all four values are
+identical, the job keys identify the same job.
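+
+If it helps to see the comparison rule as code, here is a small sketch (plain
+Python, not part of the Aurora client) that splits two job keys and checks
+whether they refer to the same job:
+
+```python
+def same_job(key_a, key_b):
+  """Two job keys identify the same job only if all four parts match."""
+  # A job key has the form <cluster>/<role>/<environment>/<jobname>.
+  return key_a.split('/') == key_b.split('/')
+
+assert same_job('devcluster/www-data/devel/hello_world',
+                'devcluster/www-data/devel/hello_world')
+assert not same_job('devcluster/www-data/devel/hello_world',
+                    'devcluster/www-data/test/hello_world')
+```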
+
+`/etc/aurora/clusters.json` within the Aurora scheduler has the available
+cluster names. For Vagrant, from the top-level of your Aurora repository clone,
+do:
+
+    $ vagrant ssh
+
+Followed by:
+
+    vagrant@precise64:~$ cat /etc/aurora/clusters.json
+
+You'll see something like:
+
+```javascript
+[{
+  "name": "devcluster",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler",
+  "auth_mechanism": "UNAUTHENTICATED"
+}]
+```
+
+Use one of the `name` values as the cluster part of your job key.
+
+Role names are user accounts existing on the slave machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
+The Aurora Client command that actually runs our Job is `aurora create`. It creates a Job as
+specified by its job key and configuration file arguments and runs it.
+
+    aurora create <cluster>/<role>/<environment>/<jobname> <config_file>
+
+Or for our example:
+
+    aurora create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+This returns:
+
+    $ vagrant ssh
+    Welcome to Ubuntu 12.04 LTS (GNU/Linux 3.2.0-23-generic x86_64)
+
+     * Documentation:  https://help.ubuntu.com/
+    Welcome to your Vagrant-built virtual machine.
+    Last login: Fri Jan  3 02:18:55 2014 from 10.0.2.2
+    vagrant@precise64:~$ aurora create devcluster/www-data/devel/hello_world \
+        /vagrant/hello_world.aurora
+     INFO] Creating job hello_world
+     INFO] Response from scheduler: OK (message: 1 new tasks pending for job
+      www-data/devel/hello_world)
+     INFO] Job url: http://precise64:8081/scheduler/www-data/devel/hello_world
+
+## Watching the Job Run
+
+Now that our job is running, let's see what it's doing. Access the
+scheduler web interface at `http://$scheduler_hostname:$scheduler_port/scheduler`,
+or, when using Vagrant, `http://192.168.33.7:8081/scheduler`.
+First we see what Jobs are scheduled:
+
+![Scheduled Jobs](images/ScheduledJobs.png)
+
+Click on your user name, which in this case is `www-data`, and you'll see the Jobs associated
+with that role:
+
+![Role Jobs](images/RoleJobs.png)
+
+If you click on your `hello_world` Job, you'll see:
+
+![hello_world Job](images/HelloWorldJob.png)
+
+Oops, looks like our first job didn't quite work! The task failed, so we have
+to figure out what went wrong.
+
+Access the page for our Task by clicking on its host.
+
+![Task page](images/TaskBreakdown.png)
+
+Once there, we see that the
+`hello_world` process failed. The Task page captures the standard error and
+standard output streams and makes them available. Clicking through
+to `stderr` on the failed `hello_world` process, we see what happened.
+
+![stderr page](images/stderr.png)
+
+It looks like we made a typo in our Python script. We wanted `xrange`,
+not `xrang`. Edit the `hello_world.py` script to use the correct function and
+we will try again.
+
+    aurora update devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+This time, the task comes up, we inspect the page, and see that the
+`hello_world` process is running.
+
+![Running Task page](images/runningtask.png)
+
+We then inspect the output by clicking on `stdout` and see our process'
+output:
+
+![stdout page](images/stdout.png)
+
+## Cleanup
+
+Now that we're done, we kill the job using the Aurora client:
+
+    vagrant@precise64:~$ aurora killall devcluster/www-data/devel/hello_world
+     INFO] Killing tasks for job: devcluster/www-data/devel/hello_world
+     INFO] Response from scheduler: OK (message: Tasks killed.)
+     INFO] Job url: http://precise64:8081/scheduler/www-data/devel/hello_world
+    vagrant@precise64:~$
+
+The job page now shows the `hello_world` tasks as completed.
+
+![Killed Task page](images/killedtask.png)
+
+## Next Steps
+
+Now that you've finished this Tutorial, you should read or do the following:
+
+- [The Aurora Configuration Tutorial](/documentation/0.6.0-incubating/configuration-tutorial/), which provides more examples
+  and best practices for writing Aurora configurations. You should also look at
+  the [Aurora + Thermos Configuration Reference](/documentation/0.6.0-incubating/configuration-reference/).
+- The [Aurora User Guide](/documentation/0.6.0-incubating/user-guide/) provides an overview of how Aurora, Mesos, and
+  Thermos work "under the hood".
+- Explore the Aurora Client - use the `aurora help` subcommand, and read the
+  [Aurora Client Commands](/documentation/0.6.0-incubating/client-commands/) document.
diff --git a/source/documentation/0.6.0-incubating/user-guide.md b/source/documentation/0.6.0-incubating/user-guide.md
new file mode 100644
index 0000000..cb9fd4f
--- /dev/null
+++ b/source/documentation/0.6.0-incubating/user-guide.md
@@ -0,0 +1,363 @@
+Aurora User Guide
+-----------------
+
+- [Overview](#overview)
+- [Job Lifecycle](#job-lifecycle)
+	- [Life Of A Task](#life-of-a-task)
+	- [PENDING to RUNNING states](#pending-to-running-states)
+	- [Task Updates](#task-updates)
+	- [HTTP Health Checking and Graceful Shutdown](#http-health-checking-and-graceful-shutdown)
+		- [Tearing a task down](#tearing-a-task-down)
+	- [Giving Priority to Production Tasks: PREEMPTING](#giving-priority-to-production-tasks-preempting)
+	- [Natural Termination: FINISHED, FAILED](#natural-termination-finished-failed)
+	- [Forceful Termination: KILLING, RESTARTING](#forceful-termination-killing-restarting)
+- [Service Discovery](#service-discovery)
+- [Configuration](#configuration)
+- [Creating Jobs](#creating-jobs)
+- [Interacting With Jobs](#interacting-with-jobs)
+
+Overview
+--------
+
+This document gives an overview of how Aurora works under the hood.
+It assumes you've already worked through the "hello world" example
+job in the [Aurora Tutorial](/documentation/0.6.0-incubating/tutorial/). Specifics of how to use Aurora are **not**
+given here, but pointers to the relevant documentation are provided.
+
+Aurora is a Mesos framework used to schedule *jobs* onto Mesos. Mesos
+cares about individual *tasks*, but typical jobs consist of dozens or
+hundreds of task replicas. Aurora provides a layer on top of Mesos with
+its `Job` abstraction. An Aurora `Job` consists of a task template and
+instructions for creating near-identical replicas of that task (modulo
+things like "instance id" or specific port numbers which may differ from
+machine to machine).
+
+How many tasks make up a Job is complicated. On a basic level, a Job consists of
+one task template and instructions for creating near-identical replicas of that task
+(otherwise referred to as "instances" or "shards").
+
+However, since Jobs can be updated on the fly, a single Job identifier or *job key*
+can have multiple job configurations associated with it.
+
+For example, suppose I have a Job with 4 instances that each
+request 1 core of cpu, 1 GB of RAM, and 1 GB of disk space as specified
+in the configuration file `hello_world.aurora`. I want to
+update it so it requests 2 GB of RAM instead of 1. I create a new
+configuration file to do that called `new_hello_world.aurora` and
+issue an `aurora update --shards=0-1 <job_key_value> new_hello_world.aurora`
+command.
+
+This results in instances 0 and 1 having 1 cpu, 2 GB of RAM, and 1 GB of disk space,
+while instances 2 and 3 have 1 cpu, 1 GB of RAM, and 1 GB of disk space. If instance 3
+dies and restarts, it restarts with 1 cpu, 1 GB RAM, and 1 GB disk space.
+
+So that means there are two simultaneous task configurations for the same Job
+at the same time, just valid for different ranges of instances.
+
+This isn't a recommended pattern, but it is valid and supported by the
+Aurora scheduler. This most often manifests in the "canary pattern" where
+instance 0 runs with a different configuration than instances 1-N to test
+different code versions alongside the actual production job.
+
+A task can merely be a single *process* corresponding to a single
+command line, such as `python2.6 my_script.py`. However, a task can also
+consist of many separate processes, which all run within a single
+sandbox. For example, running multiple cooperating agents together,
+such as `logrotate`, `installer`, master, or slave processes. This is
+where Thermos  comes in. While Aurora provides a `Job` abstraction on
+top of Mesos `Tasks`, Thermos provides a `Process` abstraction
+underneath Mesos `Task`s and serves as part of the Aurora framework's
+executor.
+
+You define `Job`s, `Task`s, and `Process`es in a configuration file.
+Configuration files are written in Python, and make use of the Pystachio
+templating language. They end in a `.aurora` extension.
+
+Pystachio is a type-checked dictionary templating library.
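+
+To give a flavor of the templating (a minimal, illustrative sketch using the
+standalone `pystachio` package directly; exact API details may vary by version,
+and Aurora configurations normally only use it indirectly through the `.aurora`
+DSL):
+
+    from pystachio import String
+
+    # Mustache-style placeholders are bound by name; anything left unbound is
+    # reported so it can be filled in later (e.g. by Aurora at scheduling time).
+    cmd = String('echo {{args}}')
+    bound, unbound_refs = cmd.bind(args = 'hello world').interpolate()
+    # bound now carries "echo hello world"; unbound_refs lists any {{refs}}
+    # that still have no value.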
+
+> TL;DR
+>
+> -   Aurora manages jobs made of tasks.
+> -   Mesos manages tasks made of processes.
+> -   Thermos manages processes.
+> -   All are defined in a `.aurora` configuration file.
+
+![Aurora hierarchy](images/aurora_hierarchy.png)
+
+Each `Task` has a *sandbox* created when the `Task` starts and garbage
+collected when it finishes. All of a `Task`'s processes run in its
+sandbox, so processes can share state by using a shared current working
+directory.
+
+The sandbox garbage collection policy considers many factors, most
+importantly age and size. It makes a best-effort attempt to keep
+sandboxes around as long as possible post-task in order for service
+owners to inspect data and logs, should the `Task` have completed
+abnormally. But you can't design your applications assuming sandboxes
+will be around forever; instead, build log saving or other
+checkpointing mechanisms directly into your application or into your
+`Job` description.
+
+Job Lifecycle
+-------------
+
+When Aurora reads a configuration file and finds a `Job` definition, it:
+
+1.  Evaluates the `Job` definition.
+2.  Splits the `Job` into its constituent `Task`s.
+3.  Sends those `Task`s to the scheduler.
+4.  The scheduler puts the `Task`s into `PENDING` state, starting each
+    `Task`'s life cycle.
+
+### Life Of A Task
+
+![Life of a task](images/lifeofatask.png)
+
+### PENDING to RUNNING states
+
+When a `Task` is in the `PENDING` state, the scheduler constantly
+searches for machines satisfying that `Task`'s resource request
+requirements (RAM, disk space, CPU time) while maintaining configuration
+constraints such as "a `Task` must run on machines  dedicated  to a
+particular role" or attribute limit constraints such as "at most 2
+`Task`s from the same `Job` may run on each rack". When the scheduler
+finds a suitable match, it assigns the `Task` to a machine and puts the
+`Task` into the `ASSIGNED` state.
+
+From the `ASSIGNED` state, the scheduler sends an RPC to the slave
+machine containing `Task` configuration, which the slave uses to spawn
+an executor responsible for the `Task`'s lifecycle. When the scheduler
+receives an acknowledgement that the machine has accepted the `Task`,
+the `Task` goes into `STARTING` state.
+
+`STARTING` state initializes a `Task` sandbox. When the sandbox is fully
+initialized, Thermos begins to invoke `Process`es. Also, the slave
+machine sends an update to the scheduler that the `Task` is
+in `RUNNING` state.
+
+If a `Task` stays in `ASSIGNED` or `STARTING` for too long, the
+scheduler forces it into `LOST` state, creating a new `Task` in its
+place that's sent into `PENDING` state. This is technically true of any
+active state: if the Mesos core tells the scheduler that a slave has
+become unhealthy (or outright disappeared), the `Task`s assigned to that
+slave go into `LOST` state and new `Task`s are created in their place.
+From `PENDING` state, there is no guarantee a `Task` will be reassigned
+to the same machine unless job constraints explicitly force it there.
+
+If there is a state mismatch (e.g. a machine returns from a netsplit
+and the scheduler has marked all its `Task`s `LOST` and rescheduled
+them), a state reconciliation process kills the errant `RUNNING` tasks,
+which may take up to an hour. But to emphasize this point: there is no
+uniqueness guarantee for a single instance of a job in the presence of
+network partitions. If the Task requires that, it should be baked in at
+the application level using a distributed coordination service such as
+Zookeeper.
+
+### Task Updates
+
+`Job` configurations can be updated at any point in their lifecycle.
+Usually updates are done incrementally using a process called a *rolling
+upgrade*, in which Tasks are upgraded in small groups, one group at a
+time.  Updates are done using various Aurora Client commands.
+
+For a configuration update, the Aurora Client calculates required changes
+by examining the current job config state and the new desired job config.
+It then starts a rolling batched update process by going through every batch
+and performing these operations:
+
+- If an instance is present in the scheduler but isn't in the new config,
+  then that instance is killed.
+- If an instance is not present in the scheduler but is present in
+  the new config, then the instance is created.
+- If an instance is present in both the scheduler and the new config, then
+  the client diffs both task configs. If it detects any changes, it
+  performs an instance update by killing the old config instance and adding
+  the new config instance.
+
+The Aurora client continues through the instance list until all tasks are
+updated, in `RUNNING`, and healthy for a configurable amount of time.
+If the client determines the update is not going well (a percentage of health
+checks have failed), it cancels the update.
+
+Update cancellation runs a procedure similar to the update sequence
+described above, but in reverse order. New instance configs are swapped
+with old instance configs and batch updates proceed backwards
+from the point where the update failed. For example, if batches (0,1,2),
+(3,4,5), and (6,7,8) were updated and the third batch failed, the rollback
+proceeds in the order (8,7,6), (5,4,3), (2,1,0).
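+
+The per-instance bookkeeping can be pictured with a small sketch (plain Python,
+not the actual client code) that derives the operations from an old and a new
+mapping of instance id to task config:
+
+    # Illustrative only: derive update operations from two {instance_id: config}
+    # mappings, mirroring the rules described above.
+    def diff_instances(current, desired):
+      ops = []
+      for i in sorted(set(current) | set(desired)):
+        if i in current and i not in desired:
+          ops.append(('kill', i))
+        elif i not in current and i in desired:
+          ops.append(('create', i))
+        elif current[i] != desired[i]:
+          ops.append(('update', i))   # kill old config, add new config
+      return ops
+
+    print(diff_instances({0: 'v1', 1: 'v1', 2: 'v1'},
+                         {0: 'v2', 1: 'v1', 3: 'v1'}))
+    # [('update', 0), ('kill', 2), ('create', 3)]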
+
+### HTTP Health Checking and Graceful Shutdown
+
+The Executor implements a protocol for rudimentary control of a task via HTTP.  Tasks subscribe to
+this protocol by declaring a port named `health`.  Take for example this configuration snippet:
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[http]}}')
+
+When this Process is included in a job, the job will be allocated a port, and the command line
+will be replaced with something like:
+
+    ./run_nginx.sh -port 42816
+
+where 42816 happens to be the allocated port.  Typically, the Executor monitors Processes within
+a task only by liveness of the forked process.  However, when a `health` port is allocated, it will
+also send periodic HTTP health checks.  A task requesting a `health` port must handle the following
+requests:
+
+| HTTP request            | Description                             |
+| ------------            | -----------                             |
+| `GET /health`           | Inquires whether the task is healthy.   |
+| `POST /quitquitquit`    | Task should initiate graceful shutdown. |
+| `POST /abortabortabort` | Final warning that the task is being killed. |
+
+Please see the
+[configuration reference](/documentation/0.6.0-incubating/configuration-reference/#healthcheckconfig-objects) for
+configuration options for this feature.
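+
+As a rough sketch of what a task-side handler could look like (this is not an
+Aurora API; it simply serves the endpoints in the table with Python's standard
+library, and it assumes the health checker is satisfied by a plain `ok` body on
+`GET /health`, which is an assumption rather than a documented guarantee here):
+
+    # Illustrative only. A task would run this with the port Thermos assigned
+    # to the name "health", e.g. cmdline = 'python health_stub.py {{thermos.ports[health]}}'.
+    import os
+    import sys
+    from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
+
+    class Handler(BaseHTTPRequestHandler):
+      def _reply(self, body):
+        self.send_response(200)
+        self.end_headers()
+        self.wfile.write(body)
+
+      def do_GET(self):
+        if self.path == '/health':
+          self._reply('ok')   # assumption: 'ok' marks the task healthy
+        else:
+          self.send_error(404)
+
+      def do_POST(self):
+        if self.path in ('/quitquitquit', '/abortabortabort'):
+          self._reply('shutting down')
+          self.wfile.flush()
+          os._exit(0)   # a real service would run its own graceful shutdown here
+        else:
+          self.send_error(404)
+
+    HTTPServer(('0.0.0.0', int(sys.argv[1])), Handler).serve_forever()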
+
+#### Snoozing Health Checks
+
+If you need to pause your health check, you can do so by touching a file inside of your sandbox,
+named `.healthchecksnooze`.
+
+As long as that file is present, health checks will be disabled, enabling users to gather core dumps
+or other performance measurements without worrying about Aurora's health check killing their
+process.
+
+WARNING: Remember to remove this when you are done, otherwise your instance will have permanently
+disabled health checks.
+
+#### Tearing a task down
+
+The Executor follows an escalation sequence when killing a running task:
+
+  1. If `health` port is not present, skip to (5)
+  2. POST /quitquitquit
+  3. wait 5 seconds
+  4. POST /abortabortabort
+  5. Send SIGTERM (`kill`)
+  6. Send SIGKILL (`kill -9`)
+
+If the Executor notices that all Processes in a Task have aborted during this sequence, it will
+not proceed with subsequent steps.  Note that graceful shutdown is best-effort, and due to the many
+inevitable realities of distributed systems, it may not be performed.
+
+### Giving Priority to Production Tasks: PREEMPTING
+
+Sometimes a Task needs to be interrupted, such as when a non-production
+Task's resources are needed by a higher priority production Task. This
+type of interruption is called a *pre-emption*. When this happens in
+Aurora, the non-production Task is killed and moved into
+the `PREEMPTING` state when both of the following are true:
+
+- The task being killed is a non-production task.
+- The other task is a `PENDING` production task that hasn't been
+  scheduled due to a lack of resources.
+
+Since production tasks are much more important, Aurora kills off the
+non-production task to free up resources for the production task. The
+scheduler UI shows the non-production task was preempted in favor of the
+production task. At some point, tasks in `PREEMPTING` move to `KILLED`.
+
+Note that non-production tasks consuming many resources are likely to be
+preempted in favor of production tasks.
+
+### Natural Termination: FINISHED, FAILED
+
+A `RUNNING` `Task` can terminate without direct user interaction. For
+example, it may be a finite computation that finishes, even something as
+simple as `echo hello world`. Or it could be an exceptional condition in
+a long-lived service. If the `Task` is successful (its underlying
+processes have succeeded with exit status `0` or finished without
+reaching failure limits) it moves into `FINISHED` state. If it finished
+after reaching a set of failure limits, it goes into `FAILED` state.
+
+### Forceful Termination: KILLING, RESTARTING
+
+You can terminate a `Task` by issuing an `aurora kill` command, which
+moves it into `KILLING` state. The scheduler then sends the slave  a
+request to terminate the `Task`. If the scheduler receives a successful
+response, it moves the Task into `KILLED` state and never restarts it.
+
+The scheduler has access to a non-public `RESTARTING` state. If a `Task`
+is forced into the `RESTARTING` state, the scheduler kills the
+underlying task but in parallel schedules an identical replacement for
+it.
+
+Configuration
+-------------
+
+You define and configure your Jobs (and their Tasks and Processes) in
+Aurora configuration files. Their filenames end with the `.aurora`
+suffix, and you write them in Python making use of the Pystachio
+templating language, along
+with specific Aurora, Mesos, and Thermos commands and methods. See the
+[Configuration Guide and Reference](/documentation/0.6.0-incubating/configuration-reference/) and
+[Configuration Tutorial](/documentation/0.6.0-incubating/configuration-tutorial/).
+
+Service Discovery
+-----------------
+
+It is possible for the Aurora executor to announce tasks into ServerSets for
+the purpose of service discovery.  ServerSets use the Zookeeper [group membership pattern](http://zookeeper.apache.org/doc/trunk/recipes.html#sc_outOfTheBox),
+of which there are several reference implementations:
+
+  - [C++](https://github.com/apache/mesos/blob/master/src/zookeeper/group.cpp)
+  - [Java](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/zookeeper/ServerSetImpl.java#L221)
+  - [Python](https://github.com/twitter/commons/blob/master/src/python/twitter/common/zookeeper/serverset/serverset.py#L51)
+
+These can also be used natively in Finagle using the [ZookeeperServerSetCluster](https://github.com/twitter/finagle/blob/master/finagle-serversets/src/main/scala/com/twitter/finagle/zookeeper/ZookeeperServerSetCluster.scala).
+
+For more information about how to configure announcing, see the [Configuration Reference](/documentation/0.6.0-incubating/configuration-reference/).
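+
+A quick way to poke at a ServerSet is to list its znodes directly. The sketch
+below assumes the third-party `kazoo` ZooKeeper client, the Vagrant devcluster
+ZooKeeper endpoint from this documentation, and the scheduler's own ServerSet
+path; paths for announced tasks depend on your announcer configuration, and the
+member payloads are in a serialized format, so they are only listed, not
+decoded, here:
+
+    # Illustrative only: list ServerSet members under a ZooKeeper path.
+    from kazoo.client import KazooClient
+
+    zk = KazooClient(hosts='192.168.33.7:2181')   # default ZooKeeper port assumed
+    zk.start()
+    for member in zk.get_children('/aurora/scheduler'):
+      print(member)
+    zk.stop()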
+
+Creating Jobs
+-------------
+
+You create and manipulate Aurora Jobs with the Aurora client, whose
+commands all start with
+`aurora`. See [Aurora Client Commands](/documentation/0.6.0-incubating/client-commands/) for details
+about the Aurora Client.
+
+Interacting With Jobs
+---------------------
+
+You interact with Aurora jobs either via:
+
+- Read-only Web UIs
+
+  Part of the output from creating a new Job is a URL for the Job's scheduler UI page.
+
+  For example:
+
+      vagrant@precise64:~$ aurora create devcluster/www-data/prod/hello \
+      /vagrant/examples/jobs/hello_world.aurora
+      INFO] Creating job hello
+      INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+      INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+  The "Job url" goes to the Job's scheduler UI page. To go to the overall scheduler UI page,
+  stop at the "scheduler" part of the URL, in this case, `http://precise64:8081/scheduler`
+
+  You can also reach the scheduler UI page via the Client command `aurora open`:
+
+      aurora open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+  If only the cluster is specified, it goes directly to that cluster's scheduler main page.
+  If the role is specified, it goes to the top-level role page. If the full job key is specified,
+  it goes directly to the job page where you can inspect individual tasks.
+
+  Once you click through to a role page, you see Jobs arranged separately by pending jobs, active
+  jobs, and finished jobs. Jobs are arranged by role, typically a service account for production
+  jobs and user accounts for test or development jobs.
+
+- The Aurora Client's command line interface
+
+  Several Client commands have a `-o` option that automatically opens a window to
+  the specified Job's scheduler UI URL. And, as described above, the `open` command also takes
+  you there.
+
+  For a complete list of Aurora Client commands, use `aurora help` and, for specific
+  command help, `aurora help [command]`. **Note**: `aurora help open`
+  returns `"subcommand open not found"` due to our reflection tricks not
+  working on words that are also builtin Python function names. Or see the
+  [Aurora Client Commands](/documentation/0.6.0-incubating/client-commands/) document.
diff --git a/source/documentation/latest/vagrant.md b/source/documentation/0.6.0-incubating/vagrant.md
similarity index 100%
rename from source/documentation/latest/vagrant.md
rename to source/documentation/0.6.0-incubating/vagrant.md
diff --git a/source/documentation/latest/client-cluster-configuration.md b/source/documentation/0.7.0-incubating/client-cluster-configuration.md
similarity index 100%
copy from source/documentation/latest/client-cluster-configuration.md
copy to source/documentation/0.7.0-incubating/client-cluster-configuration.md
diff --git a/source/documentation/0.7.0-incubating/client-commands.md b/source/documentation/0.7.0-incubating/client-commands.md
new file mode 100644
index 0000000..c6a20f5
--- /dev/null
+++ b/source/documentation/0.7.0-incubating/client-commands.md
@@ -0,0 +1,390 @@
+Aurora Client Commands
+======================
+
+- [Introduction](#introduction)
+- [Cluster Configuration](#cluster-configuration)
+- [Job Keys](#job-keys)
+- [Modifying Aurora Client Commands](#modifying-aurora-client-commands)
+- [Regular Jobs](#regular-jobs)
+    - [Creating and Running a Job](#creating-and-running-a-job)
+    - [Running a Command On a Running Job](#running-a-command-on-a-running-job)
+    - [Killing a Job](#killing-a-job)
+    - [Updating a Job](#updating-a-job)
+        - [Asynchronous job updates (beta)](#asynchronous-job-updates-beta)
+    - [Renaming a Job](#renaming-a-job)
+    - [Restarting Jobs](#restarting-jobs)
+- [Cron Jobs](#cron-jobs)
+- [Comparing Jobs](#comparing-jobs)
+- [Viewing/Examining Jobs](#viewingexamining-jobs)
+    - [Listing Jobs](#listing-jobs)
+    - [Inspecting a Job](#inspecting-a-job)
+    - [Versions](#versions)
+    - [Checking Your Quota](#checking-your-quota)
+    - [Finding a Job on Web UI](#finding-a-job-on-web-ui)
+    - [Getting Job Status](#getting-job-status)
+    - [Opening the Web UI](#opening-the-web-ui)
+    - [SSHing to a Specific Task Machine](#sshing-to-a-specific-task-machine)
+    - [Templating Command Arguments](#templating-command-arguments)
+
+Introduction
+------------
+
+Once you have written an `.aurora` configuration file that describes
+your Job and its parameters and functionality, you interact with Aurora
+using Aurora Client commands. This document describes all of these commands
+and how and when to use them. All Aurora Client commands start with
+`aurora`, followed by the name of the specific command and its
+arguments.
+
+*Job keys* are a very common argument to Aurora commands, as well as the
+gateway to useful information about a Job. Before using Aurora, you
+should read the next section which describes them in detail. The section
+after that briefly describes how you can modify the behavior of certain
+Aurora Client commands, linking to a detailed document about how to do
+that.
+
+This is followed by the Regular Jobs section, which describes the basic
+Client commands for creating, running, and manipulating Aurora Jobs.
+After that are sections on Comparing Jobs and Viewing/Examining Jobs. In
+other words, various commands for getting information and metadata about
+Aurora Jobs.
+
+Cluster Configuration
+---------------------
+
+The client must be able to find a configuration file that specifies available clusters. This file
+declares shorthand names for clusters, which are in turn referenced by job configuration files
+and client commands.
+
+The client will load at most two configuration files, making both of their defined clusters
+available. The first is intended to be a system-installed cluster configuration, using the path specified in
+the environment variable `AURORA_CONFIG_ROOT`, defaulting to `/etc/aurora/clusters.json` if the
+environment variable is not set. The second is a user-installed file, located at
+`~/.aurora/clusters.json`.
+
+A cluster configuration is formatted as JSON.  The simplest cluster configuration is one that
+communicates with a single (non-leader-elected) scheduler.  For example:
+
+```javascript
+[{
+  "name": "example",
+  "scheduler_uri": "localhost:55555",
+}]
+```
+
+A configuration for a leader-elected scheduler would contain something like:
+
+```javascript
+[{
+  "name": "example",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler"
+}]
+```
+
+For more details on cluster configuration see the
+[Client Cluster Configuration](/documentation/0.7.0-incubating/client-cluster-configuration/) documentation.
+
+Job Keys
+--------
+
+A job key is a unique system-wide identifier for an Aurora-managed
+Job, for example `cluster1/web-team/test/experiment204`. It is a 4-tuple
+consisting of, in order, *cluster*, *role*, *environment*, and
+*jobname*, separated by `/`s. Cluster is the name of an Aurora
+cluster. Role is the Unix service account under which the Job
+runs. Environment is a namespace component like `devel`, `test`,
+`prod`, or `stagingN`. Jobname is the Job's name.
+
+The combination of all four values uniquely specifies the Job. If any
+one value is different from that of another job key, the two job keys
+refer to different Jobs. For example, job key
+`cluster1/tyg/prod/workhorse` is different from
+`cluster1/tyg/prod/workcamel` is different from
+`cluster2/tyg/prod/workhorse` is different from
+`cluster2/foo/prod/workhorse` is different from
+`cluster1/tyg/test/workhorse`.
+
+Role names are user accounts existing on the slave machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
+Modifying Aurora Client Commands
+--------------------------------
+
+For certain Aurora Client commands, you can define hook methods that run
+either before or after an action that takes place during the command's
+execution, and that can react to whether the action succeeded or
+failed. Basically, a hook is code that lets you extend the
+command's actions. The hook executes on the client side, specifically on
+the machine executing Aurora commands.
+
+Hooks can be associated with these Aurora Client commands:
+
+  - `job cancel-update`
+  - `job create`
+  - `job kill`
+  - `job restart`
+  - `job update`
+
+The process for writing and activating them is complex enough
+that we explain it in a devoted document, [Hooks for Aurora Client API](/documentation/0.7.0-incubating/hooks/).
+
+Regular Jobs
+------------
+
+This section covers Aurora commands related to running, killing,
+renaming, updating, and restarting a basic Aurora Job.
+
+### Creating and Running a Job
+
+    aurora job create <job key> <configuration file>
+
+Creates and then runs a Job with the specified job key based on a `.aurora` configuration file.
+The configuration file may also contain and activate hook definitions.
+
+### Running a Command On a Running Job
+
+    aurora task run CLUSTER/ROLE/ENV/NAME[/INSTANCES] <cmd>
+
+Runs a shell command on all machines currently hosting shards of a
+single Job.
+
+`run` supports the same command line wildcards used to populate a Job's
+commands; i.e. anything in the `{{mesos.*}}` and `{{thermos.*}}`
+namespaces.
+
+### Killing a Job
+
+    aurora job killall CLUSTER/ROLE/ENV/NAME
+
+Kills all Tasks associated with the specified Job, blocking until all
+are terminated. Defaults to killing all instances in the Job.
+
+The `<configuration file>` argument for `kill` is optional. Use it only
+if it contains hook definitions and activations that affect the
+kill command.
+
+### Updating a Job
+
+    aurora job update CLUSTER/ROLE/ENV/NAME[/INSTANCES] <configuration file>
+    aurora job cancel-update CLUSTER/ROLE/ENV/NAME
+
+Given a running job, does a rolling update to reflect a new
+configuration version. Only updates Tasks in the Job with a changed
+configuration. You can further restrict which Tasks are updated by
+specifying specific instances.
+
+You may want to run `aurora job diff` beforehand to validate which Tasks
+have different configurations.
+
+Jobs being updated are locked to ensure the update finishes without
+disruption. If the update terminates abnormally, the lock may stay
+around and cause subsequent update attempts to fail.
+`aurora job cancel-update` unlocks the Job specified by
+its `job_key` argument. Be sure you don't issue `job cancel-update` when
+another user is working with the specified Job.
+
+The `<configuration file>` argument for `job cancel-update` is optional. Use
+it only if it contains hook definitions and activations that affect the
+`cancel-update` command. The `<configuration file>` argument for
+`update` is required, but in addition to a new configuration it can be
+used to define and activate hooks for `job update`.
+
+#### Asynchronous job updates (beta)
+
+As of 0.6.0, Aurora will coordinate updates (and rollbacks) within the
+scheduler. Performing updates this way also allows the scheduler to display
+update progress and job update history in the browser.
+
+There are several sub-commands to manage job updates:
+
+    aurora beta-update start <job key> <configuration file>
+    aurora beta-update status <job key>
+    aurora beta-update pause <job key>
+    aurora beta-update resume <job key>
+    aurora beta-update abort <job key>
+    aurora beta-update list <cluster>
+
+When you `start` a job update, the command will return once it has sent the
+instructions to the scheduler.  At that point, you may view detailed
+progress for the update with the `status` subcommand, in addition to viewing
+graphical progress in the web browser.  You may also get a full listing of
+in-progress updates in a cluster with `list`.
+
+Once an update has been started, you can `pause` to keep the update but halt
+progress.  This can be useful for doing things like debugging a partially-updated
+job to determine whether you would like to proceed.  You can `resume` to
+proceed.
+
+You may `abort` a job update regardless of the state it is in. This will
+instruct the scheduler to completely abandon the job update and leave the job
+in the current (possibly partially-updated) state.
+
+### Renaming a Job
+
+Renaming is a tricky operation as downstream clients must be informed of
+the new name. A conservative approach
+to renaming suitable for production services is:
+
+1.  Modify the Aurora configuration file to change the role,
+    environment, and/or name as appropriate to the standardized naming
+    scheme.
+2.  Check that only these naming components have changed
+    with `aurora diff`.
+
+        aurora job diff CLUSTER/ROLE/ENV/NAME <job_configuration>
+
+3.  Create the (identical) job at the new key. You may need to request a
+    temporary quota increase.
+
+        aurora job create CLUSTER/ROLE/ENV/NEW_NAME <job_configuration>
+
+4.  Migrate all clients over to the new job key. Update all links and
+    dashboards. Ensure that both job keys run identical versions of the
+    code while in this state.
+5.  After verifying that all clients have successfully moved over, kill
+    the old job.
+
+        aurora job killall CLUSTER/ROLE/ENV/NAME
+
+6.  If you received a temporary quota increase, be sure to let the
+    powers that be know you no longer need the additional capacity.
+
+### Restarting Jobs
+
+`restart` restarts all shards of the Job identified by a job key:
+
+    aurora job restart CLUSTER/ROLE/ENV/NAME[/INSTANCES]
+
+Restarts are controlled on the client side, so aborting
+the `job restart` command halts the restart operation.
+
+**Note**: `job restart` only applies its command line arguments and does not
+use, and is not affected by, `update.config`. Restarting
+does ***not*** involve a configuration change. To update the
+configuration, use `update.config`.
+
+The `--config` argument for restart is optional. Use it only
+if it contains hook definitions and activations that affect the
+`job restart` command.
+
+Cron Jobs
+---------
+
+You can manage cron jobs using the `aurora cron` command.  Please see
+[cron-jobs.md](/documentation/0.7.0-incubating/cron-jobs/) for more details.
+
+You will see various commands and options relating to cron jobs in
+`aurora -h` and similar. Ignore them, as they're not yet implemented.
+
+Comparing Jobs
+--------------
+
+    aurora job diff CLUSTER/ROLE/ENV/NAME <job configuration>
+
+Compares a job configuration against a running job. By default the diff
+is determined using `diff`, though you may choose an alternate
+diff program by specifying the `DIFF_VIEWER` environment variable.
+
+Viewing/Examining Jobs
+----------------------
+
+Above we discussed creating, killing, and updating Jobs. Here we discuss
+how to view and examine Jobs.
+
+### Listing Jobs
+
+    aurora config list <job configuration>
+
+Lists all Jobs registered with the Aurora scheduler in the named cluster for the named role.
+
+### Inspecting a Job
+
+    aurora job inspect CLUSTER/ROLE/ENV/NAME <job configuration>
+
+`inspect` verifies that its specified job can be parsed from a
+configuration file, and displays the parsed configuration.
+
+### Checking Your Quota
+
+    aurora quota get CLUSTER/ROLE
+
+Prints the production quota allocated to the role at the given
+cluster.
+
+### Finding a Job on Web UI
+
+When you create a job, part of the output response contains a URL that goes
+to the job's scheduler UI page. For example:
+
+    vagrant@precise64:~$ aurora create devcluster/www-data/prod/hello /vagrant/examples/jobs/hello_world.aurora
+    INFO] Creating job hello
+    INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+    INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+You can go to the scheduler UI page for this job via `http://precise64:8081/scheduler/www-data/prod/hello`.
+You can go to the overall scheduler UI page by truncating that URL at `scheduler`: `http://precise64:8081/scheduler`.
+
+Once you click through to a role page, you see Jobs arranged
+separately by pending jobs, active jobs and finished jobs.
+Jobs are arranged by role, typically a service account for
+production jobs and user accounts for test or development jobs.
+
+### Getting Job Status
+
+    aurora job status <job_key>
+
+Returns the status of recent tasks associated with the Job specified by
+`job_key` in its cluster. Typically this includes
+a mix of active tasks (running or assigned) and inactive tasks
+(successful, failed, and lost).
+
+### Opening the Web UI
+
+Use the Job's web UI scheduler URL or the `aurora status` command to find out on which
+machines individual tasks are scheduled. You can open the web UI via the
+`open` command if invoked from your machine:
+
+    aurora job open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+If only the cluster is specified, it goes directly to that cluster's
+scheduler main page. If the role is specified, it goes to the top-level
+role page. If the full job key is specified, it goes directly to the job
+page where you can inspect individual tasks.
+
+### SSHing to a Specific Task Machine
+
+    aurora task ssh <job_key> <shard number>
+
+You can have the Aurora client ssh directly to the machine that has been
+assigned a particular Job/shard number. This may be useful for quickly
+diagnosing issues such as performance problems or abnormal behavior on a
+particular machine.
+
+### Templating Command Arguments
+
+    aurora task run [-e] [-t THREADS] <job_key> -- <<command-line>>
+
+Given a job specification, run the supplied command on all hosts and
+return the output. You may use the standard Mustache templating rules:
+
+- `{{thermos.ports[name]}}` substitutes the specific named port of the
+  task assigned to this machine
+- `{{mesos.instance}}` substitutes the shard id of the job's task
+  assigned to this machine
+- `{{thermos.task_id}}` substitutes the task id of the job's task
+  assigned to this machine
+
+For example, the following type of pattern can be a powerful diagnostic
+tool:
+
+    aurora task run -t5 cluster1/tyg/devel/seizure -- \
+      'curl -s -m1 localhost:{{thermos.ports[http]}}/vars | grep uptime'
+
+By default, the command runs in the Task's sandbox. The `-e` option can
+run the command in the executor's sandbox. This is mostly useful for
+Aurora administrators.
+
+You can parallelize the runs by using the `-t` option.
diff --git a/source/documentation/0.7.0-incubating/committers.md b/source/documentation/0.7.0-incubating/committers.md
new file mode 100644
index 0000000..6e80f68
--- /dev/null
+++ b/source/documentation/0.7.0-incubating/committers.md
@@ -0,0 +1,79 @@
+Setting up your email account
+-----------------------------
+Once your Apache ID has been set up you can configure your account, add ssh keys, and set up an
+email forwarding address at
+
+  http://id.apache.org
+
+Additional instructions for setting up your new committer email can be found at
+
+  http://www.apache.org/dev/user-email.html
+
+The recommended setup is to configure all services (mailing lists, JIRA, ReviewBoard) to send
+emails to your @apache.org email address.
+
+
+Creating a gpg key for releases
+-------------------------------
+In order to create a release candidate you will need a gpg key published to an external key server,
+and that key will need to be added to our KEYS file as well.
+
+1. Create a key:
+
+               gpg --gen-key
+
+2. Add your gpg key to the Apache Aurora KEYS file:
+
+               git clone https://gitbox.apache.org/repos/asf/incubator-aurora.git
+               (gpg --list-sigs <KEY ID> && gpg --armor --export <KEY ID>) >> KEYS
+               git add KEYS && git commit -m "Adding gpg key for <APACHE ID>"
+               ./rbt post -o -g
+
+3. Publish the key to an external key server:
+
+               gpg --keyserver pgp.mit.edu --send-keys <KEY ID>
+
+4. Propagate the updated KEYS file to the Apache Aurora svn dist locations listed below:
+
+               https://dist.apache.org/repos/dist/dev/incubator/aurora/KEYS
+               https://dist.apache.org/repos/dist/release/incubator/aurora/KEYS
+
+5. Add your key to git config for use with the release scripts:
+
+               git config --global user.signingkey <KEY ID>
+
+
+Creating a release
+------------------
+The following will guide you through the steps to create a release candidate, vote, and finally an
+official Apache Aurora release. Before starting, your gpg key should be in the KEYS file and you
+must have access to commit to the dist.a.o repositories.
+
+1. Ensure that all issues resolved for this release candidate are tagged with the correct Fix
+Version in Jira; the changelog script will use this to generate the CHANGELOG in step #2.
+
+2. Create a release candidate. This will automatically update the CHANGELOG and commit it, create a
+branch, and update the current version within the trunk. To create a minor version update and publish
+it, run
+
+               ./build-support/release/release-candidate -l m -p
+
+3. Update, if necessary, the draft email created from the `release-candidate` script in step #2 and
+send the [VOTE] email to the dev@ and private@ mailing lists. You can verify the release signature
+and checksums by running
+
+				./build-support/release/verify-release-candidate
+
+4. Wait for the vote to complete. If the vote fails, address any issues, go back to step #1, and
+run again; this time use the -r flag to increment the release candidate version. This will
+automatically clean up the rc0 release candidate branch and source distribution.
+
+               ./build-support/release/release-candidate -l m -r 1 -p
+
+5. Once the vote has successfully passed, create the release
+
+               ./build-support/release/release
+
+6. Update the draft email created from the `release` script in step #5 to include the Apache IDs for
+all binding votes and send the [RESULT][VOTE] email to the dev@ and private@ mailing lists.
+
diff --git a/source/documentation/0.7.0-incubating/configuration-reference.md b/source/documentation/0.7.0-incubating/configuration-reference.md
new file mode 100644
index 0000000..eb682ce
--- /dev/null
+++ b/source/documentation/0.7.0-incubating/configuration-reference.md
@@ -0,0 +1,559 @@
+Aurora + Thermos Configuration Reference
+========================================
+
+- [Aurora + Thermos Configuration Reference](#aurora--thermos-configuration-reference)
+- [Introduction](#introduction)
+- [Process Schema](#process-schema)
+    - [Process Objects](#process-objects)
+      - [name](#name)
+      - [cmdline](#cmdline)
+      - [max_failures](#max_failures)
+      - [daemon](#daemon)
+      - [ephemeral](#ephemeral)
+      - [min_duration](#min_duration)
+      - [final](#final)
+- [Task Schema](#task-schema)
+    - [Task Object](#task-object)
+      - [name](#name-1)
+      - [processes](#processes)
+        - [constraints](#constraints)
+      - [resources](#resources)
+      - [max_failures](#max_failures-1)
+      - [max_concurrency](#max_concurrency)
+      - [finalization_wait](#finalization_wait)
+    - [Constraint Object](#constraint-object)
+    - [Resource Object](#resource-object)
+- [Job Schema](#job-schema)
+    - [Job Objects](#job-objects)
+    - [Services](#services)
+    - [UpdateConfig Objects](#updateconfig-objects)
+    - [HealthCheckConfig Objects](#healthcheckconfig-objects)
+    - [Announcer Objects](#announcer-objects)
+    - [Container Objects](#container)
+- [Specifying Scheduling Constraints](#specifying-scheduling-constraints)
+- [Template Namespaces](#template-namespaces)
+    - [mesos Namespace](#mesos-namespace)
+    - [thermos Namespace](#thermos-namespace)
+- [Basic Examples](#basic-examples)
+    - [hello_world.aurora](#hello_worldaurora)
+    - [Environment Tailoring](#environment-tailoring)
+      - [hello_world_productionized.aurora](#hello_world_productionizedaurora)
+
+Introduction
+============
+
+Don't know where to start? The Aurora configuration schema is very
+powerful, and configurations can become quite complex for advanced use
+cases.
+
+For examples of simple configurations to get something up and running
+quickly, check out the [Tutorial](/documentation/0.7.0-incubating/tutorial/). When you feel comfortable with the basics, move
+on to the [Configuration Tutorial](/documentation/0.7.0-incubating/configuration-tutorial/) for more in-depth coverage of
+configuration design.
+
+For additional basic configuration examples, see [the end of this document](#basic-examples).
+
+Process Schema
+==============
+
+Process objects consist of required `name` and `cmdline` attributes. You can customize Process
+behavior with its optional attributes. Remember, Processes are handled by Thermos.
+
+### Process Objects
+
+  **Attribute Name**  | **Type**    | **Description**
+  ------------------- | :---------: | ---------------------------------
+   **name**           | String      | Process name (Required)
+   **cmdline**        | String      | Command line (Required)
+   **max_failures**   | Integer     | Maximum process failures (Default: 1)
+   **daemon**         | Boolean     | When True, this is a daemon process. (Default: False)
+   **ephemeral**      | Boolean     | When True, this is an ephemeral process. (Default: False)
+   **min_duration**   | Integer     | Minimum duration between process restarts in seconds. (Default: 15)
+   **final**          | Boolean     | When True, this process is a finalizing one that should run last. (Default: False)
+
+#### name
+
+The name is any valid UNIX filename string (specifically no
+slashes, NULLs or leading periods). Within a Task object, each Process name
+must be unique.
+
+#### cmdline
+
+The command line run by the process. The command line is invoked in a bash
+subshell, so it can involve full-blown bash scripts. However, nothing is
+supplied for command-line arguments so `$*` is unspecified.
+
+#### max_failures
+
+The maximum number of failures (non-zero exit statuses) this process can
+have before being marked permanently failed and not retried. If a
+process permanently fails, Thermos looks at the failure limit of the task
+containing the process (usually 1) to determine if the task has
+failed as well.
+
+Setting `max_failures` to 0 makes the process retry
+indefinitely until it achieves a successful (zero) exit status.
+It retries at most once every `min_duration` seconds to prevent
+an effective denial of service attack on the coordinating Thermos scheduler.
+
+#### daemon
+
+By default, Thermos processes are non-daemon. If `daemon` is set to True, a
+successful (zero) exit status does not prevent future process runs.
+Instead, the process reinvokes after `min_duration` seconds.
+However, the maximum failure limit still applies. A combination of
+`daemon=True` and `max_failures=0` causes a process to retry
+indefinitely regardless of exit status. This should be avoided
+for very short-lived processes because of the accumulation of
+checkpointed state for each process run. When running in Mesos
+specifically, `max_failures` is capped at 100.
+
+#### ephemeral
+
+By default, Thermos processes are non-ephemeral. If `ephemeral` is set to
+True, the process' status is not used to determine if its containing task
+has completed. For example, consider a task with a non-ephemeral
+webserver process and an ephemeral logsaver process
+that periodically checkpoints its log files to a centralized data store.
+The task is considered finished once the webserver process has
+completed, regardless of the logsaver's current status.
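+
+A sketch of that arrangement (command lines and sizes are placeholders):
+
+        webserver = Process(name = 'webserver', cmdline = './run_server.sh')
+
+        logsaver = Process(
+          name = 'logsaver',
+          cmdline = './checkpoint_logs.sh',
+          ephemeral = True,    # its status does not decide when the task is done
+          daemon = True,       # re-run after each successful exit
+          min_duration = 300)  # wait at least 5 minutes between runs
+
+        task = Task(
+          name = 'webserver',
+          processes = [webserver, logsaver],
+          resources = Resources(cpu = 1.0, ram = 512*MB, disk = 1*GB))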
+
+#### min_duration
+
+Processes may succeed or fail multiple times during a single task's
+duration. Each of these is called a *process run*. `min_duration` is
+the minimum number of seconds the scheduler waits before running the
+same process.
+
+#### final
+
+Processes can be grouped into two classes: ordinary processes and
+finalizing processes. By default, Thermos processes are ordinary. They
+run as long as the task is considered healthy (i.e., no failure
+limits have been reached.) But once all regular Thermos processes
+finish or the task reaches a certain failure threshold, it
+moves into a "finalization" stage and runs all finalizing
+processes. These are typically processes necessary for cleaning up the
+task, such as log checkpointers, or perhaps e-mail notifications that
+the task completed.
+
+Finalizing processes may not depend upon ordinary processes or
+vice-versa, however finalizing processes may depend upon other
+finalizing processes and otherwise run as a typical process
+schedule.
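+
+For example (a sketch; the command lines are placeholders):
+
+        main = Process(name = 'main', cmdline = './run_service.sh')
+
+        checkpoint_logs = Process(
+          name = 'checkpoint_logs',
+          cmdline = './copy_logs_to_hdfs.sh',
+          final = True)   # runs during finalization, after ordinary processes stop
+
+        task = Task(
+          processes = [main, checkpoint_logs],
+          resources = Resources(cpu = 1.0, ram = 256*MB, disk = 512*MB))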
+
+Task Schema
+===========
+
+Tasks fundamentally consist of a `name` and a list of Process objects stored as the
+value of the `processes` attribute. Processes can be further constrained with
+`constraints`. By default, `name`'s value inherits from the first Process in the
+`processes` list, so for simple `Task` objects with one Process, `name`
+can be omitted. In Mesos, `resources` is also required.
+
+### Task Object
+
+   **param**               | **type**                         | **description**
+   ---------               | :---------:                      | ---------------
+   ```name```              | String                           | Task name (Required) (Default: ```processes[0].name```)
+   ```processes```         | List of ```Process``` objects    | List of ```Process``` objects bound to this task. (Required)
+   ```constraints```       | List of ```Constraint``` objects | List of ```Constraint``` objects constraining processes.
+   ```resources```         | ```Resource``` object            | Resource footprint. (Required)
+   ```max_failures```      | Integer                          | Maximum process failures before being considered failed (Default: 1)
+   ```max_concurrency```   | Integer                          | Maximum number of concurrent processes (Default: 0, unlimited concurrency.)
+   ```finalization_wait``` | Integer                          | Amount of time allocated for finalizing processes, in seconds. (Default: 30)
+
+#### name
+`name` is a string denoting the name of this task. It defaults to the name of the first Process in
+the list of Processes associated with the `processes` attribute.
+
+#### processes
+
+`processes` is an unordered list of `Process` objects. To constrain the order
+in which they run, use `constraints`.
+
+##### constraints
+
+A list of `Constraint` objects. Currently it supports only one type,
+the `order` constraint. `order` is a list of process names
+that should run in the order given. For example,
+
+        process = Process(cmdline = "echo hello {{name}}")
+        task = Task(name = "echoes",
+                    processes = [process(name = "jim"), process(name = "bob")],
+                    constraints = [Constraint(order = ["jim", "bob"])])
+
+Constraints can be supplied ad-hoc and in duplicate. Not all
+Processes need be constrained; however, Tasks with cycles are
+rejected by the Thermos scheduler.
+
+Use the `order` function as shorthand to generate `Constraint` lists.
+The following:
+
+        order(process1, process2)
+
+is shorthand for
+
+        [Constraint(order = [process1.name(), process2.name()])]
+
+#### resources
+
+Takes a `Resource` object, which specifies the amounts of CPU, memory, and disk space resources
+to allocate to the Task.
+
+#### max_failures
+
+`max_failures` is the number of times processes that are part of this
+Task can fail before the entire Task is marked for failure.
+
+For example:
+
+        template = Process(max_failures=10)
+        task = Task(
+          name = "fail",
+          processes = [
+             template(name = "failing", cmdline = "exit 1"),
+             template(name = "succeeding", cmdline = "exit 0")
+          ],
+          max_failures=2)
+
+The `failing` Process could fail 10 times before being marked as
+permanently failed, and the `succeeding` Process would succeed on the
+first run. The task would succeed despite only allowing for two failed
+processes. To be more specific, there would be 10 failed process runs
+yet 1 failed process.
+
+#### max_concurrency
+
+For Tasks with a number of expensive but otherwise independent
+processes, you may want to limit the amount of concurrency
+the Thermos scheduler provides rather than artificially constraining
+it via `order` constraints. For example, a test framework may
+generate a task with 100 test run processes, but wants to run it on
+a machine with only 4 cores. You can limit the amount of parallelism to
+4 by setting `max_concurrency=4` in your task configuration.
+
+For example, the following task spawns 180 Processes ("mappers")
+to compute individual elements of a 180 degree sine table, all dependent
+upon one final Process ("reducer") to tabulate the results:
+
+    def make_mapper(id):
+      return Process(
+        name = "mapper%03d" % id,
+        cmdline = "echo 'scale=50;s(%d*4*a(1)/180)' | bc -l > temp.sine_table.%03d" % (id, id))
+
+    def make_reducer():
+      return Process(
+        name = "reducer",
+        cmdline = "cat temp.* | nl > sine_table.txt && rm -f temp.*")
+
+    processes = map(make_mapper, range(180))
+
+    task = Task(
+      name = "mapreduce",
+      processes = processes + [make_reducer()],
+      constraints = [Constraint(order = [mapper.name(), 'reducer']) for mapper
+                     in processes],
+      max_concurrency = 8)
+
+#### finalization_wait
+
+Tasks have three active stages: `ACTIVE`, `CLEANING`, and `FINALIZING`. The
+`ACTIVE` stage is when ordinary processes run. This stage lasts as
+long as Processes are running and the Task is healthy. The moment either
+all Processes have finished successfully or the Task has reached its
+maximum Process failure limit, it goes into the `CLEANING` stage and sends
+SIGTERMs to all currently running Processes and their process trees.
+Once all Processes have terminated, the Task goes into `FINALIZING` stage
+and invokes the schedule of all Processes with the "final" attribute set to True.
+
+This whole process from the end of `ACTIVE` stage to the end of `FINALIZING`
+must happen within `finalization_wait` seconds. If it does not
+finish during that time, all remaining Processes are sent SIGKILLs
+(or if they depend upon uncompleted Processes, are
+never invoked.)
+
+Client applications with higher priority may force a shorter
+finalization wait (e.g. through parameters to `thermos kill`), so this
+is mostly a best-effort signal.
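+
+As an illustrative sketch (the command lines and archive URL are hypothetical), a
+Task that uploads its logs in a finalizing Process and allows extra time for that
+cleanup might look like:
+
+        main = Process(name = "main", cmdline = "java -jar application.jar")
+        upload_logs = Process(
+          name = "upload_logs",
+          # hypothetical archive endpoint
+          cmdline = "tar czf logs.tgz .logs && curl -T logs.tgz https://log-archive.example.com/",
+          final = True)
+
+        task = Task(
+          name = "application",
+          processes = [main, upload_logs],
+          resources = Resources(cpu = 1.0, ram = 1*GB, disk = 1*GB),
+          finalization_wait = 120)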
+
+### Constraint Object
+
+Currently, `Constraint` objects support only a single ordering constraint, `order`,
+which specifies that the listed processes run sequentially in the order given. By
+default, all processes run in parallel when bound to a `Task` without
+ordering constraints.
+
+   param | type           | description
+   ----- | :----:         | -----------
+   order | List of String | List of processes by name (String) that should be run serially.
+
+### Resource Object
+
+Specifies the amount of CPU, RAM, and disk resources the task needs. See the
+[Resource Isolation document](/documentation/0.7.0-incubating/resource-isolation/) for suggested values and to understand how
+resources are allocated.
+
+  param      | type    | description
+  -----      | :----:  | -----------
+  ```cpu```  | Float   | Fractional number of cores required by the task.
+  ```ram```  | Integer | Bytes of RAM required by the task.
+  ```disk``` | Integer | Bytes of disk required by the task.
+
+
+Job Schema
+==========
+
+### Job Objects
+
+   name | type | description
+   ------ | :-------: | -------
+  ```task``` | Task | The Task object to bind to this job. Required.
+  ```name``` | String | Job name. (Default: inherited from the task attribute's name)
+  ```role``` | String | Job role account. Required.
+  ```cluster``` | String | Cluster in which this job is scheduled. Required.
+   ```environment``` | String | Job environment, default ```devel```. Must be one of ```prod```, ```devel```, ```test``` or ```staging<number>```.
+  ```contact``` | String | Best email address to reach the owner of the job. For production jobs, this is usually a team mailing list.
+  ```instances```| Integer | Number of instances (sometimes referred to as replicas or shards) of the task to create. (Default: 1)
+   ```cron_schedule``` | String | Cron schedule in cron format. May only be used with non-service jobs. See [Cron Jobs](/documentation/0.7.0-incubating/cron-jobs/) for more information. Default: None (not a cron job.)
+   ```cron_collision_policy``` | String | Policy to use when a cron job is triggered while a previous run is still active. ```KILL_EXISTING```: kill the previous run and schedule the new run. ```CANCEL_NEW```: let the previous run continue and cancel the new run. (Default: KILL_EXISTING)
+  ```update_config``` | ```update_config``` object | Parameters for controlling the rate and policy of rolling updates.
+  ```constraints``` | dict | Scheduling constraints for the tasks. See the section on the [constraint specification language](#Specifying-Scheduling-Constraints)
+  ```service``` | Boolean | If True, restart tasks regardless of success or failure. (Default: False)
+  ```max_task_failures``` | Integer | Maximum number of failures after which the task is considered to have failed. Set to -1 to allow for infinite failures. (Default: 1)
+  ```priority``` | Integer | Preemption priority to give the task (Default 0). Tasks with higher priorities may preempt tasks at lower priorities.
+  ```production``` | Boolean |  Whether or not this is a production task backed by quota (Default: False). Production jobs may preempt any non-production job, and may only be preempted by production jobs in the same role and of higher priority. To run jobs at this level, the job role must have the appropriate quota.
+  ```health_check_config``` | ```health_check_config``` object | Parameters for controlling a task's health checks via HTTP. Only used if a health port was assigned with a command line wildcard.
+  ```container``` | ```Container``` object | An optional container to run all processes inside of.
+
+### Services
+
+Jobs with the `service` flag set to True are called Services. The `Service`
+alias can be used as shorthand for `Job` with `service=True`.
+Services are differentiated from non-service Jobs in that tasks
+always restart on completion, whether successful or unsuccessful.
+Jobs without the service bit set only restart up to
+`max_task_failures` times and only if they terminated unsuccessfully
+either due to human error or machine failure.
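+
+For example, the following two definitions are equivalent (this sketch reuses the
+`hello_world_task` defined in the Basic Examples section below):
+
+    hello_world_svc = Service(
+      name = 'hello_world',
+      task = hello_world_task,
+      cluster = 'cluster1',
+      role = 'www-data',
+      environment = 'prod')
+
+    # equivalent to:
+    hello_world_svc = Job(
+      name = 'hello_world',
+      task = hello_world_task,
+      cluster = 'cluster1',
+      role = 'www-data',
+      environment = 'prod',
+      service = True)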
+
+### UpdateConfig Objects
+
+Parameters for controlling the rate and policy of rolling updates.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+| ```restart_threshold```      | Integer  | Maximum number of seconds before a shard must move into the ```RUNNING``` state before considered a failure (Default: 60)
+| ```watch_secs```             | Integer  | Minimum number of seconds a shard must remain in ```RUNNING``` state before considered a success (Default: 45)
+| ```max_per_shard_failures``` | Integer  | Maximum number of restarts per shard during update. Increments total failure count when this limit is exceeded. (Default: 0)
+| ```max_total_failures```     | Integer  | Maximum number of shard failures to be tolerated in total during an update. Cannot be greater than or equal to the total number of tasks in a job. (Default: 0)
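+
+For example, a more cautious rolling-update policy (illustrative values; `job_template`
+stands in for a Job defined elsewhere) might be attached like this:
+
+    # job_template is assumed to be a previously defined Job
+    careful_update = UpdateConfig(
+      batch_size = 2,
+      restart_threshold = 60,
+      watch_secs = 90,
+      max_per_shard_failures = 1,
+      max_total_failures = 3)
+
+    job = job_template(update_config = careful_update)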
+
+### HealthCheckConfig Objects
+
+Parameters for controlling a task's health checks via HTTP.
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```initial_interval_secs```    | Integer   | Initial delay for performing an HTTP health check. (Default: 15)
+| ```interval_secs```            | Integer   | Interval on which to check the task's health via HTTP. (Default: 10)
+| ```timeout_secs```             | Integer   | HTTP request timeout. (Default: 1)
+| ```max_consecutive_failures``` | Integer   | Maximum number of consecutive failures tolerated before considering a task unhealthy (Default: 0)
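+
+For example (illustrative values; `job_template` again stands in for a Job defined
+elsewhere):
+
+    # job_template is assumed to be a previously defined Job
+    watched_job = job_template(health_check_config = HealthCheckConfig(
+      initial_interval_secs = 30,
+      interval_secs = 15,
+      timeout_secs = 2,
+      max_consecutive_failures = 3))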
+
+### Announcer Objects
+
+If the `announce` field in the Job configuration is set, each task will be
+registered in the ServerSet `/aurora/role/environment/jobname` in the
+zookeeper ensemble configured by the executor.  If no Announcer object is specified,
+no announcement will take place.  For more information about ServerSets, see the [User Guide](/documentation/0.7.0-incubating/user-guide/).
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```primary_port```             | String    | Which named port to register as the primary endpoint in the ServerSet (Default: `http`)
+| ```portmap```                  | dict      | A mapping of additional endpoints to be announced in the ServerSet (Default: `{ 'aurora': '{{primary_port}}' }`)
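+
+For example, a sketch that announces the default `http` port as the primary endpoint
+and additionally aliases it under a hypothetical `admin` name (`job_template` stands
+in for a Job defined elsewhere):
+
+    # job_template is assumed; the 'admin' alias is purely illustrative
+    announced_job = job_template(announce = Announcer(
+      primary_port = 'http',
+      portmap = {'admin': 'http'}))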
+
+### Port aliasing with the Announcer `portmap`
+
+The primary endpoint registered in the ServerSet is the one allocated to the port
+specified by the `primary_port` in the `Announcer` object, by default
+the `http` port.  This port can be referenced from anywhere within a configuration
+as `{{thermos.ports[http]}}`.
+
+Without the port map, each named port would be allocated a unique port number.
+The `portmap` allows two different named ports to be aliased together.  The default
+`portmap` aliases the `aurora` port (i.e. `{{thermos.ports[aurora]}}`) to
+the `http` port.  Even though the two ports can be referenced independently,
+only one port is allocated by Mesos.  Any port referenced in a `Process` object
+but which is not in the portmap will be allocated dynamically by Mesos and announced as well.
+
+It is possible to use the portmap to alias names to static port numbers, e.g.
+`{'http': 80, 'https': 443, 'aurora': 'http'}`.  In this case, referencing
+`{{thermos.ports[aurora]}}` would look up `{{thermos.ports[http]}}` then
+find a static port 80.  No port would be requested of or allocated by Mesos.
+
+Static ports should be used cautiously as Aurora does nothing to prevent two
+tasks with the same static port allocations from being co-scheduled.
+External constraints such as slave attributes should be used to enforce such
+guarantees should they be needed.
+
+### Container Object
+
+*Note: The only container type currently supported is "docker".  Docker support is currently EXPERIMENTAL.*
+*Note: In order to correctly execute processes inside a job, the Docker container must have python 2.7 installed.*
+
+Describes the container the job's processes will run inside.
+
+  param          | type           | description
+  -----          | :----:         | -----------
+  ```docker```   | Docker         | A docker container to use.
+
+### Docker Object
+
+  param          | type           | description
+  -----          | :----:         | -----------
+  ```image```    | String         | The name of the docker image to execute.  If the image does not exist locally it will be pulled with ```docker pull```.
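+
+For example, a sketch of a Job whose processes run inside a Docker image (the image
+name is illustrative, and recall that the image needs Python 2.7; `job_template`
+stands in for a Job defined elsewhere):
+
+    # job_template is assumed; image name is illustrative
+    containerized_job = job_template(
+      container = Container(docker = Docker(image = 'python:2.7')))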
+
+
+Specifying Scheduling Constraints
+=================================
+
+Most users will not need to specify constraints explicitly, as the
+scheduler automatically inserts reasonable defaults that attempt to
+ensure reliability without impacting schedulability. For example, the
+scheduler inserts a `host: limit:1` constraint, ensuring
+that your shards run on different physical machines. Please do not
+set this field unless you are sure of what you are doing.
+
+In the `Job` object there is a map `constraints` from String to String
+allowing the user to tailor the schedulability of tasks within the job.
+
+Each slave in the cluster is assigned a set of string-valued
+key/value pairs called attributes. For example, consider the host
+`cluster1-aaa-03-sr2` and its following attributes (given in key:value
+format): `host:cluster1-aaa-03-sr2` and `rack:aaa`.
+
+The constraint map's key value is the attribute name in which we
+constrain Tasks within our Job. The value is how we constrain them.
+There are two types of constraints: *limit constraints* and *value
+constraints*.
+
+| constraint    | description
+| ------------- | --------------
+| Limit         | A string that specifies a limit for a constraint. Starts with <code>'limit:</code> followed by an Integer and closing single quote, such as ```'limit:1'```.
+| Value         | A string that specifies a value for a constraint. To include a list of values, separate the values using commas. To negate the values of a constraint, start with a ```!```.
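+
+For example, a value constraint pins tasks to particular attribute values, and a
+```!``` negates them (a sketch using the example host above):
+
+    constraints = {
+      'rack': 'aaa,bbb',               # only racks aaa or bbb
+      'host': '!cluster1-aaa-03-sr2',  # never this host
+    }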
+
+You can also control machine diversity using constraints. The below
+constraint ensures that no more than two instances of your job may run
+on a single host. Think of this as a "group by" limit.
+
+    constraints = {
+      'host': 'limit:2',
+    }
+
+Likewise, you can use constraints to control rack diversity, e.g. at
+most one task per rack:
+
+    constraints = {
+      'rack': 'limit:1',
+    }
+
+Use these constraints sparingly as they can dramatically reduce Tasks' schedulability.
+
+Template Namespaces
+===================
+
+Currently, a few Pystachio namespaces have special semantics. Using them
+in your configuration allows you to tailor application behavior
+through environment introspection or to interact in special ways with the
+Aurora client or Aurora-provided services.
+
+### mesos Namespace
+
+The `mesos` namespace contains the `instance` variable that can be used
+to distinguish between Task replicas.
+
+| variable name     | type       | description
+| --------------- | :--------: | -------------
+| ```instance```    | Integer    | The instance number of the created task. A job with 5 replicas has instance numbers 0, 1, 2, 3, and 4.
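+
+For example, `{{mesos.instance}}` can be interpolated into a command line so that
+each replica knows its own shard number (the command itself is illustrative):
+
+    shard_aware = Process(
+      name = 'shard_aware',
+      # illustrative command line
+      cmdline = 'java -jar application.jar --shard={{mesos.instance}}')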
+
+### thermos Namespace
+
+The `thermos` namespace contains variables that work directly on the
+Thermos platform in addition to Aurora. This namespace is fully
+compatible with Tasks invoked via the `thermos` CLI.
+
+| variable      | type                     | description                        |
+| :----------:  | ---------                | ------------                       |
+| ```ports```   | map of string to Integer | A map of names to port numbers     |
+| ```task_id``` | string                   | The task ID assigned to this task. |
+
+The `thermos.ports` namespace is automatically populated by Aurora when
+invoking tasks on Mesos. When running the `thermos` command directly,
+these ports must be explicitly mapped with the `-P` option.
+
+For example, if `{{thermos.ports[http]}}` is specified in a `Process`
+configuration, it is automatically extracted and auto-populated by
+Aurora, but must be specified with, for example, `thermos -P http:12345`
+to map `http` to port 12345 when running via the CLI.
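+
+For example, a Process that binds a web server to the allocated `http` port (the
+server command is illustrative):
+
+    http_server = Process(
+      name = 'http_server',
+      # illustrative server command
+      cmdline = './run_server --port={{thermos.ports[http]}}')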
+
+Basic Examples
+==============
+
+These are provided to give a basic understanding of simple Aurora jobs.
+
+### hello_world.aurora
+
+Put the following in a file named `hello_world.aurora`, substituting your own
+values where needed, such as the `cluster` name.
+
+    import os
+    hello_world_process = Process(name = 'hello_world', cmdline = 'echo hello world')
+
+    hello_world_task = Task(
+      resources = Resources(cpu = 0.1, ram = 16 * MB, disk = 16 * MB),
+      processes = [hello_world_process])
+
+    hello_world_job = Job(
+      cluster = 'cluster1',
+      role = os.getenv('USER'),
+      task = hello_world_task)
+
+    jobs = [hello_world_job]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora create cluster1/$USER/test/hello_world hello_world.aurora
+
+    aurora kill cluster1/$USER/test/hello_world
+
+### Environment Tailoring
+
+#### hello_world_productionized.aurora
+
+Put the following in a file named `hello_world_productionized.aurora`, substituting
+your own values where needed, such as the `cluster` names.
+
+    include('hello_world.aurora')
+
+    production_resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 2 * GB)
+    staging_resources = Resources(cpu = 0.1, ram = 32 * MB, disk = 512 * MB)
+    hello_world_template = hello_world_job(
+        name = "hello_world-{{cluster}}",
+        task = hello_world_task(resources = production_resources))
+
+    jobs = [
+      # production jobs
+      hello_world_template(cluster = 'cluster1', instances = 25),
+      hello_world_template(cluster = 'cluster2', instances = 15),
+
+      # staging jobs
+      hello_world_template(
+        cluster = 'local',
+        instances = 1,
+        task = hello_world_task(resources = staging_resources)),
+    ]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora create cluster1/$USER/test/hello_world-cluster1 hello_world_productionized.aurora
+
+    aurora kill cluster1/$USER/test/hello_world-cluster1
diff --git a/source/documentation/0.7.0-incubating/configuration-tutorial.md b/source/documentation/0.7.0-incubating/configuration-tutorial.md
new file mode 100644
index 0000000..e84c61f
--- /dev/null
+++ b/source/documentation/0.7.0-incubating/configuration-tutorial.md
@@ -0,0 +1,1124 @@
+Aurora Configuration Tutorial
+=============================
+
+How to write Aurora configuration files, including feature descriptions
+and best practices. When writing a configuration file, make use of
+`aurora inspect`. It takes the same job key and configuration file
+arguments as `aurora create` or `aurora update`. It first ensures the
+configuration parses, then outputs it in human-readable form.
+
+You should read this after going through the general [Aurora Tutorial](/documentation/0.7.0-incubating/tutorial/).
+
+- [Aurora Configuration Tutorial](#aurora-configuration-tutorial)
+	- [The Basics](#the-basics)
+		- [Use Bottom-To-Top Object Ordering](#use-bottom-to-top-object-ordering)
+	- [An Example Configuration File](#an-example-configuration-file)
+	- [Defining Process Objects](#defining-process-objects)
+	- [Getting Your Code Into The Sandbox](#getting-your-code-into-the-sandbox)
+	- [Defining Task Objects](#defining-task-objects)
+		- [SequentialTask: Running Processes in Parallel or Sequentially](#sequentialtask-running-processes-in-parallel-or-sequentially)
+		- [SimpleTask](#simpletask)
+		- [Combining tasks](#combining-tasks)
+	- [Defining Job Objects](#defining-job-objects)
+	- [The jobs List](#the-jobs-list)
+	- [Templating](#templating)
+		- [Templating 1: Binding in Pystachio](#templating-1-binding-in-pystachio)
+		- [Structurals in Pystachio / Aurora](#structurals-in-pystachio--aurora)
+			- [Mustaches Within Structurals](#mustaches-within-structurals)
+		- [Templating 2: Structurals Are Factories](#templating-2-structurals-are-factories)
+			- [A Second Way of Templating](#a-second-way-of-templating)
+		- [Advanced Binding](#advanced-binding)
+			- [Bind Syntax](#bind-syntax)
+			- [Binding Complex Objects](#binding-complex-objects)
+				- [Lists](#lists)
+				- [Maps](#maps)
+				- [Structurals](#structurals)
+		- [Structural Binding](#structural-binding)
+	- [Configuration File Writing Tips And Best Practices](#configuration-file-writing-tips-and-best-practices)
+		- [Use As Few .aurora Files As Possible](#use-as-few-aurora-files-as-possible)
+		- [Avoid Boilerplate](#avoid-boilerplate)
+		- [Thermos Uses bash, But Thermos Is Not bash](#thermos-uses-bash-but-thermos-is-not-bash)
+			- [Bad](#bad)
+			- [Good](#good)
+		- [Rarely Use Functions In Your Configurations](#rarely-use-functions-in-your-configurations)
+			- [Bad](#bad-1)
+			- [Good](#good-1)
+
+The Basics
+----------
+
+To run a job on Aurora, you must specify a configuration file that tells
+Aurora what it needs to know to schedule the job, what Mesos needs to
+run the tasks the job is made up of, and what Thermos needs to run the
+processes that make up the tasks. This file must have
+a `.aurora` suffix.
+
+A configuration file defines a collection of objects, along with parameter
+values for their attributes. An Aurora configuration file contains the
+following three types of objects:
+
+- Job
+- Task
+- Process
+
+A configuration also specifies a list of `Job` objects assigned
+to the variable `jobs`.
+
+- jobs (list of defined Jobs to run)
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a type-checked dictionary
+templating library called *Pystachio*, a powerful tool for
+configuration specification and reuse. Pystachio objects are tailored
+via {{}} surrounded templates.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the [*Aurora+Thermos Configuration
+Reference*](/documentation/0.7.0-incubating/configuration-reference/), without `import` statements - the
+Aurora config loader injects them automatically. Other than that, an `.aurora`
+file works like any other Python script.
+
+[*Aurora+Thermos Configuration Reference*](/documentation/0.7.0-incubating/configuration-reference/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+### Use Bottom-To-Top Object Ordering
+
+A well-structured configuration starts with structural templates (if
+any). Structural templates encapsulate in their attributes all the
+differences between Jobs in the configuration that are not directly
+manipulated at the `Job` level, but typically at the `Process` or `Task`
+level. For example, if certain processes are invoked with slightly
+different settings or input.
+
+After structural templates, define, in order, `Process`es, `Task`s, and
+`Job`s.
+
+Structural template names should be *UpperCamelCased* and their
+instantiations are typically *UPPER\_SNAKE\_CASED*. `Process`, `Task`,
+and `Job` names are typically *lower\_snake\_cased*. Indentation is typically 2
+spaces.
+
+An Example Configuration File
+-----------------------------
+
+The following is a typical configuration file. Don't worry if there are
+parts you don't understand yet, but you may want to refer back to this
+as you read about its individual parts. Note that names surrounded by
+curly braces {{}} are template variables, which the system replaces with
+bound values for the variables.
+
+    # --- templates here ---
+	class Profile(Struct):
+	  package_version = Default(String, 'live')
+	  java_binary = Default(String, '/usr/lib/jvm/java-1.7.0-openjdk/bin/java')
+	  extra_jvm_options = Default(String, '')
+	  parent_environment = Default(String, 'prod')
+	  parent_serverset = Default(String,
+                                 '/foocorp/service/bird/{{parent_environment}}/bird')
+
+	# --- processes here ---
+	main = Process(
+	  name = 'application',
+	  cmdline = '{{profile.java_binary}} -server -Xmx1792m '
+	            '{{profile.extra_jvm_options}} '
+	            '-jar application.jar '
+	            '-upstreamService {{profile.parent_serverset}}'
+	)
+
+	# --- tasks ---
+	base_task = SequentialTask(
+	  name = 'application',
+	  processes = [
+	    Process(
+	      name = 'fetch',
+	      cmdline = 'curl -O https://packages.foocorp.com/'
+	                '{{profile.package_version}}/application.jar'),
+	  ]
+	)
+
+        # not always necessary but often useful to have separate task
+        # resource classes
+        staging_task = base_task(resources =
+                         Resources(cpu = 1.0,
+                                   ram = 2048*MB,
+                                   disk = 1*GB))
+	production_task = base_task(resources =
+                            Resources(cpu = 4.0,
+                                      ram = 2560*MB,
+                                      disk = 10*GB))
+
+	# --- job template ---
+	job_template = Job(
+	  name = 'application',
+	  role = 'myteam',
+	  contact = 'myteam-team@foocorp.com',
+	  instances = 20,
+	  service = True,
+	  task = production_task
+	)
+
+	# -- profile instantiations (if any) ---
+	PRODUCTION = Profile()
+	STAGING = Profile(
+	  extra_jvm_options = '-Xloggc:gc.log',
+	  parent_environment = 'staging'
+	)
+
+	# -- job instantiations --
+	jobs = [
+          job_template(cluster = 'cluster1', environment = 'prod')
+	               .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster2', environment = 'prod')
+	                .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster1',
+                        environment = 'staging',
+			service = False,
+			task = staging_task,
+			instances = 2)
+			.bind(profile = STAGING),
+	]
+
+## Defining Process Objects
+
+Processes are handled by the Thermos system. A process is a single
+executable step run as part of an Aurora task; it consists of a single
+bash-executable statement.
+
+The key (and required) `Process` attributes are:
+
+-   `name`: Any string which is a valid Unix filename (no slashes,
+    NULLs, or leading periods). The `name` value must be unique relative
+    to other Processes in a `Task`.
+-   `cmdline`: A command line run in a bash subshell, so you can use
+    bash scripts. Nothing is supplied for command-line arguments,
+    so `$*` is unspecified.
+
+Many tiny processes make managing configurations more difficult. For
+example, the following is a bad way to define processes.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip'
+    )
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+Since `cmdline` runs in a bash subshell, you can chain commands
+with `&&` or `||`.
+
+When defining a `Task` that is just a list of Processes run in a
+particular order, use `SequentialTask`, as described in the [*Defining*
+`Task` *Objects*](#defining-task-objects) section. The following simplifies and combines the
+above multiple `Process` definitions into just two.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip && '
+                'unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+`Process` also has five optional attributes, each with a default value
+if one isn't specified in the configuration:
+
+-   `max_failures`: Defaulting to `1`, the maximum number of failures
+    (non-zero exit statuses) before this `Process` is marked permanently
+    failed and not retried. If a `Process` permanently fails, Thermos
+    checks the `Process` object's containing `Task` for the task's
+    failure limit (usually 1) to determine whether or not the `Task`
+    should be failed. Setting `max_failures` to `0` means that this
+    process will keep retrying until a successful (zero) exit status is
+    achieved. Retries happen at most once every `min_duration` seconds
+    to prevent effectively mounting a denial of service attack against
+    the coordinating scheduler.
+
+-   `daemon`: Defaulting to `False`, if `daemon` is set to `True`, a
+    successful (zero) exit status does not prevent future process runs.
+    Instead, the `Process` reinvokes after `min_duration` seconds.
+    However, the maximum failure limit (`max_failures`) still
+    applies. A combination of `daemon=True` and `max_failures=0` retries
+    a `Process` indefinitely regardless of exit status. This should
+    generally be avoided for very short-lived processes because of the
+    accumulation of checkpointed state for each process run. When
+    running in Aurora, `max_failures` is capped at
+    100.
+
+-   `ephemeral`: Defaulting to `False`, if `ephemeral` is `True`, the
+    `Process`' status is not used to determine if its bound `Task` has
+    completed. For example, consider a `Task` with a
+    non-ephemeral webserver process and an ephemeral logsaver process
+    that periodically checkpoints its log files to a centralized data
+    store. The `Task` is considered finished once the webserver process
+    finishes, regardless of the logsaver's current status.
+
+-   `min_duration`: Defaults to `15`. Processes may succeed or fail
+    multiple times during a single Task. Each result is called a
+    *process run* and this value is the minimum number of seconds the
+    scheduler waits before re-running the same process.
+
+-   `final`: Defaulting to `False`, this is a finalizing `Process` that
+    should run last. Processes can be grouped into two classes:
+    *ordinary* and *finalizing*. By default, Thermos Processes are
+    ordinary. They run as long as the `Task` is considered
+    healthy (i.e. hasn't reached a failure limit). But once all regular
+    Thermos Processes have either finished or the `Task` has reached a
+    certain failure threshold, Thermos moves into a *finalization* stage
+    and runs all finalizing Processes. These are typically necessary for
+    cleaning up after the `Task`, such as log checkpointers, or perhaps
+    e-mail notifications of a completed Task. Finalizing processes may
+    not depend upon ordinary processes or vice-versa, however finalizing
+    processes may depend upon other finalizing processes and will
+    otherwise run as a typical process schedule.
+
+## Getting Your Code Into The Sandbox
+
+When using Aurora, you need to get your executable code into its "sandbox", specifically
+the Task sandbox where the code executes for the Processes that make up that Task.
+
+Each Task has a sandbox created when the Task starts and garbage
+collected when it finishes. All of a Task's processes run in its
+sandbox, so processes can share state by using a shared current
+working directory.
+
+Typically, you save this code somewhere. You then need to define a Process
+in your `.aurora` configuration file that fetches the code from that somewhere
+to where the slave can see it. For a public cloud, that can be anywhere public on
+the Internet, such as S3. For a private cloud internal storage, you need to put in
+on an accessible HDFS cluster or similar storage.
+
+The template for this Process is:
+
+    <name> = Process(
+      name = '<name>',
+      cmdline = '<command to copy and extract code archive into current working directory>'
+    )
+
+Note: Be sure the extracted code archive has an executable.
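+
+A concrete sketch of such a fetch Process, assuming a hypothetical package URL,
+might be:
+
+    fetch_app = Process(
+      name = 'fetch_app',
+      # URL is hypothetical; point this at wherever your archive actually lives
+      cmdline = 'curl -O https://packages.example.com/app.tar.gz && tar xzf app.tar.gz')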
+
+## Defining Task Objects
+
+Tasks are handled by Mesos. A task is a collection of processes that
+runs in a shared sandbox. It's the fundamental unit Aurora uses to
+schedule the datacenter; essentially what Aurora does is find places
+in the cluster to run tasks.
+
+The key (and required) parts of a Task are:
+
+-   `name`: A string giving the Task's name. By default, if a Task is
+    not given a name, it inherits the first name in its Process list.
+
+-   `processes`: An unordered list of Process objects bound to the Task.
+    The value of the optional `constraints` attribute affects the
+    contents as a whole. Currently, the only constraint, `order`, determines if
+    the processes run in parallel or sequentially.
+
+-   `resources`: A `Resource` object defining the Task's resource
+    footprint. A `Resource` object has three attributes:
+    -   `cpu`: A Float, the fractional number of cores the Task requires.
+    -   `ram`: An Integer, RAM bytes the Task requires.
+    -   `disk`: An Integer, disk bytes the Task requires.
+
+A basic Task definition looks like:
+
+    Task(
+        name="hello_world",
+        processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+        resources=Resources(cpu = 1.0,
+                            ram = 1*GB,
+                            disk = 1*GB))
+
+There are four optional Task attributes:
+
+-   `constraints`: A list of `Constraint` objects that constrain the
+    Task's processes. Currently there is only one type, the `order`
+    constraint. For example the following requires that the processes
+    run in the order `foo`, then `bar`.
+
+        constraints = [Constraint(order=['foo', 'bar'])]
+
+    There is an `order()` function that takes `order('foo', 'bar', 'baz')`
+    and converts it into `[Constraint(order=['foo', 'bar', 'baz'])]`.
+    `order()` accepts Process name strings `('foo', 'bar')` or the processes
+    themselves, e.g. `foo=Process(name='foo', ...)`, `bar=Process(name='bar', ...)`,
+    `constraints=order(foo, bar)`
+
+    Note that Thermos rejects tasks with process cycles.
+
+-   `max_failures`: Defaulting to `1`, the number of failed processes
+    needed for the `Task` to be marked as failed. Note how this
+    interacts with individual Processes' `max_failures` values. Assume a
+    Task has two Processes and a `max_failures` value of `2`. So both
+    Processes must fail for the Task to fail. Now, assume each of the
+    Task's Processes has its own `max_failures` value of `10`. If
+    Process "A" fails 5 times before succeeding, and Process "B" fails
+    10 times and is then marked as failing, their parent Task succeeds.
+    Even though there were 15 individual failures by its Processes, only
+    1 of its Processes was finally marked as failing. Since 1 is less
+    than the 2 that is the Task's `max_failures` value, the Task does
+    not fail.
+
+-   `max_concurrency`: Defaulting to `0`, the maximum number of
+    concurrent processes in the Task. `0` specifies unlimited
+    concurrency. For Tasks with many expensive but otherwise independent
+    processes, you can limit the amount of concurrency Thermos schedules
+    instead of artificially constraining them through `order`
+    constraints. For example, a test framework may generate a Task with
+    100 test run processes, but runs it in a Task with
+    `resources.cpus=4`. Limit the amount of parallelism to 4 by setting
+    `max_concurrency=4`.
+
+-   `finalization_wait`: Defaulting to `30`, the number of seconds
+    allocated for finalizing the Task's processes. A Task starts in
+    `ACTIVE` state when Processes run and stays there as long as the Task
+    is healthy and Processes run. When all Processes finish successfully
+    or the Task reaches its maximum process failure limit, it goes into
+    `CLEANING` state. In `CLEANING`, it sends `SIGTERM`s to any still running
+    Processes. When all Processes terminate, the Task goes into
+    `FINALIZING` state and invokes the schedule of all processes whose
+    final attribute has a True value. Everything from the end of `ACTIVE`
+    to the end of `FINALIZING` must happen within `finalization_wait`
+    number of seconds. If not, all still running Processes are sent
+    `SIGKILL`s (or if dependent on yet to be completed Processes, are
+    never invoked).
+
+### SequentialTask: Running Processes in Parallel or Sequentially
+
+By default, a Task with several Processes runs them in parallel. There
+are two ways to run Processes sequentially:
+
+-   Include an `order` constraint in the Task definition's `constraints`
+    attribute whose arguments specify the processes' run order:
+
+        Task( ... processes=[process1, process2, process3],
+	          constraints = order(process1, process2, process3), ...)
+
+-   Use `SequentialTask` instead of `Task`; it automatically runs
+    processes in the order specified in the `processes` attribute. No
+    `constraint` parameter is needed:
+
+        SequentialTask( ... processes=[process1, process2, process3] ...)
+
+### SimpleTask
+
+For quickly creating simple tasks, use the `SimpleTask` helper. It
+creates a basic task from a provided name and command line using a
+default set of resources. For example, in a `.aurora` configuration
+file:
+
+    SimpleTask(name="hello_world", command="echo hello world")
+
+is equivalent to
+
+    Task(name="hello_world",
+         processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+         resources=Resources(cpu = 1.0,
+                             ram = 1*GB,
+                             disk = 1*GB))
+
+The simplest idiomatic Job configuration thus becomes:
+
+    import os
+    hello_world_job = Job(
+      task=SimpleTask(name="hello_world", command="echo hello world"),
+      role=os.getenv('USER'),
+      cluster="cluster1")
+
+When written to `hello_world.aurora`, you invoke it with a simple
+`aurora create cluster1/$USER/test/hello_world hello_world.aurora`.
+
+### Combining tasks
+
+`Tasks.concat` (synonym: `concat_tasks`) and
+`Tasks.combine` (synonym: `combine_tasks`) merge multiple Task definitions
+into a single Task. It may be easier to define complex Jobs
+as smaller constituent Tasks. But since a Job only includes a single
+Task, the subtasks must be combined before using them in a Job.
+Smaller Tasks can also be reused between Jobs, instead of having to
+repeat their definition for multiple Jobs.
+
+With both methods, the merged Task takes the first Task's name. The
+difference between the two is the result Task's process ordering.
+
+-   `Tasks.combine` runs its subtasks' processes in no particular order.
+    The new Task's resource consumption is the sum of all its subtasks'
+    consumption.
+
+-   `Tasks.concat` runs its subtasks in the order supplied, with each
+    subtask's processes run serially between tasks. It is analogous to
+    the `order` constraint helper, except at the Task level instead of
+    the Process level. The new Task's resource consumption is the
+    maximum value specified by any subtask for each Resource attribute
+    (cpu, ram and disk).
+
+For example, given the following:
+
+    setup_task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper],
+      # It is important to note that {{Tasks.concat}} has
+      # no effect on the ordering of the processes within a task;
+      # hence the necessity of the {{order}} statement below
+      # (otherwise, the order in which {{download_interpreter}}
+      # and {{update_zookeeper}} run will be non-deterministic)
+      constraints=order(download_interpreter, update_zookeeper),
+      ...
+    )
+
+    run_task = SequentialTask(
+      ...
+      processes=[download_application, start_application],
+      ...
+    )
+
+    combined_task = Tasks.concat(setup_task, run_task)
+
+The `Tasks.concat` command merges the two Tasks into a single Task and
+ensures all processes in `setup_task` run before the processes
+in `run_task`. Conceptually, the task is reduced to:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper,
+                        download_application, start_application),
+      ...
+    )
+
+In the case of `Tasks.combine`, the two schedules run in parallel:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper) +
+                        order(download_application, start_application),
+      ...
+    )
+
+In the latter case, each of the two sequences may operate in parallel.
+Of course, this may not be the intended behavior (for example, if
+the `start_application` Process implicitly relies
+upon `download_interpreter`). Make sure you understand the difference
+between using one or the other.
+
+## Defining Job Objects
+
+A job is a group of identical tasks that Aurora can run in a Mesos cluster.
+
+A `Job` object is defined by the values of several attributes, some
+required and some optional. The required attributes are:
+
+-   `task`: Task object to bind to this job. Note that a Job can
+    only take a single Task.
+
+-   `role`: Job's role account; in other words, the user account to run
+    the job as on a Mesos cluster machine. A common value is
+    `os.getenv('USER')`, which uses a Python call to get the user who
+    submits the job request. The other common value is the service
+    account that runs the job, e.g. `www-data`.
+
+-   `environment`: Job's environment, typical values
+    are `devel`, `test`, or `prod`.
+
+-   `cluster`: Aurora cluster to schedule the job in, defined in
+    `/etc/aurora/clusters.json` or `~/.clusters.json`. You can specify
+    jobs where the only difference is the `cluster`, then at run time
+    only run the Job whose job key includes your desired cluster's name.
+
+You usually see a `name` parameter. By default, `name` inherits its
+value from the Job's associated Task object, but you can override this
+default. For these four parameters, a Job definition might look like:
+
+    foo_job = Job( name = 'foo', cluster = 'cluster1',
+              role = os.getenv('USER'), environment = 'prod',
+              task = foo_task)
+
+In addition to the required attributes, there are several optional
+attributes. The first (strongly recommended) optional attribute is:
+
+-   `contact`: An email address for the Job's owner. For production
+    jobs, it is usually a team mailing list.
+
+Two more attributes deal with how to handle failure of the Job's Task:
+
+-   `max_task_failures`: An integer, defaulting to `1`, of the maximum
+    number of Task failures after which the Job is considered failed.
+    `-1` allows for infinite failures.
+
+-   `service`: A boolean, defaulting to `False`, which if `True`
+    restarts tasks regardless of whether they succeeded or failed. In
+    other words, if `True`, after the Job's Task completes, it
+    automatically starts again. This is for Jobs you want to run
+    continuously, rather than doing a single run.
+
+Three attributes deal with configuring the Job's Task:
+
+-   `instances`: Defaulting to `1`, the number of
+    instances/replicas/shards of the Job's Task to create.
+
+-   `priority`: Defaulting to `0`, the Job's Task's preemption priority,
+    for which higher values may preempt Tasks from Jobs with lower
+    values.
+
+-   `production`: a Boolean, defaulting to `False`, specifying that this
+    is a production job backed by quota. Tasks from production Jobs may
+    preempt tasks from any non-production job, and may only be preempted
+    by tasks from production jobs in the same role with higher
+    priority. **WARNING**: To run Jobs at this level, the Job role must
+    have the appropriate quota.
+
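+Putting several of these optional attributes together with the required ones (all
+values are illustrative), a long-running production Job might look like:
+
+    # values are illustrative; foo_task is the Task defined earlier
+    foo_service = Job(
+      name = 'foo', cluster = 'cluster1', environment = 'prod',
+      role = os.getenv('USER'), contact = 'myteam-team@foocorp.com',
+      instances = 20, service = True, production = True, priority = 10,
+      task = foo_task)
+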
+The final three Job attributes each take an object as their value.
+
+-   `update_config`: An `UpdateConfig`
+    object provides parameters for controlling the rate and policy of
+    rolling updates. The `UpdateConfig` parameters are:
+    -   `batch_size`: An integer, defaulting to `1`, specifying the
+        maximum number of shards to update in one iteration.
+    -   `restart_threshold`: An integer, defaulting to `60`, specifying
+        the maximum number of seconds before a shard must move into the
+        `RUNNING` state before considered a failure.
+    -   `watch_secs`: An integer, defaulting to `45`, specifying the
+        minimum number of seconds a shard must remain in the `RUNNING`
+        state before considered a success.
+    -   `max_per_shard_failures`: An integer, defaulting to `0`,
+        specifying the maximum number of restarts per shard during an
+        update. When the limit is exceeded, it increments the total
+        failure count.
+    -   `max_total_failures`: An integer, defaulting to `0`, specifying
+        the maximum number of shard failures tolerated during an update.
+        Cannot be equal to or greater than the job's total number of
+        tasks.
+-   `health_check_config`: A `HealthCheckConfig` object that provides
+    parameters for controlling a Task's health checks via HTTP. Only
+    used if a health port was assigned with a command line wildcard. The
+    `HealthCheckConfig` parameters are:
+    -   `initial_interval_secs`: An integer, defaulting to `15`,
+        specifying the initial delay for doing an HTTP health check.
+    -   `interval_secs`: An integer, defaulting to `10`, specifying the
+        number of seconds in the interval between checking the Task's
+        health.
+    -   `timeout_secs`: An integer, defaulting to `1`, specifying the
+        number of seconds within which the application must respond to an
+        HTTP health check with `OK` before the check is considered a failure.
+    -   `max_consecutive_failures`: An integer, defaulting to `0`,
+        specifying the maximum number of consecutive failures before a
+        task is unhealthy.
+-   `constraints`: A `dict` Python object, specifying Task scheduling
+    constraints. Most users will not need to specify constraints, as the
+    scheduler automatically inserts reasonable defaults. Please do not
+    set this field unless you are sure of what you are doing. See the
+    section in the Aurora + Thermos Reference manual on [Specifying
+    Scheduling Constraints](/documentation/0.7.0-incubating/configuration-reference/) for more information.
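+
+As a sketch, attaching an `UpdateConfig` and a `HealthCheckConfig` to a Job such as
+the `foo_service` sketch above (values again illustrative):
+
+    # foo_service is the sketch above; values are illustrative
+    foo_service_checked = foo_service(
+      update_config = UpdateConfig(batch_size = 5, watch_secs = 60),
+      health_check_config = HealthCheckConfig(initial_interval_secs = 30,
+                                              max_consecutive_failures = 3))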
+
+## The jobs List
+
+At the end of your `.aurora` file, you need to specify a list of the
+file's defined Jobs to run in the order listed. For example, the
+following runs first `job1`, then `job2`, then `job3`.
+
+    jobs = [job1, job2, job3]
+
+Templating
+----------
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a templating library called
+*Pystachio*, a powerful tool for configuration specification and reuse.
+
+[Aurora+Thermos Configuration Reference](/documentation/0.7.0-incubating/configuration-reference/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the *Aurora+Thermos Configuration
+Reference* without `import` statements - the Aurora config loader
+injects them automatically. Other than that the `.aurora` format
+works like any other Python script.
+
+### Templating 1: Binding in Pystachio
+
+Pystachio uses the visually distinctive {{}} to indicate template
+variables. These are often called "mustache variables" after the
+similarly appearing variables in the Mustache templating system and
+because the curly braces resemble mustaches.
+
+If you are familiar with the Mustache system, templates in Pystachio
+have significant differences. They have no nesting, joining, or
+inheritance semantics. On the other hand, when evaluated, templates
+are evaluated iteratively, so this affords some level of indirection.
+
+Let's start with the simplest template: text with one
+variable, in this case `name`:
+
+    Hello {{name}}
+
+If we evaluate this as is, we'd get back:
+
+    Hello
+
+If a template variable doesn't have a value, when evaluated it's
+replaced with nothing. If we add a binding to give it a value:
+
+    { "name" : "Tom" }
+
+We'd get back:
+
+    Hello Tom
+
+Every Pystachio object has an associated `.bind` method that can bind
+values to {{}} variables. Bindings are not immediately evaluated.
+Instead, they are evaluated only when the interpolated value of the
+object is necessary, e.g. for performing equality or serializing a
+message over the wire.
+
+Objects with and without mustache templated variables behave
+differently:
+
+    >>> Float(1.5)
+    Float(1.5)
+
+    >>> Float('{{x}}.5')
+    Float({{x}}.5)
+
+    >>> Float('{{x}}.5').bind(x = 1)
+    Float(1.5)
+
+    >>> Float('{{x}}.5').bind(x = 1) == Float(1.5)
+    True
+
+    >>> contextual_object = String('{{metavar{{number}}}}').bind(
+    ... metavar1 = "first", metavar2 = "second")
+
+    >>> contextual_object
+    String({{metavar{{number}}}})
+
+    >>> contextual_object.bind(number = 1)
+    String(first)
+
+    >>> contextual_object.bind(number = 2)
+    String(second)
+
+You usually bind simple key to value pairs, but you can also bind three
+other objects: lists, dictionaries, and structurals. These will be
+described in detail later.
+
+### Structurals in Pystachio / Aurora
+
+Most Aurora/Thermos users don't ever (knowingly) interact with `String`,
+`Float`, or `Integer` Pystachio objects directly. Instead they interact
+with derived structural (`Struct`) objects that are collections of
+fundamental and structural objects. The structural object components are
+called *attributes*. Aurora's most used structural objects are `Job`,
+`Task`, and `Process`:
+
+    class Process(Struct):
+      cmdline = Required(String)
+      name = Required(String)
+      max_failures = Default(Integer, 1)
+      daemon = Default(Boolean, False)
+      ephemeral = Default(Boolean, False)
+      min_duration = Default(Integer, 5)
+      final = Default(Boolean, False)
+
+Construct default objects by following the object's type with (). If you
+want an attribute to have a value different from its default, include
+the attribute name and value inside the parentheses.
+
+    >>> Process()
+    Process(daemon=False, max_failures=1, ephemeral=False,
+      min_duration=5, final=False)
+
+Attribute values can be template variables, which then receive specific
+values when creating the object.
+
+    >>> Process(cmdline = 'echo {{message}}')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo {{message}}, final=False)
+
+    >>> Process(cmdline = 'echo {{message}}').bind(message = 'hello world')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+A powerful binding property is that all of an object's children inherit its
+bindings:
+
+    >>> List(Process)([
+    ... Process(name = '{{prefix}}_one'),
+    ... Process(name = '{{prefix}}_two')
+    ... ]).bind(prefix = 'hello')
+    ProcessList(
+      Process(daemon=False, name=hello_one, max_failures=1, ephemeral=False, min_duration=5, final=False),
+      Process(daemon=False, name=hello_two, max_failures=1, ephemeral=False, min_duration=5, final=False)
+      )
+
+Remember that an Aurora Job contains Tasks which contain Processes. A
+Job level binding is inherited by its Tasks and all their Processes.
+Similarly a Task level binding is available to that Task and its
+Processes but is *not* visible at the Job level (inheritance is a
+one-way street.)
+
+#### Mustaches Within Structurals
+
+When you define a `Struct` schema, one powerful, but confusing, feature
+is that all of that structure's attributes are Mustache variables within
+the enclosing scope *once they have been populated*.
+
+For example, when `Process` is defined above, its attributes such as
+{{`name`}}, {{`cmdline`}}, and {{`max_failures`}} are all immediately
+defined as Mustache variables, implicitly bound into the `Process`, and
+inherited by all child objects once they are defined.
+
+Thus, you can do the following:
+
+    >>> Process(name = "installer", cmdline = "echo {{name}} is running")
+    Process(daemon=False, name=installer, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo installer is running, final=False)
+
+WARNING: This binding only takes place in one direction. For example,
+the following does NOT work and does not set the `Process` `name`
+attribute's value.
+
+    >>> Process().bind(name = "installer")
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5, final=False)
+
+The following is also not possible and results in an infinite loop that
+attempts to resolve `Process.name`.
+
+    >>> Process(name = '{{name}}').bind(name = 'installer')
+
+Do not confuse Structural attributes with bound Mustache variables.
+Attributes are implicitly converted to Mustache variables but not vice
+versa.
+
+### Templating 2: Structurals Are Factories
+
+#### A Second Way of Templating
+
+A second templating method is both as powerful as the aforementioned and
+often confused with it. This method is due to automatic conversion of
+Struct attributes to Mustache variables as described above.
+
+Suppose you create a Process object:
+
+    >>> p = Process(name = "process_one", cmdline = "echo hello world")
+
+    >>> p
+    Process(daemon=False, name=process_one, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+This `Process` object, "`p`", can be used wherever a `Process` object is
+needed. It can also be reused by changing the value(s) of its
+attribute(s). Here we change its `name` attribute from `process_one` to
+`process_two`.
+
+    >>> p(name = "process_two")
+    Process(daemon=False, name=process_two, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+Template creation is a common use for this technique:
+
+    >>> Daemon = Process(daemon = True)
+    >>> logrotate = Daemon(name = 'logrotate', cmdline = './logrotate conf/logrotate.conf')
+    >>> mysql = Daemon(name = 'mysql', cmdline = 'bin/mysqld --safe-mode')
+
+### Advanced Binding
+
+As described above, `.bind()` binds simple strings or numbers to
+Mustache variables. In addition to Structural types formed by combining
+atomic types, Pystachio has two container types; `List` and `Map` which
+can also be bound via `.bind()`.
+
+#### Bind Syntax
+
+The `bind()` function can take Python dictionaries or `kwargs`
+interchangeably (when "`kwargs`" is in a function definition, `kwargs`
+receives a Python dictionary containing all keyword arguments after the
+formal parameter list).
+
+    >>> String('{{foo}}').bind(foo = 'bar') == String('{{foo}}').bind({'foo': 'bar'})
+    True
+
+Bindings done "closer" to the object in question take precedence:
+
+    >>> p = Process(name = '{{context}}_process')
+    >>> t = Task().bind(context = 'global')
+    >>> t(processes = [p, p.bind(context = 'local')])
+    Task(processes=ProcessList(
+      Process(daemon=False, name=global_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5),
+      Process(daemon=False, name=local_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5)
+    ))
+
+#### Binding Complex Objects
+
+##### Lists
+
+    >>> fibonacci = List(Integer)([1, 1, 2, 3, 5, 8, 13])
+    >>> String('{{fib[4]}}').bind(fib = fibonacci)
+    String(5)
+
+##### Maps
+
+    >>> first_names = Map(String, String)({'Kent': 'Clark', 'Wayne': 'Bruce', 'Prince': 'Diana'})
+    >>> String('{{first[Kent]}}').bind(first = first_names)
+    String(Clark)
+
+##### Structurals
+
+    >>> String('{{p.cmdline}}').bind(p = Process(cmdline = "echo hello world"))
+    String(echo hello world)
+
+### Structural Binding
+
+Use structural templates when binding more than two or three individual
+values at the Job or Task level. For fewer values, standard
+key-to-string binding is sufficient.
+
+Structural binding is a very powerful pattern and is most useful in
+Aurora/Thermos for doing Structural configuration. For example, you can
+define a job profile. The following profile uses `HDFS`, the Hadoop
+Distributed File System, to designate a file's location. `HDFS` does
+not come with Aurora, so you'll need to either install it separately
+or change the way the dataset is designated.
+
+    class Profile(Struct):
+      version = Required(String)
+      environment = Required(String)
+      dataset = Default(String, 'hdfs://home/aurora/data/{{environment}}')
+
+    PRODUCTION = Profile(version = 'live', environment = 'prod')
+    DEVEL = Profile(version = 'latest',
+                    environment = 'devel',
+                    dataset = 'hdfs://home/aurora/data/test')
+    TEST = Profile(version = 'latest', environment = 'test')
+
+    JOB_TEMPLATE = Job(
+      name = 'application',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = '{{profile.environment}}',
+      task = SequentialTask(
+        name = 'task',
+        resources = Resources(cpu = 2, ram = 4*GB, disk = 8*GB),
+        processes = [
+	  Process(name = 'main', cmdline = 'java -jar application.jar -hdfsPath '
+	          '{{profile.dataset}}')
+        ]
+       )
+     )
+
+    jobs = [
+      JOB_TEMPLATE(instances = 100).bind(profile = PRODUCTION),
+      JOB_TEMPLATE.bind(profile = DEVEL),
+      JOB_TEMPLATE.bind(profile = TEST),
+     ]
+
+In this case, a custom structural "Profile" is created to self-document
+the configuration to some degree. This also allows some schema
+"type-checking", and for default self-substitution, e.g. in
+`Profile.dataset` above.
+
+So rather than a `.bind()` with a half-dozen substituted variables, you
+can bind a single object that has sensible defaults stored in a single
+place.
+
+Configuration File Writing Tips And Best Practices
+--------------------------------------------------
+
+### Use As Few .aurora Files As Possible
+
+When creating your `.aurora` configuration, try to keep all versions of
+a particular job within the same `.aurora` file. For example, if you
+have separate jobs for `cluster1`, `cluster1` staging, `cluster1`
+testing, and `cluster2`, keep them as close together as possible.
+
+Constructs shared across multiple jobs owned by your team (e.g.
+team-level defaults or structural templates) can be split into separate
+`.aurora` files and included via the `include` directive.
+
+### Avoid Boilerplate
+
+If you see repetition or find yourself copy and pasting any parts of
+your configuration, it's likely an opportunity for templating. Take the
+example below:
+
+`redundant.aurora` contains:
+
+    download = Process(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'rm -rf Python-2.7.3 && tar xjf Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    build = Process(
+      name = 'build',
+      cmdline = 'pushd Python-2.7.3 && ./configure && make && popd',
+      max_failures = 1)
+
+    email = Process(
+      name = 'email',
+      cmdline = 'echo Success | mail feynman@tmc.com',
+      max_failures = 5,
+      min_duration = 1)
+
+    build_python = Task(
+      name = 'build_python',
+      processes = [download, unpack, build, email],
+      constraints = [Constraint(order = ['download', 'unpack', 'build', 'email'])])
+
+As you'll notice, there's a lot of repetition in the `Process`
+definitions. For example, almost every process sets `max_failures`
+to 5 and `min_duration` to 1. This is an opportunity to factor the
+common settings into a shared process template.
+
+Furthermore, the Python version is repeated everywhere. This can be
+bound via structural templating as described in the [Advanced Binding](#AdvancedBinding)
+section.
+
+`less_redundant.aurora` contains:
+
+    class Python(Struct):
+      version = Required(String)
+      base = Default(String, 'Python-{{version}}')
+      package = Default(String, '{{base}}.tar.bz2')
+
+    ReliableProcess = Process(
+      max_failures = 5,
+      min_duration = 1)
+
+    download = ReliableProcess(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/{{python.version}}/{{python.package}}')
+
+    unpack = ReliableProcess(
+      name = 'unpack',
+      cmdline = 'rm -rf {{python.base}} && tar xjf {{python.package}}')
+
+    build = ReliableProcess(
+      name = 'build',
+      cmdline = 'pushd {{python.base}} && ./configure && make && popd',
+      max_failures = 1)
+
+    email = ReliableProcess(
+      name = 'email',
+      cmdline = 'echo Success | mail {{role}}@foocorp.com')
+
+    build_python = SequentialTask(
+      name = 'build_python',
+      processes = [download, unpack, build, email]).bind(python = Python(version = "2.7.3"))
+
+### Thermos Uses bash, But Thermos Is Not bash
+
+#### Bad
+
+Many tiny Processes make for harder-to-manage configurations.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'rcp user@my_machine:my_application .'
+    )
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+#### Good
+
+Each `cmdline` runs in a bash subshell, so you have the full power of
+bash. Chaining commands with `&&` or `||` is almost always the right
+thing to do.
+
+Also, for Tasks that are simply a list of processes that run one after
+another, consider using the `SequentialTask` helper, which applies a
+linear ordering constraint for you.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'rcp user@my_machine:my_application . && unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+### Rarely Use Functions In Your Configurations
+
+90% of the time you define a function in a `.aurora` file, you're
+probably Doing It Wrong(TM).
+
+#### Bad
+
+    def get_my_task(name, user, cpu, ram, disk):
+      return Task(
+        name = name,
+        user = user,
+        processes = [STAGE_PROCESS, RUN_PROCESS],
+        constraints = order(STAGE_PROCESS, RUN_PROCESS),
+        resources = Resources(cpu = cpu, ram = ram, disk = disk)
+      )
+
+    task_one = get_my_task('task_one', 'feynman', 1.0, 32*MB, 1*GB)
+    task_two = get_my_task('task_two', 'feynman', 2.0, 64*MB, 1*GB)
+
+#### Good
+
+This one is more idiomatic. Forced keyword arguments prevent accidents,
+e.g. constructing a task with `32*MB` of disk when you meant 32 MB of
+RAM. Less proliferation of task-construction techniques makes for an
+easier-to-read, quicker-to-understand, and more composable
+configuration.
+
+    TASK_TEMPLATE = SequentialTask(
+      user = 'wickman',
+      processes = [STAGE_PROCESS, RUN_PROCESS],
+    )
+
+    task_one = TASK_TEMPLATE(
+      name = 'task_one',
+      resources = Resources(cpu = 1.0, ram = 32*MB, disk = 1*GB)
+    )
+
+    task_two = TASK_TEMPLATE(
+      name = 'task_two',
+      resources = Resources(cpu = 2.0, ram = 64*MB, disk = 1*GB)
+    )
diff --git a/source/documentation/0.7.0-incubating/contributing.md b/source/documentation/0.7.0-incubating/contributing.md
new file mode 100644
index 0000000..beb0a69
--- /dev/null
+++ b/source/documentation/0.7.0-incubating/contributing.md
@@ -0,0 +1,76 @@
+Get the Source Code
+-------------------
+First things first, you'll need the source! The Aurora source is available from Apache git:
+
+    git clone https://gitbox.apache.org/repos/asf/incubator-aurora
+
+Read the Style Guides
+---------------------
+Aurora's codebase is primarily Java and Python and conforms to the Twitter Commons styleguides for
+both languages.
+
+- [Java Style Guide](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/styleguide.md)
+- [Python Style Guide](https://github.com/twitter/commons/blob/master/src/python/twitter/common/styleguide.md)
+
+Find Something to Do
+--------------------
+There are issues in [Jira](https://issues.apache.org/jira/browse/AURORA) with the
+["newbie" label](https://issues.apache.org/jira/issues/?jql=project%20%3D%20AURORA%20AND%20labels%20%3D%20newbie%20and%20resolution%3Dunresolved)
+that are good starting places for new Aurora contributors; pick one of these and dive in! Once
+you've got a patch, the next step is to post a review.
+
+Getting your ReviewBoard Account
+--------------------------------
+Go to https://reviews.apache.org and create an account.
+
+Setting up your ReviewBoard Environment
+---------------------------------------
+Run `./rbt status`. The first time this runs it will bootstrap and you will be asked to log in.
+Subsequent runs will cache your login credentials.
+
+Submitting a Patch for Review
+-----------------------------
+Post a review with `rbt`, fill out the fields in your browser and hit Publish.
+
+    ./rbt post -o
+
+Once you've done this, you probably want to mark the associated Jira issue as Reviewable.
+
+Updating an Existing Review
+---------------------------
+Incorporate review feedback, make some more commits, update your existing review, fill out the
+fields in your browser and hit Publish.
+
+    ./rbt post -o -r <RB_ID>
+
+Merging Your Own Review (Committers)
+------------------------------------
+Once you have shipits from the right committers, merge your changes in a single commit and mark
+the review as submitted. The typical workflow is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>  # Verify the automatically-generated commit message looks sane,
+                            # editing if necessary.
+    git show master         # Verify everything looks sane
+    git push origin master
+    ./rbt close <RB_ID>
+
+Note that even if you're developing using feature branches you will not use `git merge` - each
+commit will be an atomic change accompanied by a ReviewBoard entry.
+
+Merging Someone Else's Review
+-----------------------------
+Sometimes you'll need to merge someone else's RB. The typical workflow for this is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>
+    git show master  # Verify everything looks sane, author is correct
+    git push origin master
+
+Cleaning Up
+-----------
+Your patch has landed, congratulations! The last thing you'll want to do before moving on to your
+next fix is to clean up your Jira and ReviewBoard. The former should be marked as
+"Resolved" and the latter as "Submitted".
diff --git a/source/documentation/0.7.0-incubating/cron-jobs.md b/source/documentation/0.7.0-incubating/cron-jobs.md
new file mode 100644
index 0000000..9940bc5
--- /dev/null
+++ b/source/documentation/0.7.0-incubating/cron-jobs.md
@@ -0,0 +1,131 @@
+# Cron Jobs
+
+Aurora supports execution of scheduled jobs on a Mesos cluster using cron-style syntax.
+
+- [Overview](#overview)
+- [Collision Policies](#collision-policies)
+	- [KILL_EXISTING](#kill_existing)
+	- [CANCEL_NEW](#cancel_new)
+- [Failure recovery](#failure-recovery)
+- [Interacting with cron jobs via the Aurora CLI](#interacting-with-cron-jobs-via-the-aurora-cli)
+	- [cron schedule](#cron-schedule)
+	- [cron deschedule](#cron-deschedule)
+	- [cron start](#cron-start)
+	- [job killall, job restart, job kill](#job-killall-job-restart-job-kill)
+- [Technical Note About Syntax](#technical-note-about-syntax)
+- [Caveats](#caveats)
+	- [Failovers](#failovers)
+	- [Collision policy is best-effort](#collision-policy-is-best-effort)
+	- [Timezone Configuration](#timezone-configuration)
+
+## Overview
+
+A job is identified as a cron job by the presence of a
+`cron_schedule` attribute containing a cron-style schedule in the
+[`Job`](/documentation/0.7.0-incubating/configuration-reference/#job-objects) object. Examples of cron schedules
+include "every 5 minutes" (`*/5 * * * *`), "Fridays at 17:00" (`* 17 * * FRI`), and
+"the 1st and 15th day of the month at 03:00" (`0 3 1,15 *`).
+
+Example (available in the [Vagrant environment](/documentation/0.7.0-incubating/vagrant/)):
+
+    $ cat /vagrant/examples/job/cron_hello_world.aurora
+    # cron_hello_world.aurora
+    # A cron job that runs every 5 minutes.
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]
+
+## Collision Policies
+
+The `cron_collision_policy` field specifies the scheduler's behavior when a new cron job is
+triggered while an older run hasn't finished. The scheduler has two policies available,
+[KILL_EXISTING](#kill_existing) and [CANCEL_NEW](#cancel_new).
+
+### KILL_EXISTING
+
+The default policy - on a collision the old instances are killed and new instances with the current
+configuration are started.
+
+### CANCEL_NEW
+
+On a collision the new run is cancelled.
+
+Note that the use of this flag is likely a code smell - interrupted cron jobs should be able
+to recover their progress on a subsequent invocation, otherwise they risk having their work queue
+grow faster than they can process it.
+
+## Failure recovery
+
+Unlike with services, which Aurora will always re-execute regardless of exit status, instances of
+cron jobs retry according to the `max_task_failures` attribute of the
+[Task](/documentation/0.7.0-incubating/configuration-reference/#task-objects) object. To get "run-until-success" semantics,
+set `max_task_failures` to `-1`.
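+
+As a sketch, a cron task that should keep retrying until it succeeds might set the
+attribute like this (the task and process names here are illustrative):
+
+    nightly_report = Task(
+      name = 'nightly_report',
+      # Retry the process indefinitely within a single cron invocation.
+      max_task_failures = -1,
+      resources = Resources(cpu = 1.0, ram = 1*GB, disk = 1*GB),
+      processes = [Process(name = 'report', cmdline = './generate_report.sh')])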
+
+## Interacting with cron jobs via the Aurora CLI
+
+Most interaction with cron jobs takes place using the `cron` subcommand. See `aurora cron -h`
+for up-to-date usage instructions.
+
+### cron schedule
+Schedules a new cron job on the Aurora cluster for later runs or replaces the existing cron template
+with a new one. Only future runs will be affected; any existing active tasks are left intact.
+
+    $ aurora cron schedule devcluster/www-data/test/cron_hello_world /vagrant/examples/jobs/cron_hello_world.aurora
+
+### cron deschedule
+Deschedules a cron job, preventing future runs but allowing current runs to complete.
+
+    $ aurora cron deschedule devcluster/www-data/test/cron_hello_world
+
+### cron start
+Start a cron job immediately, outside of its normal cron schedule.
+
+    $ aurora cron start devcluster/www-data/test/cron_hello_world
+
+### job killall, job restart, job kill
+Cron jobs create instances running on the cluster that you can interact with like normal Aurora
+tasks with `job kill` and `job restart`.
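+
+For example, assuming the `cron_hello_world` job from above currently has running instances:
+
+    $ aurora job restart devcluster/www-data/test/cron_hello_world
+    $ aurora job killall devcluster/www-data/test/cron_hello_world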
+
+## Technical Note About Syntax
+
+`cron_schedule` uses a restricted subset of BSD crontab syntax. While the
+execution engine currently uses Quartz, the schedule parsing is custom, a subset of FreeBSD
+[crontab(5)](http://www.freebsd.org/cgi/man.cgi?crontab(5)) syntax. See
+[the source](https://github.com/apache/incubator-aurora/blob/master/src/main/java/org/apache/aurora/scheduler/cron/CrontabEntry.java#L106-L124)
+for details.
+
+## Caveats
+
+### Failovers
+No failover recovery. Aurora does not record the latest minute it fired
+triggers for across failovers. Therefore it's possible to miss triggers
+on failover. Note that this behavior may change in the future.
+
+It's necessary to sync time between schedulers with something like `ntpd`.
+Clock skew could cause double or missed triggers in the case of a failover.
+
+### Collision policy is best-effort
+Aurora aims to always have *at least one copy* of a given instance running at a time - it's
+an AP system, meaning it chooses Availability and Partition Tolerance at the expense of
+Consistency.
+
+If your collision policy is `CANCEL_NEW` and a task has terminated but
+Aurora has not yet noticed, Aurora will go ahead and create your new
+task.
+
+If your collision policy is `KILL_EXISTING` and a task was marked `LOST`
+but not yet garbage-collected, Aurora will go ahead and create your new
+task without attempting to kill the old one (outside the GC interval).
+
+### Timezone Configuration
+Cron timezone is configured independently of JVM timezone with the `-cron_timezone` flag and
+defaults to UTC.
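+
+As a sketch, an operator who wants cron schedules interpreted in Pacific time could add the
+flag to the scheduler startup script (following the `AURORA_FLAGS` convention used in the
+deployment guide):
+
+    AURORA_FLAGS=(
+      # ...
+      -cron_timezone=America/Los_Angeles
+      # ...
+    )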
diff --git a/source/documentation/0.7.0-incubating/deploying-aurora-scheduler.md b/source/documentation/0.7.0-incubating/deploying-aurora-scheduler.md
new file mode 100644
index 0000000..0730cdc
--- /dev/null
+++ b/source/documentation/0.7.0-incubating/deploying-aurora-scheduler.md
@@ -0,0 +1,277 @@
+# Deploying the Aurora Scheduler
+
+When setting up your cluster, you will install the scheduler on a small number (usually 3 or 5) of
+machines.  This guide helps you get the scheduler set up and troubleshoot some common hurdles.
+
+- [Installing Aurora](#installing-aurora)
+  - [Creating the Distribution .zip File (Optional)](#creating-the-distribution-zip-file-optional)
+  - [Installing Aurora](#installing-aurora-1)
+- [Configuring Aurora](#configuring-aurora)
+  - [A Note on Configuration](#a-note-on-configuration)
+  - [Replicated Log Configuration](#replicated-log-configuration)
+  - [Initializing the Replicated Log](#initializing-the-replicated-log)
+  - [Storage Performance Considerations](#storage-performance-considerations)
+  - [Network considerations](#network-considerations)
+  - [Considerations for running jobs in docker containers](#considerations-for-running-jobs-in-docker-containers)
+- [Running Aurora](#running-aurora)
+  - [Maintaining an Aurora Installation](#maintaining-an-aurora-installation)
+  - [Monitoring](#monitoring)
+  - [Running stateful services](#running-stateful-services)
+    - [Dedicated attribute](#dedicated-attribute)
+      - [Syntax](#syntax)
+      - [Example](#example)
+- [Common problems](#common-problems)
+  - [Replicated log not initialized](#replicated-log-not-initialized)
+    - [Symptoms](#symptoms)
+    - [Solution](#solution)
+  - [Scheduler not registered](#scheduler-not-registered)
+    - [Symptoms](#symptoms-1)
+    - [Solution](#solution-1)
+  - [Tasks are stuck in PENDING forever](#tasks-are-stuck-in-pending-forever)
+    - [Symptoms](#symptoms-2)
+    - [Solution](#solution-2)
+
+## Installing Aurora
+The Aurora scheduler is a standalone Java server. As part of the build process it creates a bundle
+of all its dependencies, with the notable exceptions of the JVM and libmesos. Each target server
+should have a JVM (Java 7 or higher) and libmesos (0.20.1) installed.
+
+### Creating the Distribution .zip File (Optional)
+To create a distribution for installation you will need build tools installed. On Ubuntu this can be
+done with `sudo apt-get install build-essential default-jdk`.
+
+    git clone http://gitbox.apache.org/repos/asf/incubator-aurora.git
+    cd incubator-aurora
+    ./gradlew distZip
+
+Copy the generated `dist/distributions/aurora-scheduler-*.zip` to each node that will run a scheduler.
+
+### Installing Aurora
+Extract the aurora-scheduler zip file. The example configurations assume it is extracted to
+`/usr/local/aurora-scheduler`.
+
+    sudo unzip dist/distributions/aurora-scheduler-*.zip -d /usr/local
+    sudo ln -nfs "$(ls -dt /usr/local/aurora-scheduler-* | head -1)" /usr/local/aurora-scheduler
+
+## Configuring Aurora
+
+### A Note on Configuration
+Like Mesos, Aurora uses command-line flags for runtime configuration. As such the Aurora
+"configuration file" is typically a `scheduler.sh` shell script of the following form:
+
+    #!/bin/bash
+    AURORA_HOME=/usr/local/aurora-scheduler
+
+    # Flags controlling the JVM.
+    JAVA_OPTS=(
+      -Xmx2g
+      -Xms2g
+      # GC tuning, etc.
+    )
+
+    # Flags controlling the scheduler.
+    AURORA_FLAGS=(
+      -http_port=8081
+      # Log configuration, etc.
+    )
+
+    # Environment variables controlling libmesos
+    export JAVA_HOME=...
+    export GLOG_v=1
+    export LIBPROCESS_PORT=8083
+
+    JAVA_OPTS="${JAVA_OPTS[*]}" exec "$AURORA_HOME/bin/aurora-scheduler" "${AURORA_FLAGS[@]}"
+
+That way Aurora's current flags are visible in `ps` and in the `/vars` admin endpoint.
+
+Examples are available under `examples/scheduler/`. For a list of available Aurora flags and their
+documentation run
+
+    /usr/local/aurora-scheduler/bin/aurora-scheduler -help
+
+### Replicated Log Configuration
+All Aurora state is persisted to a replicated log. This includes all jobs Aurora is running,
+where in the cluster they are being run, and the configuration for running them, as well as
+other information such as metadata needed to reconnect to the Mesos master, resource quotas,
+and any locks in place.
+
+Aurora schedulers use ZooKeeper to discover log replicas and elect a leader. Only one scheduler is
+leader at a given time - the other schedulers follow log writes and prepare to take over as leader
+but do not communicate with the Mesos master. Either 3 or 5 schedulers are recommended in a
+production deployment depending on failure tolerance and they must have persistent storage.
+
+In a cluster with `N` schedulers, the flag `-native_log_quorum_size` should be set to
+`floor(N/2) + 1`. So in a cluster with 1 scheduler it should be set to `1`, in a cluster with 3 it
+should be set to `2`, and in a cluster of 5 it should be set to `3`.
+
+  Number of schedulers (N) | `-native_log_quorum_size` setting (`floor(N/2) + 1`)
+  ------------------------ | -----------------------------------------------------
+  1                        | 1
+  3                        | 2
+  5                        | 3
+  7                        | 4
+
+*Incorrectly setting this flag will cause data corruption to occur!*
+
+See [this document](/documentation/0.7.0-incubating/storage-config/#scheduler-storage-configuration-flags) for more replicated
+log and storage configuration options.
+
+### Initializing the Replicated Log
+Before you start Aurora you will also need to initialize the log on the first master.
+
+    mesos-log initialize --path="$AURORA_HOME/scheduler/db"
+
+Failing to do this will result in the following message when you try to start the scheduler.
+
+    Replica in EMPTY status received a broadcasted recover request
+
+### Storage Performance Considerations
+
+See [this document](/documentation/0.7.0-incubating/scheduler-storage/) for scheduler storage performance considerations.
+
+### Network considerations
+The Aurora scheduler listens on 2 ports - an HTTP port used for client RPCs and a web UI,
+and a libprocess (HTTP+Protobuf) port used to communicate with the Mesos master and for the log
+replication protocol. These can be left unconfigured (the scheduler publishes all selected ports
+to ZooKeeper) or explicitly set in the startup script as follows:
+
+    # ...
+    AURORA_FLAGS=(
+      # ...
+      -http_port=8081
+      # ...
+    )
+    # ...
+    export LIBPROCESS_PORT=8083
+    # ...
+
+### Considerations for running jobs in docker containers
+*Note: Docker support is currently EXPERIMENTAL.*
+
+In order for Aurora to launch jobs using docker containers, a few extra configuration options
+must be set.  The [docker containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/)
+must be enabled on the mesos slaves by launching them with the `--containerizers=docker,mesos` option.
+
+By default, Aurora will configure Mesos to copy the file specified in `-thermos_executor_path`
+into the container's sandbox.  If using a wrapper script to launch the thermos executor,
+specify the path to the wrapper in that argument.  In addition, the executor pex itself must be
+included in the `-thermos_executor_resources` option.  Doing so will ensure that both the wrapper
+script and executor are correctly copied into the sandbox.  Also, ensure the wrapper script does
+not access resources outside of the sandbox, because when running inside a docker container they
+will not exist.
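+
+A sketch of the relevant scheduler flags (the executor paths shown are illustrative):
+
+    AURORA_FLAGS=(
+      # ...
+      # Wrapper script copied into the container sandbox and invoked by Mesos.
+      -thermos_executor_path=/usr/local/bin/thermos_executor_wrapper.sh
+      # The executor pex itself, listed so it is copied alongside the wrapper.
+      -thermos_executor_resources=/usr/local/bin/thermos_executor.pex
+      # ...
+    )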
+
+In order to correctly execute processes inside a job, the docker container must have python 2.7 installed.
+
+## Running Aurora
+Configure a supervisor like [Monit](http://mmonit.com/monit/) or
+[supervisord](http://supervisord.org/) to run the created `scheduler.sh` file and restart it
+whenever it fails. Aurora expects to be restarted by an external process when it fails. Aurora
+supports an active health checking protocol on its admin HTTP interface - if a `GET /health` times
+out or returns anything other than `200 OK` the scheduler process is unhealthy and should be
+restarted.
+
+For example, monit can be configured with
+
+    if failed port 8081 send "GET /health HTTP/1.0\r\n" expect "OK\n" with timeout 2 seconds for 10 cycles then restart
+
+assuming you set `-http_port=8081`.
+
+### Maintaining an Aurora Installation
+
+### Monitoring
+Please see our dedicated [monitoring guide](/documentation/0.7.0-incubating/monitoring/) for in-depth discussion on monitoring.
+
+### Running stateful services
+Aurora is best suited to run stateless applications, but it also accommodates stateful services
+like databases, or services that otherwise need to always run on the same machines.
+
+#### Dedicated attribute
+The Mesos slave has the `--attributes` command line argument which can be used to mark a slave with
+static attributes (not to be confused with `--resources`, which are dynamic and accounted for).
+
+Aurora makes these attributes available for matching with scheduling
+[constraints](/documentation/0.7.0-incubating/configuration-reference/#specifying-scheduling-constraints).  Most of these
+constraints are arbitrary and available for custom use.  There is one exception, though: the
+`dedicated` attribute.  Aurora treats this attribute specially: only jobs with a matching
+`dedicated` constraint may run on these machines, and those jobs will be scheduled only on
+these machines.
+
+##### Syntax
+The dedicated attribute has semantic meaning. The format is `$role(/.*)?`. When a job is created,
+the scheduler requires that the `$role` component matches the `role` field in the job
+configuration, and will reject the job creation otherwise.  The remainder of the attribute is
+free-form. We've developed the idiom of formatting this attribute as `$role/$job`, but do not
+enforce this.
+
+##### Example
+Consider the following slave command line:
+
+    mesos-slave --attributes="host:$HOST;rack:$RACK;dedicated:db_team/redis" ...
+
+And this job configuration:
+
+    Service(
+      name = 'redis',
+      role = 'db_team',
+      constraints = {
+        'dedicated': 'db_team/redis'
+      },
+      ...
+    )
+
+The job configuration is indicating that it should only be scheduled on slaves with the attribute
+`dedicated:db_team/redis`.  Additionally, Aurora will prevent any tasks that do _not_ have that
+constraint from running on those slaves.
+
+
+## Common problems
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions here to help get you moving.
+
+### Replicated log not initialized
+
+#### Symptoms
+- Scheduler RPCs and web interface claim `Storage is not READY`
+- Scheduler log repeatedly prints messages like
+
+  ```
+  I1016 16:12:27.234133 26081 replica.cpp:638] Replica in EMPTY status
+  received a broadcasted recover request
+  I1016 16:12:27.234256 26084 recover.cpp:188] Received a recover response
+  from a replica in EMPTY status
+  ```
+
+#### Solution
+When you create a new cluster, you need to inform a quorum of schedulers that they are safe to
+consider their database to be empty by [initializing](#initializing-the-replicated-log) the
+replicated log. This is done to prevent the scheduler from modifying the cluster state in the event
+of multiple simultaneous disk failures or, more likely, misconfiguration of the replicated log path.
+
+### Scheduler not registered
+
+#### Symptoms
+Scheduler log contains
+
+    Framework has not been registered within the tolerated delay.
+
+#### Solution
+Double-check that the scheduler is configured correctly to reach the master. If you are registering
+the master in ZooKeeper, make sure the command line argument to the master:
+
+    --zk=zk://$ZK_HOST:2181/mesos/master
+
+is the same as the one on the scheduler:
+
+    -mesos_master_address=zk://$ZK_HOST:2181/mesos/master
+
+### Tasks are stuck in `PENDING` forever
+
+#### Symptoms
+The scheduler is registered and [receiving offers](/documentation/0.7.0-incubating/monitoring/#scheduler_resource_offers),
+but tasks are perpetually shown as `PENDING - Constraint not satisfied: host`.
+
+#### Solution
+Check that your slaves are configured with `host` and `rack` attributes.  Aurora requires that
+slaves are tagged with these two common failure domains to ensure that it can safely place tasks
+such that jobs are resilient to failure.
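+
+For example, slaves might be launched with something like (the attribute values are illustrative):
+
+    mesos-slave --attributes="host:$HOST;rack:$RACK" ...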
+
+See our [vagrant example](examples/vagrant/upstart/mesos-slave.conf) for details.
diff --git a/source/documentation/0.7.0-incubating/design/command-hooks.md b/source/documentation/0.7.0-incubating/design/command-hooks.md
new file mode 100644
index 0000000..3f3f70f
--- /dev/null
+++ b/source/documentation/0.7.0-incubating/design/command-hooks.md
@@ -0,0 +1,102 @@
+# Command Hooks for the Aurora Client
+
+## Introduction/Motivation
+
+We've got hooks in the client that surround API calls. These are
+pretty awkward, because they don't correlate with user actions. For
+example, suppose we wanted a policy that said users weren't allowed to
+kill all instances of a production job at once.
+
+Right now, all that we could hook would be the "killJob" api call. But
+kill (at least in newer versions of the client) normally runs in
+batches. If a user called killall, what we would see on the API level
+is a series of "killJob" calls, each of which specified a batch of
+instances. We wouldn't be able to distinguish between really killing
+all instances of a job (which is forbidden under this policy), and
+carefully killing in batches (which is permitted). In each case, the
+hook would just see a series of API calls, and couldn't find out what
+the actual command being executed was!
+
+For most policy enforcement, what we really want to be able to do is
+look at and vet the commands that a user is performing, not the API
+calls that the client uses to implement those commands.
+
+So I propose that we add a new kind of hook, which surrounds noun/verb
+commands. A hook will register itself to handle a collection of (noun,
+verb) pairs. Whenever any of those noun/verb commands is invoked, the
+hook's methods will be called around the execution of the verb. A
+pre-hook will have the ability to reject a command, preventing the
+verb from being executed.
+
+## Registering Hooks
+
+These hooks will be registered via configuration plugins. A configuration plugin
+can register hooks using an API. Hooks registered this way are, effectively,
+hardwired into the client executable.
+
+The order of execution of hooks is unspecified: they may be called in
+any order. There is no way to guarantee that one hook will execute
+before some other hook.
+
+
+### Global Hooks
+
+Hooks registered by the Python call are called _global_ hooks,
+because they will run for all configurations, whether or not they
+specify any hooks in the configuration file.
+
+In the implementation, hooks are registered in the module
+`apache.aurora.client.cli.command_hooks`, using the class
+`GlobalCommandHookRegistry`. A global hook can be registered by calling
+`GlobalCommandHookRegistry.register_command_hook` in a configuration plugin.
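+
+A minimal sketch of such a registration (the `KillallPolicyHook` class is an illustrative
+`CommandHook` subclass; the interface itself is shown in the next section):
+
+    from apache.aurora.client.cli.command_hooks import GlobalCommandHookRegistry
+
+    # Called from a configuration plugin so the hook is wired into the client.
+    GlobalCommandHookRegistry.register_command_hook(KillallPolicyHook())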
+
+### The API
+
+    class CommandHook(object):
+      @property
+      def name(self):
+        """Returns a name for the hook."""
+
+      def get_nouns(self):
+        """Return the nouns that have verbs that should invoke this hook."""
+
+      def get_verbs(self, noun):
+        """Return the verbs for a particular noun that should invoke his hook."""
+
+      @abstractmethod
+      def pre_command(self, noun, verb, context, commandline):
+        """Execute a hook before invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        Returns: True if the command should be allowed to proceed; False if the command
+        should be rejected.
+        """
+
+      def post_command(self, noun, verb, context, commandline, result):
+        """Execute a hook after invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        * result: the result code returned by the verb.
+        Returns: nothing
+        """
+
+    class GlobalCommandHookRegistry(object):
+      @classmethod
+      def register_command_hook(cls, hook):
+        pass
+
+### Skipping Hooks
+
+To skip a hook, a user uses a command-line option, `--skip-hooks`. The option can either
+specify specific hooks to skip, or "all":
+
+* `aurora --skip-hooks=all job create east/bozo/devel/myjob` will create a job
+  without running any hooks.
+* `aurora --skip-hooks=test,iq create east/bozo/devel/myjob` will create a job,
+  and will skip only the hooks named "test" and "iq".
diff --git a/source/documentation/0.7.0-incubating/developing-aurora-client.md b/source/documentation/0.7.0-incubating/developing-aurora-client.md
new file mode 100644
index 0000000..13bc3c3
--- /dev/null
+++ b/source/documentation/0.7.0-incubating/developing-aurora-client.md
@@ -0,0 +1,89 @@
+Getting Started
+===============
+
+The client is written in Python, and uses the
+[Pants](http://pantsbuild.github.io/python-readme.html) build tool.
+
+Client Configuration
+====================
+
+The client uses a configuration file that specifies available clusters. More information about the
+contents of this file can be found in the
+[Client Cluster Configuration](/documentation/0.7.0-incubating/client-cluster-configuration/) documentation. Information about
+how the client locates this file can be found in the
+[Client Commands](/documentation/0.7.0-incubating/client-commands/#cluster-configuration) documentation.
+
+Building and Testing the Client
+===============================
+
+Building and testing the client code are both done using Pants. The relevant targets to know about
+are:
+
+   * Build a client executable: `./pants binary src/main/python/apache/aurora/client/cli:aurora`
+   * Test client code: `./pants test src/test/python/apache/aurora/client/cli:all`
+
+Running/Debugging the Client
+============================
+
+For manually testing client changes against a cluster, we use [Vagrant](https://www.vagrantup.com/).
+To start a virtual cluster, you need to install Vagrant, and then run `vagrant up` for the root of
+the aurora workspace. This will create a vagrant host named "devcluster", with a mesos master, a set
+of mesos slaves, and an aurora scheduler.
+
+If you have changes you would like to test in your local cluster, you'll need to rebuild the client:
+
+    vagrant ssh -c 'aurorabuild client'
+
+Once this completes, the `aurora` command will reflect your changes.
+
+Running/Debugging the Client in PyCharm
+=======================================
+
+It's possible to use PyCharm to run and debug both the client and client tests in an IDE. In order
+to do this, first run:
+
+    build-support/python/make-pycharm-virtualenv
+
+This script will configure a virtualenv with all of our Python requirements. Once the script
+completes it will emit instructions for configuring PyCharm:
+
+    Your PyCharm environment is now set up.  You can open the project root
+    directory with PyCharm.
+
+    Once the project is loaded:
+      - open project settings
+      - click 'Project Interpreter'
+      - click the cog in the upper-right corner
+      - click 'Add Local'
+      - select 'build-support/python/pycharm.venv/bin/python'
+      - click 'OK'
+
+### Running/Debugging Tests
+
+After following these instructions, you should now be able to run/debug tests directly from the IDE
+by right-clicking on a test (or test class) and choosing to run or debug:
+
+<a href="images/debug-client-test.png"><img src="images/debug-client-test.png" width="697" height="444" /></a>
+
+If you've set a breakpoint, you can see the run will now stop and let you debug:
+
+<a href="images/debugging-client-test.png"><img src="images/debugging-client-test.png" width="697" height="444" /></a>
+
+### Running/Debugging the Client
+
+Actually running and debugging the client is unfortunately a bit more complex. You'll need to create
+a Run configuration:
+
+* Go to Run → Edit Configurations
+* Click the + icon to add a new configuration.
+* Choose python and name the configuration 'client'.
+* Set the script path to `/your/path/to/aurora/src/main/python/apache/aurora/client/cli/client.py`
+* Set the script parameters to the command you want to run (e.g. `job status <job key>`)
+* Expand the Environment section and click the ellipsis to add a new environment variable
+* Click the + at the bottom to add a new variable named AURORA_CONFIG_ROOT whose value is the
+  path where your cluster configuration can be found. For example, to talk to the scheduler
+  running in the vagrant image, it would be set to `/your/path/to/aurora/examples/vagrant` (this
+  is the directory where our example clusters.json is found).
+* You should now be able to run and debug this configuration!
+
+
diff --git a/source/documentation/latest/developing-aurora-scheduler.md b/source/documentation/0.7.0-incubating/developing-aurora-scheduler.md
similarity index 100%
copy from source/documentation/latest/developing-aurora-scheduler.md
copy to source/documentation/0.7.0-incubating/developing-aurora-scheduler.md
diff --git a/source/documentation/latest/hooks.md b/source/documentation/0.7.0-incubating/hooks.md
similarity index 100%
copy from source/documentation/latest/hooks.md
copy to source/documentation/0.7.0-incubating/hooks.md
diff --git a/publish/documentation/latest/images/CPUavailability.png b/source/documentation/0.7.0-incubating/images/CPUavailability.png
similarity index 100%
copy from publish/documentation/latest/images/CPUavailability.png
copy to source/documentation/0.7.0-incubating/images/CPUavailability.png
Binary files differ
diff --git a/publish/documentation/latest/images/HelloWorldJob.png b/source/documentation/0.7.0-incubating/images/HelloWorldJob.png
similarity index 100%
copy from publish/documentation/latest/images/HelloWorldJob.png
copy to source/documentation/0.7.0-incubating/images/HelloWorldJob.png
Binary files differ
diff --git a/publish/documentation/latest/images/RoleJobs.png b/source/documentation/0.7.0-incubating/images/RoleJobs.png
similarity index 100%
copy from publish/documentation/latest/images/RoleJobs.png
copy to source/documentation/0.7.0-incubating/images/RoleJobs.png
Binary files differ
diff --git a/publish/documentation/latest/images/ScheduledJobs.png b/source/documentation/0.7.0-incubating/images/ScheduledJobs.png
similarity index 100%
copy from publish/documentation/latest/images/ScheduledJobs.png
copy to source/documentation/0.7.0-incubating/images/ScheduledJobs.png
Binary files differ
diff --git a/publish/documentation/latest/images/TaskBreakdown.png b/source/documentation/0.7.0-incubating/images/TaskBreakdown.png
similarity index 100%
copy from publish/documentation/latest/images/TaskBreakdown.png
copy to source/documentation/0.7.0-incubating/images/TaskBreakdown.png
Binary files differ
diff --git a/publish/documentation/latest/images/aurora_hierarchy.png b/source/documentation/0.7.0-incubating/images/aurora_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/aurora_hierarchy.png
copy to source/documentation/0.7.0-incubating/images/aurora_hierarchy.png
Binary files differ
diff --git a/source/documentation/0.7.0-incubating/images/debug-client-test.png b/source/documentation/0.7.0-incubating/images/debug-client-test.png
new file mode 100644
index 0000000..8b67a10
--- /dev/null
+++ b/source/documentation/0.7.0-incubating/images/debug-client-test.png
Binary files differ
diff --git a/source/documentation/0.7.0-incubating/images/debugging-client-test.png b/source/documentation/0.7.0-incubating/images/debugging-client-test.png
new file mode 100644
index 0000000..df7b7e3
--- /dev/null
+++ b/source/documentation/0.7.0-incubating/images/debugging-client-test.png
Binary files differ
diff --git a/publish/documentation/latest/images/killedtask.png b/source/documentation/0.7.0-incubating/images/killedtask.png
similarity index 100%
copy from publish/documentation/latest/images/killedtask.png
copy to source/documentation/0.7.0-incubating/images/killedtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/lifeofatask.png b/source/documentation/0.7.0-incubating/images/lifeofatask.png
similarity index 100%
copy from publish/documentation/latest/images/lifeofatask.png
copy to source/documentation/0.7.0-incubating/images/lifeofatask.png
Binary files differ
diff --git a/publish/documentation/latest/images/runningtask.png b/source/documentation/0.7.0-incubating/images/runningtask.png
similarity index 100%
copy from publish/documentation/latest/images/runningtask.png
copy to source/documentation/0.7.0-incubating/images/runningtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/stderr.png b/source/documentation/0.7.0-incubating/images/stderr.png
similarity index 100%
copy from publish/documentation/latest/images/stderr.png
copy to source/documentation/0.7.0-incubating/images/stderr.png
Binary files differ
diff --git a/publish/documentation/latest/images/stdout.png b/source/documentation/0.7.0-incubating/images/stdout.png
similarity index 100%
copy from publish/documentation/latest/images/stdout.png
copy to source/documentation/0.7.0-incubating/images/stdout.png
Binary files differ
diff --git a/publish/documentation/latest/images/storage_hierarchy.png b/source/documentation/0.7.0-incubating/images/storage_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/storage_hierarchy.png
copy to source/documentation/0.7.0-incubating/images/storage_hierarchy.png
Binary files differ
diff --git a/source/documentation/0.7.0-incubating/index.html.md b/source/documentation/0.7.0-incubating/index.html.md
new file mode 100644
index 0000000..975bed2
--- /dev/null
+++ b/source/documentation/0.7.0-incubating/index.html.md
@@ -0,0 +1,33 @@
+## Introduction
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run long-running services that take advantage of Apache Mesos' scalability, fault-tolerance, and resource isolation. This documentation has been organized into sections with three audiences in mind:
+ 
+ * Users: General information about the project and to learn how to run an Aurora job.
+ * Operators: For those that wish to manage and fine-tune an Aurora cluster.
+ * Developers: All the information you need to start modifying Aurora and contributing back to the project.
+
+This documentation is a work in progress, and we encourage you to ask questions on the [Aurora developer list](http://aurora.incubator.apache.org/community/) or the `#aurora` IRC channel on `irc.freenode.net`.
+
+## Users
+ * [Install Aurora on virtual machines on your private machine](/documentation/0.7.0-incubating/vagrant/)
+ * [Hello World Tutorial](/documentation/0.7.0-incubating/tutorial/)
+ * [User Guide](/documentation/0.7.0-incubating/user-guide/)
+ * [Configuration Tutorial](/documentation/0.7.0-incubating/configuration-tutorial/)
+ * [Aurora + Thermos Reference](/documentation/0.7.0-incubating/configuration-reference/)
+ * [Command Line Client](/documentation/0.7.0-incubating/client-commands/)
+ * [Cron Jobs](/documentation/0.7.0-incubating/cron-jobs/)
+
+## Operators
+ * [Deploy Aurora](/documentation/0.7.0-incubating/deploying-aurora-scheduler/)
+ * [Monitoring](/documentation/0.7.0-incubating/monitoring/)
+ * [Hooks for Aurora Client API](/documentation/0.7.0-incubating/hooks/)
+ * [Scheduler Storage](/documentation/0.7.0-incubating/storage/)
+ * [Scheduler Storage and Maintenance](/documentation/0.7.0-incubating/storage-config/)
+ * [SLA Measurement](/documentation/0.7.0-incubating/sla/)
+ * [Resource Isolation and Sizing](/documentation/0.7.0-incubating/resource-isolation/)
+ * [Generating test resources](/documentation/0.7.0-incubating/test-resource-generation/)
+
+## Developers
+ * [Contributing to the project](/documentation/0.7.0-incubating/CONTRIBUTING/)
+ * [Developing the Aurora Scheduler](/documentation/0.7.0-incubating/developing-aurora-scheduler/)
+ * [Developing the Aurora Client](/documentation/0.7.0-incubating/developing-aurora-client/)
+ * [Committers Guide](/documentation/0.7.0-incubating/committers/)
diff --git a/source/documentation/0.7.0-incubating/index2.html.md b/source/documentation/0.7.0-incubating/index2.html.md
new file mode 100644
index 0000000..0b6a083
--- /dev/null
+++ b/source/documentation/0.7.0-incubating/index2.html.md
@@ -0,0 +1,33 @@
+## Introduction
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run long-running services that take advantage of Apache Mesos' scalability, fault-tolerance, and resource isolation. This documentation has been organized into sections with three audiences in mind:
+ 
+ * Users: General information about the project and to learn how to run an Aurora job.
+ * Operators: For those that wish to manage and fine-tune an Aurora cluster.
+ * Developers: All the information you need to start modifying Aurora and contributing back to the project.
+
+This documentation is a work in progress, and we encourage you to ask questions on the [Aurora developer list](http://aurora.incubator.apache.org/community/) or the `#aurora` IRC channel on `irc.freenode.net`.
+
+## Users
+ * [Install Aurora on virtual machines on your private machine](/documentation/0.7.0-incubating//)
+ * [Hello World Tutorial](/documentation/0.7.0-incubating//)
+ * [User Guide](/documentation/0.7.0-incubating//)
+ * [Configuration Tutorial](/documentation/0.7.0-incubating//)
+ * [Aurora + Thermos Reference](/documentation/0.7.0-incubating//)
+ * [Command Line Client](/documentation/0.7.0-incubating//)
+ * [Cron Jobs](/documentation/0.7.0-incubating//)
+
+## Operators
+ * [Deploy Aurora](/documentation/0.7.0-incubating//)
+ * [Monitoring](/documentation/0.7.0-incubating//)
+ * [Hooks for Aurora Client API](/documentation/0.7.0-incubating//)
+ * [Scheduler Storage](/documentation/0.7.0-incubating//)
+ * [Scheduler Storage and Maintenance](/documentation/0.7.0-incubating//)
+ * [SLA Measurement](/documentation/0.7.0-incubating//)
+ * [Resource Isolation and Sizing](/documentation/0.7.0-incubating//)
+ * [Generating test resources](/documentation/0.7.0-incubating//)
+
+## Developers
+ * [Contributing to the project](/documentation/0.7.0-incubating//)
+ * [Developing the Aurora Scheduler](/documentation/0.7.0-incubating//)
+ * [Developing the Aurora Client](/documentation/0.7.0-incubating//)
+ * [Committers Guide](/documentation/0.7.0-incubating//)
diff --git a/source/documentation/latest/monitoring.md b/source/documentation/0.7.0-incubating/monitoring.md
similarity index 100%
copy from source/documentation/latest/monitoring.md
copy to source/documentation/0.7.0-incubating/monitoring.md
diff --git a/source/documentation/latest/resource-isolation.md b/source/documentation/0.7.0-incubating/resource-isolation.md
similarity index 100%
copy from source/documentation/latest/resource-isolation.md
copy to source/documentation/0.7.0-incubating/resource-isolation.md
diff --git a/source/documentation/latest/scheduler-storage.md b/source/documentation/0.7.0-incubating/scheduler-storage.md
similarity index 100%
copy from source/documentation/latest/scheduler-storage.md
copy to source/documentation/0.7.0-incubating/scheduler-storage.md
diff --git a/source/documentation/0.7.0-incubating/sla.md b/source/documentation/0.7.0-incubating/sla.md
new file mode 100644
index 0000000..3d56e95
--- /dev/null
+++ b/source/documentation/0.7.0-incubating/sla.md
@@ -0,0 +1,176 @@
+Aurora SLA Measurement
+--------------
+
+- [Overview](#overview)
+- [Metric Details](#metric-details)
+  - [Platform Uptime](#platform-uptime)
+  - [Job Uptime](#job-uptime)
+  - [Median Time To Assigned (MTTA)](#median-time-to-assigned-\(mtta\))
+  - [Median Time To Running (MTTR)](#median-time-to-running-\(mttr\))
+- [Limitations](#limitations)
+
+## Overview
+
+The primary goal of the feature is collection and monitoring of Aurora job SLA (Service Level
+Agreement) metrics that define a contractual relationship between the Aurora/Mesos platform
+and hosted services.
+
+The Aurora SLA feature currently supports stat collection only for service (non-cron)
+production jobs (`"production = True"` in your `.aurora` config).
+
+Counters that track SLA measurements are computed periodically within the scheduler.
+The individual instance metrics are refreshed every minute (configurable via
+`sla_stat_refresh_interval`). The instance counters are subsequently aggregated by
+relevant grouping types before being exported to the scheduler `/vars` endpoint (when using `vagrant`
+that would be `http://192.168.33.7:8081/vars`).
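+
+For example, to spot-check the exported SLA counters in the Vagrant environment you could run
+something like:
+
+    $ curl -s http://192.168.33.7:8081/vars | grep sla_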
+
+## Metric Details
+
+### Platform Uptime
+
+*Aggregate amount of time a job spends in a non-runnable state due to platform unavailability
+or scheduling delays. This metric tracks Aurora/Mesos uptime performance and reflects on any
+system-caused downtime events (tasks LOST or DRAINED). Any user-initiated task kills/restarts
+will not degrade this metric.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_platform_uptime_percent`
+* Per cluster - `sla_cluster_platform_uptime_percent`
+
+**Units:** percent
+
+A fault in the task environment may cause Aurora/Mesos to have different views of the task state
+or to lose track of the task's existence. In such cases, the service task is marked as LOST and
+rescheduled by Aurora. For example, this may happen when the task stays in ASSIGNED or STARTING
+for too long or the Mesos slave becomes unhealthy (or disappears completely). The time between
+task entering LOST and its replacement reaching RUNNING state is counted towards platform downtime.
+
+Another example of a platform downtime event is the administrator-requested task rescheduling. This
+happens during planned Mesos slave maintenance when all slave tasks are marked as DRAINED and
+rescheduled elsewhere.
+
+To accurately calculate Platform Uptime, we must separate platform incurred downtime from user
+actions that put a service instance in a non-operational state. It is simpler to isolate
+user-incurred downtime and treat all other downtime as platform incurred.
+
+Currently, a user can cause a healthy service (task) downtime in only two ways: via `killTasks`
+or `restartShards` RPCs. For both, their affected tasks leave an audit state transition trail
+relevant to uptime calculations. By applying a special "SLA meaning" to exposed task state
+transition records, we can build a deterministic downtime trace for every given service instance.
+
+A task going through a state transition carries one of three possible SLA meanings
+(see [SlaAlgorithm.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/sla/SlaAlgorithm.java)
+for the SLA-to-task-state mapping):
+
+* Task is UP: starts a period where the task is considered to be up and running from the Aurora
+  platform standpoint.
+
+* Task is DOWN: starts a period where the task cannot reach the UP state for some
+  non-user-related reason. Counts towards instance downtime.
+
+* Task is REMOVED from SLA: starts a period where the task is not expected to be UP due to
+  user initiated action or failure. We ignore this period for the uptime calculation purposes.
+
+This metric is recalculated over the last sampling period (last minute) to account for
+any UP/DOWN/REMOVED events. It ignores any UP/DOWN events not immediately adjacent to the
+sampling interval as well as adjacent REMOVED events.
+
+### Job Uptime
+
+*Percentage of the job instances considered to be in RUNNING state for the specified duration
+relative to request time. This is a purely application-side metric that considers the aggregate
+uptime of all RUNNING instances. Any user- or platform-initiated restarts directly affect
+this metric.*
+
+**Collection scope:** We currently expose job uptime values at 5 pre-defined
+percentiles (50th, 75th, 90th, 95th and 99th):
+
+* `sla_<job_key>_job_uptime_50_00_sec`
+* `sla_<job_key>_job_uptime_75_00_sec`
+* `sla_<job_key>_job_uptime_90_00_sec`
+* `sla_<job_key>_job_uptime_95_00_sec`
+* `sla_<job_key>_job_uptime_99_00_sec`
+
+**Units:** seconds
+
+You can also get customized real-time stats from the aurora client. See `aurora sla -h` for
+more details.
+
+### Median Time To Assigned (MTTA)
+
+*Median time a job spends waiting for its tasks to be assigned to a host. This is a combined
+metric that helps track the dependency of scheduling performance on the requested resources
+(user scope) as well as the internal scheduler bin-packing algorithm efficiency (platform scope).*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtta_ms`
+* Per cluster - `sla_cluster_mtta_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in
+[ResourceAggregates.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java)
+  * By CPU:
+    * `sla_cpu_small_mtta_ms`
+    * `sla_cpu_medium_mtta_ms`
+    * `sla_cpu_large_mtta_ms`
+    * `sla_cpu_xlarge_mtta_ms`
+    * `sla_cpu_xxlarge_mtta_ms`
+  * By RAM:
+    * `sla_ram_small_mtta_ms`
+    * `sla_ram_medium_mtta_ms`
+    * `sla_ram_large_mtta_ms`
+    * `sla_ram_xlarge_mtta_ms`
+    * `sla_ram_xxlarge_mtta_ms`
+  * By DISK:
+    * `sla_disk_small_mtta_ms`
+    * `sla_disk_medium_mtta_ms`
+    * `sla_disk_large_mtta_ms`
+    * `sla_disk_xlarge_mtta_ms`
+    * `sla_disk_xxlarge_mtta_ms`
+
+**Units:** milliseconds
+
+MTTA only considers instances that have already reached ASSIGNED state and ignores those
+that are still PENDING. This ensures straggler instances (e.g. with unreasonable resource
+constraints) do not affect metric curves.
+
+### Median Time To Running (MTTR)
+
+*Median time a job waits for its tasks to reach RUNNING state. This is a comprehensive metric
+reflecting the overall time it takes for Aurora/Mesos to start executing user content.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mttr_ms`
+* Per cluster - `sla_cluster_mttr_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in
+[ResourceAggregates.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java)
+  * By CPU:
+    * `sla_cpu_small_mttr_ms`
+    * `sla_cpu_medium_mttr_ms`
+    * `sla_cpu_large_mttr_ms`
+    * `sla_cpu_xlarge_mttr_ms`
+    * `sla_cpu_xxlarge_mttr_ms`
+  * By RAM:
+    * `sla_ram_small_mttr_ms`
+    * `sla_ram_medium_mttr_ms`
+    * `sla_ram_large_mttr_ms`
+    * `sla_ram_xlarge_mttr_ms`
+    * `sla_ram_xxlarge_mttr_ms`
+  * By DISK:
+    * `sla_disk_small_mttr_ms`
+    * `sla_disk_medium_mttr_ms`
+    * `sla_disk_large_mttr_ms`
+    * `sla_disk_xlarge_mttr_ms`
+    * `sla_disk_xxlarge_mttr_ms`
+
+**Units:** milliseconds
+
+MTTR only considers instances in RUNNING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+## Limitations
+
+* The availability of Aurora SLA metrics is bound by the scheduler availability.
+
+* All metrics are calculated at a pre-defined interval (currently set at 1 minute).
+  Scheduler restarts may result in missed collections.
\ No newline at end of file
diff --git a/source/documentation/0.7.0-incubating/storage-config.md b/source/documentation/0.7.0-incubating/storage-config.md
new file mode 100644
index 0000000..03c95a8
--- /dev/null
+++ b/source/documentation/0.7.0-incubating/storage-config.md
@@ -0,0 +1,142 @@
+# Storage Configuration And Maintenance
+
+- [Overview](#overview)
+- [Scheduler storage configuration flags](#scheduler-storage-configuration-flags)
+  - [Mesos replicated log configuration flags](#mesos-replicated-log-configuration-flags)
+    - [-native_log_quorum_size](#-native_log_quorum_size)
+    - [-native_log_file_path](#-native_log_file_path)
+    - [-native_log_zk_group_path](#-native_log_zk_group_path)
+  - [Backup configuration flags](#backup-configuration-flags)
+    - [-backup_interval](#-backup_interval)
+    - [-backup_dir](#-backup_dir)
+    - [-max_saved_backups](#-max_saved_backups)
+- [Recovering from a scheduler backup](#recovering-from-a-scheduler-backup)
+  - [Summary](#summary)
+  - [Preparation](#preparation)
+  - [Cleanup and re-initialize Mesos replicated log](#cleanup-and-re-initialize-mesos-replicated-log)
+  - [Restore from backup](#restore-from-backup)
+  - [Cleanup](#cleanup)
+
+## Overview
+
+This document summarizes Aurora storage configuration and maintenance details and is
+intended for use by anyone deploying and/or maintaining Aurora.
+
+For a high level overview of the Aurora storage architecture refer to [this document](/documentation/0.7.0-incubating/storage/).
+
+## Scheduler storage configuration flags
+
+Below is a summary of scheduler storage configuration flags that either don't have default values
+or require attention before deploying in a production environment.
+
+### Mesos replicated log configuration flags
+
+#### -native_log_quorum_size
+Defines the Mesos replicated log quorum size. See
+[the replicated log configuration document](/documentation/0.7.0-incubating/deploying-aurora-scheduler/#replicated-log-configuration)
+on how to choose the right value.
+
+#### -native_log_file_path
+Location of the Mesos replicated log files. Consider allocating a dedicated disk (preferably SSD)
+for Mesos replicated log files to ensure optimal storage performance.
+
+#### -native_log_zk_group_path
+ZooKeeper path used for Mesos replicated log quorum discovery.
+
+See [code](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/log/mesos/MesosLogStreamModule.java) for
+other available Mesos replicated log configuration options and default values.
+
+### Backup configuration flags
+
+Configuration options for the Aurora scheduler backup manager.
+
+#### -backup_interval
+The interval on which the scheduler writes local storage backups.  The default is every hour.
+
+#### -backup_dir
+Directory to write backups to.
+
+#### -max_saved_backups
+Maximum number of backups to retain before deleting the oldest backup(s).
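+
+A sketch of how these flags might appear in a scheduler startup script (the values shown are
+illustrative):
+
+    AURORA_FLAGS=(
+      # ...
+      -backup_dir=/var/lib/aurora/backups
+      -max_saved_backups=48
+      # ...
+    )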
+
+## Recovering from a scheduler backup
+
+- [Summary](#summary)
+- [Preparation](#preparation)
+- [Cleanup and re-initialize Mesos replicated log](#cleanup-and-re-initialize-mesos-replicated-log)
+- [Restore from backup](#restore-from-backup)
+- [Cleanup](#cleanup)
+
+**Be sure to read the entire page before attempting to restore from a backup, as it may have
+unintended consequences.**
+
+### Summary
+
+The restoration procedure replaces the existing (possibly corrupted) Mesos replicated log with an
+earlier, backed up, version and requires all schedulers to be taken down temporarily while
+restoring. Once completed, the scheduler state resets to what it was when the backup was created.
+This means any jobs/tasks created or updated after the backup are unknown to the scheduler and will
+be killed shortly after the cluster restarts. All other tasks continue operating as normal.
+
+Usually, it is a bad idea to restore a backup that is not extremely recent (i.e. older than a few
+hours). This is because the scheduler will expect the cluster to look exactly as the backup does,
+so any tasks that have been rescheduled since the backup was taken will be killed.
+
+### Preparation
+
+Follow these steps to prepare the cluster for restoring from a backup:
+
+* Stop all scheduler instances
+
+* Consider blocking external traffic on the port defined in `-http_port` for all schedulers to
+prevent users from interacting with the scheduler during the restoration process. This aids
+troubleshooting by reducing scheduler log noise and prevents users from making changes that will
+be erased after the backup snapshot is restored
+
+* The next steps put the scheduler into a partially disabled state in which it can still accept
+storage recovery requests but is unable to schedule or change task states. This may be
+accomplished by updating the following scheduler configuration options:
+  * Set `-mesos_master_address` to a non-existent zk address. This will prevent the scheduler from
+    registering with Mesos. E.g.: `-mesos_master_address=zk://localhost:2181`
+  * Set `-max_registration_delay` to a sufficiently long interval to prevent a registration timeout
+    and, as a result, scheduler suicide. E.g.: `-max_registration_delay=360min`
+  * Make sure the `-gc_executor_path` option is not set, to prevent accidental task GC. This is
+    important because the scheduler will attempt to reconcile the cluster state and will kill all
+    tasks when restarted with an empty Mesos replicated log.
+
+* Restart all schedulers
+
+### Cleanup and re-initialize Mesos replicated log
+
+Get rid of the corrupted files and re-initialize the Mesos replicated log:
+
+* Stop schedulers
+* Delete all files under `-native_log_file_path` on all schedulers
+* Initialize Mesos replica's log file: `mesos-log initialize <-native_log_file_path>`
+* Restart schedulers
+
+### Restore from backup
+
+At this point the scheduler is ready to rehydrate from the backup:
+
+* Identify the leading scheduler by:
+  * running `aurora_admin get_scheduler <cluster>` - if scheduler is responsive
+  * examining scheduler logs
+  * or examining Zookeeper registration under the path defined by `-zk_endpoints`
+    and `-serverset_path`
+
+* Locate the desired backup file, copy it to the leading scheduler, and stage recovery by running
+the following command on the leader:
+`aurora_admin scheduler_stage_recovery <cluster> scheduler-backup-<yyyy-MM-dd-HH-mm>`
+
+* At this point, the recovery snapshot is staged and available for manual verification/modification
+via `aurora_admin scheduler_print_recovery_tasks` and `scheduler_delete_recovery_tasks` commands.
+See `aurora_admin help <command>` for usage details.
+
+* Commit recovery. This instructs the scheduler to overwrite the existing Mesos replicated log with
+the provided backup snapshot and initiate a mandatory failover:
+`aurora_admin scheduler_commit_recovery <cluster>`
+
+### Cleanup
+Undo any modifications made during the [Preparation](#preparation) sequence.
+
diff --git a/source/documentation/0.7.0-incubating/storage.md b/source/documentation/0.7.0-incubating/storage.md
new file mode 100644
index 0000000..5e9a7fc
--- /dev/null
+++ b/source/documentation/0.7.0-incubating/storage.md
@@ -0,0 +1,88 @@
+# Aurora Scheduler Storage
+
+- [Overview](#overview)
+- [Reads, writes, modifications](#reads-writes-modifications)
+  - [Read lifecycle](#read-lifecycle)
+  - [Write lifecycle](#write-lifecycle)
+- [Atomicity, consistency and isolation](#atomicity-consistency-and-isolation)
+- [Population on restart](#population-on-restart)
+
+## Overview
+
+The Aurora scheduler maintains data that needs to be persisted to survive failovers and restarts.
+For example:
+
+* Task configurations and scheduled task instances
+* Job update configurations and update progress
+* Production resource quotas
+* Mesos resource offer host attributes
+
+Aurora solves its persistence needs by leveraging the Mesos implementation of a Paxos replicated
+log [[1]](https://ramcloud.stanford.edu/~ongaro/userstudy/paxos.pdf)
+[[2]](http://en.wikipedia.org/wiki/State_machine_replication) with a key-value
+[LevelDB](https://github.com/google/leveldb) store as the persistence medium.
+
+Conceptually, it can be represented by the following major components:
+
+* Volatile storage: in-memory cache of all available data. Implemented via in-memory
+[H2 Database](http://www.h2database.com/html/main.html) and accessed via
+[MyBatis](http://mybatis.github.io/mybatis-3/).
+* Log manager: interface between Aurora storage and Mesos replicated log. The default schema format
+is [thrift](https://github.com/apache/thrift). Data is stored in serialized binary form.
+* Snapshot manager: all data is periodically persisted in Mesos replicated log in a single snapshot.
+This helps establish periodic recovery checkpoints and speeds up volatile storage recovery on
+restart.
+* Backup manager: as a precaution, snapshots are periodically written out into backup files.
+This solves a [disaster recovery problem](/documentation/0.7.0-incubating/storage-config/#recovering-from-a-scheduler-backup)
+in case of a complete loss or corruption of Mesos log files.
+
+![Storage hierarchy](images/storage_hierarchy.png)
+
+## Reads, writes, modifications
+
+All services in Aurora access data via a set of predefined store interfaces (aka stores) logically
+grouped by the type of data they serve. Every interface defines a specific set of operations allowed
+on the data, thus abstracting out the storage access and the actual persistence implementation. The
+latter is especially important given the general immutability of persisted data. With the Mesos
+replicated log as the underlying persistence solution, data can be read and written easily but not
+modified. All modifications are simulated by saving new versions of modified objects. This property,
+along with general performance considerations, justifies the existence of the volatile in-memory store.
+
+### Read lifecycle
+
+There are two types of reads available in Aurora: consistent and weakly-consistent. The difference
+is explained [below](#atomicity-consistency-and-isolation).
+
+All reads are served from volatile storage, making them generally cheap operations from a
+performance standpoint. The majority of the volatile stores are backed by the
+in-memory H2 database. This allows for rich schema definitions, queries and relationships that
+key-value storage is unable to match.
+
+### Write lifecycle
+
+Writes are more involved operations since, in addition to updating the volatile store, data has to
+be appended to the replicated log. Data is not available for reads until it has been fully
+acknowledged by both the replicated log and volatile storage.
+
+## Atomicity, consistency and isolation
+
+Aurora uses [write-ahead logging](http://en.wikipedia.org/wiki/Write-ahead_logging) to ensure
+consistency between replicated and volatile storage. In Aurora, data is first written into the
+replicated log and only then updated in the volatile store.
+
+Aurora storage uses read-write locks to serialize data mutations and provide a consistent view of
+the available data. The `Storage` interface exposes three major types of operations:
+
+* `consistentRead` - access is guarded by a reader lock and provides a consistent view of the data.
+* `weaklyConsistentRead` - access is lock-less. Delivers the best contention performance but may
+result in stale reads.
+* `write` - access is fully serialized by a writer lock. Operation success requires both the
+volatile and replicated writes to succeed.
+
+The consistency of the volatile store is enforced via H2 transactional isolation.
+
+## Population on restart
+
+Any time a scheduler restarts, it restores its volatile state from the most recent position recorded
+in the replicated log by restoring the snapshot and replaying individual log entries on top to fully
+recover the state up to the last write.
+
diff --git a/source/documentation/0.7.0-incubating/test-resource-generation.md b/source/documentation/0.7.0-incubating/test-resource-generation.md
new file mode 100644
index 0000000..4a51105
--- /dev/null
+++ b/source/documentation/0.7.0-incubating/test-resource-generation.md
@@ -0,0 +1,25 @@
+# Generating test resources
+
+## Background
+The Aurora source repository and distributions contain several
+[binary files](https://github.com/apache/aurora/blob/#{git_tag}/src/test/resources/org/apache/thermos/root/checkpoints) to
+qualify the backwards-compatibility of thermos with checkpoint data. Since
+thermos persists state to disk, to be read by other components (the GC executor
+and the thermos observer), it is important that we have tests that prevent
+regressions affecting the ability to parse previously-written data.
+
+## Generating test files
+The files included represent persisted checkpoints that exercise different
+features of thermos. The existing files should not be modified unless
+we are accepting backwards incompatibility, such as with a major release.
+
+It is not practical to write source code to generate these files on the fly,
+as source would be vulnerable to drift (e.g. due to refactoring) in ways
+that would undermine the goal of ensuring backwards compatibility.
+
+The most common reason to add a new checkpoint file would be to provide
+coverage for new thermos features that alter the data format. This is
+accomplished by writing and running a
+[job configuration](/documentation/0.7.0-incubating/configuration-reference/) that exercises the feature, and
+copying the checkpoint file from the sandbox directory; by default this is
+`/var/run/thermos/checkpoints/<aurora task id>`.
diff --git a/source/documentation/0.7.0-incubating/tutorial.md b/source/documentation/0.7.0-incubating/tutorial.md
new file mode 100644
index 0000000..5a97fad
--- /dev/null
+++ b/source/documentation/0.7.0-incubating/tutorial.md
@@ -0,0 +1,265 @@
+Aurora Tutorial
+---------------
+
+- [Introduction](#introduction)
+- [Setup: Install Aurora](#setup-install-aurora)
+- [The Script](#the-script)
+- [Aurora Configuration](#aurora-configuration)
+- [What's Going On In That Configuration File?](#whats-going-on-in-that-configuration-file)
+- [Creating the Job](#creating-the-job)
+- [Watching the Job Run](#watching-the-job-run)
+- [Cleanup](#cleanup)
+- [Next Steps](#next-steps)
+
+## Introduction
+
+This tutorial shows how to use the Aurora scheduler to run (and
+"`printf-debug`") a hello world program on Mesos. The operational
+hierarchy is:
+
+- Aurora manages and schedules jobs for Mesos to run.
+- Mesos manages the individual tasks that make up a job.
+- Thermos manages the individual processes that make up a task.
+
+This is the recommended first document for new Aurora users to read to start
+getting up to speed on the system.
+
+To get help, email questions to the Aurora Developer List,
+[dev@aurora.incubator.apache.org](mailto:dev@aurora.incubator.apache.org)
+
+## Setup: Install Aurora
+
+You use the Aurora client and web UI to interact with Aurora jobs. To
+install it locally, see [vagrant.md](/documentation/0.7.0-incubating/vagrant/). The remainder of this
+Tutorial assumes you are running Aurora using Vagrant.  Unless otherwise stated,
+all commands are to be run from the root of the aurora repository clone.
+
+## The Script
+
+Our "hello world" application is a simple Python script that loops
+forever, displaying the time every few seconds. Copy the code below and
+put it in a file named `hello_world.py` in the root of your Aurora repository clone (Note:
+this directory is the same as `/vagrant` inside the Vagrant VMs).
+
+The script has an intentional bug, which we will explain later on.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+import sys
+import time
+
+def main(argv):
+  SLEEP_DELAY = 10
+  # Python ninjas - ignore this blatant bug.
+  for i in xrang(100):
+    print("Hello world! The time is now: %s. Sleeping for %d secs" % (
+      time.asctime(), SLEEP_DELAY))
+    sys.stdout.flush()
+    time.sleep(SLEEP_DELAY)
+
+if __name__ == "__main__":
+  main(sys.argv)
+```
+
+## Aurora Configuration
+
+Once we have our script/program, we need to create a *configuration
+file* that tells Aurora how to manage and launch our Job. Save the below
+code in the file `hello_world.aurora`.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+pkg_path = '/vagrant/hello_world.py'
+
+# we use a trick here to make the configuration change with
+# the contents of the file, for simplicity.  in a normal setting, packages would be
+# versioned, and the version number would be changed in the configuration.
+import hashlib
+with open(pkg_path, 'rb') as f:
+  pkg_checksum = hashlib.md5(f.read()).hexdigest()
+
+# copy hello_world.py into the local sandbox
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s . && echo %s && chmod +x hello_world.py' % (pkg_path, pkg_checksum))
+
+# run the script
+hello_world = Process(
+  name = 'hello_world',
+  cmdline = 'python hello_world.py')
+
+# describe the task
+hello_world_task = SequentialTask(
+  processes = [install, hello_world],
+  resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB))
+
+jobs = [
+  Service(cluster = 'devcluster',
+          environment = 'devel',
+          role = 'www-data',
+          name = 'hello_world',
+          task = hello_world_task)
+]
+```
+
+For more about Aurora configuration files, see the [Configuration
+Tutorial](/documentation/0.7.0-incubating/configuration-tutorial/) and the [Aurora + Thermos
+Reference](/documentation/0.7.0-incubating/configuration-reference/) (preferably after finishing this
+tutorial).
+
+## What's Going On In That Configuration File?
+
+More than you might think.
+
+1. From a "big picture" viewpoint, it first defines two
+Processes. Then it defines a Task that runs the two Processes in the
+order specified in the Task definition, as well as specifying what
+computational and memory resources are available for them.  Finally,
+it defines a Job that will schedule the Task on available and suitable
+machines. This Job is the sole member of a list of Jobs; you can
+specify more than one Job in a config file.
+
+2. At the Process level, it specifies how to get your code into the
+local sandbox in which it will run. It then specifies how the code is
+actually run once the second Process starts.
+
+## Creating the Job
+
+We're ready to launch our job! To do so, we use the Aurora Client to
+issue a Job creation request to the Aurora scheduler.
+
+Many Aurora Client commands take a *job key* argument, which uniquely
+identifies a Job. A job key consists of four parts, each separated by a
+"/". The four parts are  `<cluster>/<role>/<environment>/<jobname>`
+in that order. When comparing two job keys, if any of the
+four parts is different from its counterpart in the other key, then the
+two job keys identify two separate jobs. If all four values are
+identical, the job keys identify the same job.
+
+`/etc/aurora/clusters.json` within the Aurora scheduler has the available
+cluster names. For Vagrant, from the top-level of your Aurora repository clone,
+do:
+
+    $ vagrant ssh
+
+Followed by:
+
+    vagrant@precise64:~$ cat /etc/aurora/clusters.json
+
+You'll see something like:
+
+```javascript
+[{
+  "name": "devcluster",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler",
+  "auth_mechanism": "UNAUTHENTICATED"
+}]
+```
+
+Use a `name` value for your job key's cluster value.
+
+Role names are user accounts existing on the slave machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
+The Aurora Client command that actually runs our Job is `aurora create`. It creates a Job as
+specified by its job key and configuration file arguments and runs it.
+
+    aurora create <cluster>/<role>/<environment>/<jobname> <config_file>
+
+Or for our example:
+
+    aurora create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+This returns:
+
+    $ vagrant ssh
+    Welcome to Ubuntu 12.04 LTS (GNU/Linux 3.2.0-23-generic x86_64)
+
+     * Documentation:  https://help.ubuntu.com/
+    Welcome to your Vagrant-built virtual machine.
+    Last login: Fri Jan  3 02:18:55 2014 from 10.0.2.2
+    vagrant@precise64:~$ aurora create devcluster/www-data/devel/hello_world \
+        /vagrant/hello_world.aurora
+     INFO] Creating job hello_world
+     INFO] Response from scheduler: OK (message: 1 new tasks pending for job
+      www-data/devel/hello_world)
+     INFO] Job url: http://precise64:8081/scheduler/www-data/devel/hello_world
+
+## Watching the Job Run
+
+Now that our job is running, let's see what it's doing. Access the
+scheduler web interface at `http://$scheduler_hostname:$scheduler_port/scheduler`,
+or, when using `vagrant`, at `http://192.168.33.7:8081/scheduler`.
+First we see what Jobs are scheduled:
+
+![Scheduled Jobs](images/ScheduledJobs.png)
+
+Click on your user name, which in this case was `www-data`, and we see the Jobs associated
+with that role:
+
+![Role Jobs](images/RoleJobs.png)
+
+If you click on your `hello_world` Job, you'll see:
+
+![hello_world Job](images/HelloWorldJob.png)
+
+Oops, looks like our first job didn't quite work! The task failed, so we have
+to figure out what went wrong.
+
+Access the page for our Task by clicking on its host.
+
+![Task page](images/TaskBreakdown.png)
+
+Once there, we see that the
+`hello_world` process failed. The Task page captures the standard error and
+standard output streams and makes them available. Clicking through
+to `stderr` on the failed `hello_world` process, we see what happened.
+
+![stderr page](images/stderr.png)
+
+It looks like we made a typo in our Python script. We wanted `xrange`,
+not `xrang`. Edit the `hello_world.py` script to use the correct function, and
+we will try again:
+
+    aurora update devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+This time, the task comes up, we inspect the page, and see that the
+`hello_world` process is running.
+
+![Running Task page](images/runningtask.png)
+
+We then inspect the output by clicking on `stdout` and see our process'
+output:
+
+![stdout page](images/stdout.png)
+
+## Cleanup
+
+Now that we're done, we kill the job using the Aurora client:
+
+    vagrant@precise64:~$ aurora killall devcluster/www-data/devel/hello_world
+     INFO] Killing tasks for job: devcluster/www-data/devel/hello_world
+     INFO] Response from scheduler: OK (message: Tasks killed.)
+     INFO] Job url: http://precise64:8081/scheduler/www-data/devel/hello_world
+    vagrant@precise64:~$
+
+The job page now shows the `hello_world` tasks as completed.
+
+![Killed Task page](images/killedtask.png)
+
+## Next Steps
+
+Now that you've finished this Tutorial, you should read or do the following:
+
+- [The Aurora Configuration Tutorial](/documentation/0.7.0-incubating/configuration-tutorial/), which provides more examples
+  and best practices for writing Aurora configurations. You should also look at
+  the [Aurora + Thermos Configuration Reference](/documentation/0.7.0-incubating/configuration-reference/).
+- The [Aurora User Guide](/documentation/0.7.0-incubating/user-guide/) provides an overview of how Aurora, Mesos, and
+  Thermos work "under the hood".
+- Explore the Aurora Client - use `aurora -h`, and read the
+  [Aurora Client Commands](/documentation/0.7.0-incubating/client-commands/) document.
diff --git a/source/documentation/0.7.0-incubating/user-guide.md b/source/documentation/0.7.0-incubating/user-guide.md
new file mode 100644
index 0000000..029d9ca
--- /dev/null
+++ b/source/documentation/0.7.0-incubating/user-guide.md
@@ -0,0 +1,355 @@
+Aurora User Guide
+-----------------
+
+- [Overview](#overview)
+- [Job Lifecycle](#job-lifecycle)
+	- [Life Of A Task](#life-of-a-task)
+	- [PENDING to RUNNING states](#pending-to-running-states)
+	- [Task Updates](#task-updates)
+	- [HTTP Health Checking and Graceful Shutdown](#http-health-checking-and-graceful-shutdown)
+		- [Tearing a task down](#tearing-a-task-down)
+	- [Giving Priority to Production Tasks: PREEMPTING](#giving-priority-to-production-tasks-preempting)
+	- [Natural Termination: FINISHED, FAILED](#natural-termination-finished-failed)
+	- [Forceful Termination: KILLING, RESTARTING](#forceful-termination-killing-restarting)
+- [Service Discovery](#service-discovery)
+- [Configuration](#configuration)
+- [Creating Jobs](#creating-jobs)
+- [Interacting With Jobs](#interacting-with-jobs)
+
+Overview
+--------
+
+This document gives an overview of how Aurora works under the hood.
+It assumes you've already worked through the "hello world" example
+job in the [Aurora Tutorial](/documentation/0.7.0-incubating/tutorial/). Specifics of how to use Aurora are **not**
+given here, but pointers to documentation about how to use Aurora are
+provided.
+
+Aurora is a Mesos framework used to schedule *jobs* onto Mesos. Mesos
+cares about individual *tasks*, but typical jobs consist of dozens or
+hundreds of task replicas. Aurora provides a layer on top of Mesos with
+its `Job` abstraction. An Aurora `Job` consists of a task template and
+instructions for creating near-identical replicas of that task (modulo
+things like "instance id" or specific port numbers which may differ from
+machine to machine).
+
+How many tasks make up a Job is complicated. On a basic level, a Job consists of
+one task template and instructions for creating near-identical replicas of that task
+(otherwise referred to as "instances" or "shards").
+
+However, since Jobs can be updated on the fly, a single Job identifier or *job key*
+can have multiple job configurations associated with it.
+
+For example, consider when I have a Job with 4 instances that each
+request 1 core of cpu, 1 GB of RAM, and 1 GB of disk space as specified
+in the configuration file `hello_world.aurora`. I want to
+update it so it requests 2 GB of RAM instead of 1. I create a new
+configuration file to do that called `new_hello_world.aurora` and
+issue a `aurora update --shards=0-1 <job_key_value> new_hello_world.aurora`
+command.
+
+This results in instances 0 and 1 having 1 cpu, 2 GB of RAM, and 1 GB of disk space,
+while instances 2 and 3 have 1 cpu, 1 GB of RAM, and 1 GB of disk space. If instance 3
+dies and restarts, it restarts with 1 cpu, 1 GB RAM, and 1 GB disk space.
+
+So that means there are two simultaneous task configurations for the same Job
+at the same time, just valid for different ranges of instances.
+
+This isn't a recommended pattern, but it is valid and supported by the
+Aurora scheduler. This most often manifests in the "canary pattern" where
+instance 0 runs with a different configuration than instances 1-N to test
+different code versions alongside the actual production job.
+
+A task can merely be a single *process* corresponding to a single
+command line, such as `python2.6 my_script.py`. However, a task can also
+consist of many separate processes, which all run within a single
+sandbox. For example, running multiple cooperating agents together,
+such as `logrotate`, `installer`, master, or slave processes. This is
+where Thermos comes in. While Aurora provides a `Job` abstraction on
+top of Mesos `Tasks`, Thermos provides a `Process` abstraction
+underneath Mesos `Task`s and serves as part of the Aurora framework's
+executor.
+
+You define `Job`s, `Task`s, and `Process`es in a configuration file.
+Configuration files are written in Python, and make use of the Pystachio
+templating language. They end in a `.aurora` extension.
+
+Pystachio is a type-checked dictionary templating library.
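+
+As a rough sketch of the templating idea (based on the Pystachio README; treat this as
+illustrative rather than an API reference):
+
+```python
+from pystachio import String
+
+greeting = String('Hello {{name}}!')
+bound = greeting.bind(name = 'Aurora')  # substitutes {{name}}, yielding "Hello Aurora!"
+```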
+
+> TL;DR
+>
+> -   Aurora manages jobs made of tasks.
+> -   Mesos manages tasks made of processes.
+> -   Thermos manages processes.
+> -   All defined in `.aurora` configuration file.
+
+![Aurora hierarchy](images/aurora_hierarchy.png)
+
+Each `Task` has a *sandbox* created when the `Task` starts and garbage
+collected when it finishes. All of a `Task`'s processes run in its
+sandbox, so processes can share state by using a shared current working
+directory.
+
+The sandbox garbage collection policy considers many factors, most
+importantly age and size. It makes a best-effort attempt to keep
+sandboxes around as long as possible post-task in order for service
+owners to inspect data and logs, should the `Task` have completed
+abnormally. But you can't design your applications assuming sandboxes
+will be around forever; instead, build log saving or other
+checkpointing mechanisms directly into your application or into your
+`Job` description.
+
+Job Lifecycle
+-------------
+
+When Aurora reads a configuration file and finds a `Job` definition, it:
+
+1.  Evaluates the `Job` definition.
+2.  Splits the `Job` into its constituent `Task`s.
+3.  Sends those `Task`s to the scheduler.
+4.  The scheduler puts the `Task`s into `PENDING` state, starting each
+    `Task`'s life cycle.
+
+### Life Of A Task
+
+![Life of a task](images/lifeofatask.png)
+
+### PENDING to RUNNING states
+
+When a `Task` is in the `PENDING` state, the scheduler constantly
+searches for machines satisfying that `Task`'s resource request
+requirements (RAM, disk space, CPU time) while maintaining configuration
+constraints such as "a `Task` must run on machines dedicated to a
+particular role" or attribute limit constraints such as "at most 2
+`Task`s from the same `Job` may run on each rack". When the scheduler
+finds a suitable match, it assigns the `Task` to a machine and puts the
+`Task` into the `ASSIGNED` state.
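+
+As an illustration of what such constraints look like in a job configuration (the job and task
+names below are made up; see the configuration reference for the authoritative constraint syntax):
+
+```python
+hello_world_spread = Job(
+  cluster = 'devcluster',
+  environment = 'devel',
+  role = 'www-data',
+  name = 'hello_world',
+  task = hello_world_task,        # assumed to be defined elsewhere in the config
+  instances = 4,
+  # Allow at most 2 instances of this job on any single rack.
+  constraints = {'rack': 'limit:2'})
+```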
+
+From the `ASSIGNED` state, the scheduler sends an RPC to the slave
+machine containing `Task` configuration, which the slave uses to spawn
+an executor responsible for the `Task`'s lifecycle. When the scheduler
+receives an acknowledgement that the machine has accepted the `Task`,
+the `Task` goes into `STARTING` state.
+
+`STARTING` state initializes a `Task` sandbox. When the sandbox is fully
+initialized, Thermos begins to invoke `Process`es. Also, the slave
+machine sends an update to the scheduler that the `Task` is
+in `RUNNING` state.
+
+If a `Task` stays in `ASSIGNED` or `STARTING` for too long, the
+scheduler forces it into `LOST` state, creating a new `Task` in its
+place that's sent into `PENDING` state. This is technically true of any
+active state: if the Mesos core tells the scheduler that a slave has
+become unhealthy (or outright disappeared), the `Task`s assigned to that
+slave go into `LOST` state and new `Task`s are created in their place.
+From `PENDING` state, there is no guarantee a `Task` will be reassigned
+to the same machine unless job constraints explicitly force it there.
+
+If there is a state mismatch (e.g. a machine returns from a `netsplit`
+and the scheduler has marked all its `Task`s `LOST` and rescheduled
+them), a state reconciliation process kills the errant `RUNNING` tasks,
+which may take up to an hour. But to emphasize this point: there is no
+uniqueness guarantee for a single instance of a job in the presence of
+network partitions. If the Task requires that, it should be baked in at
+the application level using a distributed coordination service such as
+Zookeeper.
+
+### Task Updates
+
+`Job` configurations can be updated at any point in their lifecycle.
+Usually updates are done incrementally using a process called a *rolling
+upgrade*, in which Tasks are upgraded in small groups, one group at a
+time.  Updates are done using various Aurora Client commands.
+
+For a configuration update, the Aurora Client calculates required changes
+by examining the current job config state and the new desired job config.
+It then starts a rolling batched update process by going through every batch
+and performing these operations:
+
+- If an instance is present in the scheduler but isn't in the new config,
+  then that instance is killed.
+- If an instance is not present in the scheduler but is present in
+  the new config, then the instance is created.
+- If an instance is present in both the scheduler and the new config, then
+  the client diffs both task configs. If it detects any changes, it
+  performs an instance update by killing the old config instance and adding
+  the new config instance.
+
+The Aurora client continues through the instance list until all tasks are
+updated, in `RUNNING`, and healthy for a configurable amount of time.
+If the client determines the update is not going well (a percentage of health
+checks have failed), it cancels the update.
+
+Update cancellation runs a procedure similar to the update sequence
+described above, but in reverse order. New instance configs are swapped
+with old instance configs and batch updates proceed backwards
+from the point where the update failed. E.g., (0,1,2) (3,4,5) (6,7,
+8-FAIL) results in a rollback in the order (8,7,6) (5,4,3) (2,1,0).
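+
+The knobs that control this process live in the job's `UpdateConfig`. A minimal sketch (the values
+are illustrative; see the configuration reference for the full list of parameters):
+
+```python
+update_config = UpdateConfig(
+  batch_size = 3,               # instances updated per batch
+  watch_secs = 45,              # how long a new instance must stay RUNNING to be deemed healthy
+  max_per_shard_failures = 0,   # failures tolerated per instance before rolling back
+  max_total_failures = 0)       # failures tolerated across the job before rolling back
+```
+
+The resulting object is passed to the `Job` via its `update_config` parameter.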
+
+### HTTP Health Checking and Graceful Shutdown
+
+The Executor implements a protocol for rudimentary control of a task via HTTP.  Tasks subscribe for
+this protocol by declaring a port named `health`.  Take for example this configuration snippet:
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[http]}}')
+
+When this Process is included in a job, the job will be allocated a port, and the command line
+will be replaced with something like:
+
+    ./run_nginx.sh -port 42816
+
+Where 42816 happens to be the allocated port.  Typically, the Executor monitors Processes within
+a task only by liveness of the forked process.  However, when a `health` port is allocated, it will
+also send periodic HTTP health checks.  A task requesting a `health` port must handle the following
+requests:
+
+| HTTP request            | Description                             |
+| ------------            | -----------                             |
+| `GET /health`           | Inquires whether the task is healthy.   |
+| `POST /quitquitquit`    | Task should initiate graceful shutdown. |
+| `POST /abortabortabort` | Final warning task is being killed.     |
+
+Please see the
+[configuration reference](/documentation/0.7.0-incubating/configuration-reference/#healthcheckconfig-objects) for
+configuration options for this feature.
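+
+The sketch below shows what a task-side handler for this protocol could look like. It assumes the
+checker treats an HTTP 200 response with body `ok` as healthy and that the port number arrives via
+an environment variable named `HEALTH_PORT` (both assumptions for illustration; in a real job the
+port would come from `{{thermos.ports[health]}}`):
+
+```python
+import os
+import signal
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+class HealthHandler(BaseHTTPRequestHandler):
+  def do_GET(self):
+    if self.path == '/health':
+      body = b'ok'
+      self.send_response(200)
+      self.send_header('Content-Length', str(len(body)))
+      self.end_headers()
+      self.wfile.write(body)
+    else:
+      self.send_response(404)
+      self.end_headers()
+
+  def do_POST(self):
+    if self.path in ('/quitquitquit', '/abortabortabort'):
+      self.send_response(200)
+      self.end_headers()
+      # Begin shutdown; a real service would flush state before exiting.
+      os.kill(os.getpid(), signal.SIGTERM)
+    else:
+      self.send_response(404)
+      self.end_headers()
+
+if __name__ == '__main__':
+  port = int(os.environ.get('HEALTH_PORT', '8080'))
+  HTTPServer(('0.0.0.0', port), HealthHandler).serve_forever()
+```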
+
+#### Snoozing Health Checks
+
+If you need to pause your health check, you can do so by touching a file inside of your sandbox,
+named `.healthchecksnooze`
+
+As long as that file is present, health checks will be disabled, enabling users to gather core dumps
+or other performance measurements without worrying about Aurora's health check killing their
+process.
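+
+For example, from inside the task's sandbox:
+
+    $ touch .healthchecksnooze   # pause health checks
+    $ rm .healthchecksnooze      # re-enable health checks when you are done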
+
+WARNING: Remember to remove this when you are done, otherwise your instance will have permanently
+disabled health checks.
+
+#### Tearing a task down
+
+The Executor follows an escalation sequence when killing a running task:
+
+  1. If `health` port is not present, skip to (5)
+  2. POST /quitquitquit
+  3. wait 5 seconds
+  4. POST /abortabortabort
+  5. Send SIGTERM (`kill`)
+  6. Send SIGKILL (`kill -9`)
+
+If the Executor notices that all Processes in a Task have aborted during this sequence, it will
+not proceed with subsequent steps.  Note that graceful shutdown is best-effort, and due to the many
+inevitable realities of distributed systems, it may not be performed.
+
+### Giving Priority to Production Tasks: PREEMPTING
+
+Sometimes a Task needs to be interrupted, such as when a non-production
+Task's resources are needed by a higher priority production Task. This
+type of interruption is called a *pre-emption*. When this happens in
+Aurora, the non-production Task is killed and moved into
+the `PREEMPTING` state when both of the following are true:
+
+- The task being killed is a non-production task.
+- The other task is a `PENDING` production task that hasn't been
+  scheduled due to a lack of resources.
+
+Since production tasks are much more important, Aurora kills off the
+non-production task to free up resources for the production task. The
+scheduler UI shows the non-production task was preempted in favor of the
+production task. At some point, tasks in `PREEMPTING` move to `KILLED`.
+
+Note that non-production tasks consuming many resources are likely to be
+preempted in favor of production tasks.
+
+### Natural Termination: FINISHED, FAILED
+
+A `RUNNING` `Task` can terminate without direct user interaction. For
+example, it may be a finite computation that finishes, even something as
+simple as `echo hello world`. Or it could be an exceptional condition in
+a long-lived service. If the `Task` is successful (its underlying
+processes have succeeded with exit status `0` or finished without
+reaching failure limits) it moves into `FINISHED` state. If it finished
+after reaching a set of failure limits, it goes into `FAILED` state.
+
+### Forceful Termination: KILLING, RESTARTING
+
+You can terminate a `Task` by issuing an `aurora kill` command, which
+moves it into `KILLING` state. The scheduler then sends the slave a
+request to terminate the `Task`. If the scheduler receives a successful
+response, it moves the Task into `KILLED` state and never restarts it.
+
+The scheduler has access to a non-public `RESTARTING` state. If a `Task`
+is forced into the `RESTARTING` state, the scheduler kills the
+underlying task but in parallel schedules an identical replacement for
+it.
+
+Configuration
+-------------
+
+You define and configure your Jobs (and their Tasks and Processes) in
+Aurora configuration files. Their filenames end with the `.aurora`
+suffix, and you write them in Python making use of the Pystachio
+templating language, along
+with specific Aurora, Mesos, and Thermos commands and methods. See the
+[Configuration Guide and Reference](/documentation/0.7.0-incubating/configuration-reference/) and
+[Configuration Tutorial](/documentation/0.7.0-incubating/configuration-tutorial/).
+
+Service Discovery
+-----------------
+
+It is possible for the Aurora executor to announce tasks into ServerSets for
+the purpose of service discovery.  ServerSets use the Zookeeper [group membership pattern](http://zookeeper.apache.org/doc/trunk/recipes.html#sc_outOfTheBox)
+of which there are several reference implementations:
+
+  - [C++](https://github.com/apache/mesos/blob/master/src/zookeeper/group.cpp)
+  - [Java](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/zookeeper/ServerSetImpl.java#L221)
+  - [Python](https://github.com/twitter/commons/blob/master/src/python/twitter/common/zookeeper/serverset/serverset.py#L51)
+
+These can also be used natively in Finagle using the [ZookeeperServerSetCluster](https://github.com/twitter/finagle/blob/master/finagle-serversets/src/main/scala/com/twitter/finagle/zookeeper/ZookeeperServerSetCluster.scala).
+
+For more information about how to configure announcing, see the [Configuration Reference](/documentation/0.7.0-incubating/configuration-reference/).
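+
+For a rough illustration (the parameter names below are assumptions to be checked against the
+Configuration Reference linked above), announcing is enabled by attaching an `Announcer` to the job:
+
+```python
+jobs = [
+  Service(cluster = 'devcluster',
+          environment = 'devel',
+          role = 'www-data',
+          name = 'hello_world',
+          task = hello_world_task,                      # assumed to be defined as in the tutorial
+          announce = Announcer(primary_port = 'http'))
+]
+```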
+
+Creating Jobs
+-------------
+
+You create and manipulate Aurora Jobs with the Aurora client, which starts all its
+command line commands with
+`aurora`. See [Aurora Client Commands](/documentation/0.7.0-incubating/client-commands/) for details
+about the Aurora Client.
+
+Interacting With Jobs
+---------------------
+
+You interact with Aurora jobs either via:
+
+- Read-only Web UIs
+
+  Part of the output from creating a new Job is a URL for the Job's scheduler UI page.
+
+  For example:
+
+      vagrant@precise64:~$ aurora create devcluster/www-data/prod/hello \
+      /vagrant/examples/jobs/hello_world.aurora
+      INFO] Creating job hello
+      INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+      INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+  The "Job url" goes to the Job's scheduler UI page. To go to the overall scheduler UI page,
+  stop at the "scheduler" part of the URL, in this case, `http://precise64:8081/scheduler`
+
+  You can also reach the scheduler UI page via the Client command `aurora open`:
+
+      aurora open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+  If only the cluster is specified, it goes directly to that cluster's scheduler main page.
+  If the role is specified, it goes to the top-level role page. If the full job key is specified,
+  it goes directly to the job page where you can inspect individual tasks.
+
+  Once you click through to a role page, you see Jobs arranged separately by pending jobs, active
+  jobs, and finished jobs. Jobs are arranged by role, typically a service account for production
+  jobs and user accounts for test or development jobs.
+
+- The Aurora client
+
+  See [client commands](/documentation/0.7.0-incubating/client-commands/).
diff --git a/source/documentation/0.7.0-incubating/vagrant.md b/source/documentation/0.7.0-incubating/vagrant.md
new file mode 100644
index 0000000..67f0426
--- /dev/null
+++ b/source/documentation/0.7.0-incubating/vagrant.md
@@ -0,0 +1,52 @@
+Getting Started
+===============
+To replicate a real cluster environment as closely as possible, we use
+[Vagrant](http://www.vagrantup.com/) to launch a complete Aurora cluster in a virtual machine.
+
+Prerequisites
+-------------
+  * [VirtualBox](https://www.virtualbox.org/)
+  * [Vagrant](http://www.vagrantup.com/)
+  * A clone of the Aurora repository, or source distribution.
+
+You can start a local cluster by running:
+
+    vagrant up
+
+Once started, several services should be running:
+
+  * scheduler is listening on http://192.168.33.7:8081
+  * observer is listening on http://192.168.33.7:1338
+  * master is listening on http://192.168.33.7:5050
+  * slave is listening on http://192.168.33.7:5051
+
+You can SSH into the machine with `vagrant ssh` and execute aurora client commands using the
+`aurora` command.  A pre-installed `clusters.json` file refers to your local cluster as
+`devcluster`, which you will use in client commands.
+
+Deleting your local cluster
+===========================
+Once you are finished with your local cluster, or if you would otherwise like to start from scratch,
+you can use the command `vagrant destroy` to turn off and delete the virtual file system.
+
+
+Rebuilding components
+=====================
+If you are changing Aurora code and would like to rebuild a component, you can use the `aurorabuild`
+command on your vagrant machine to build and restart a component.  This is considerably faster than
+destroying and rebuilding your VM.
+
+`aurorabuild` accepts a list of components to build and update.  You may invoke the command with
+no arguments to get a list of supported components.
+
+     vagrant ssh -c 'aurorabuild client'
+
+
+Troubleshooting
+===============
+Most vagrant-related problems can be fixed by the following steps:
+
+* Destroying the vagrant environment with `vagrant destroy`
+* Killing any orphaned VMs (see AURORA-499) with `virtualbox` UI or `VBoxManage` command line tool
+* Cleaning the repository of build artifacts and other intermediate output with `git clean -fdx`
+* Bringing up the vagrant environment with `vagrant up`
diff --git a/source/documentation/latest/client-cluster-configuration.md b/source/documentation/0.8.0/client-cluster-configuration.md
similarity index 100%
copy from source/documentation/latest/client-cluster-configuration.md
copy to source/documentation/0.8.0/client-cluster-configuration.md
diff --git a/source/documentation/0.8.0/client-commands.md b/source/documentation/0.8.0/client-commands.md
new file mode 100644
index 0000000..cfb8de1
--- /dev/null
+++ b/source/documentation/0.8.0/client-commands.md
@@ -0,0 +1,410 @@
+Aurora Client Commands
+======================
+
+- [Introduction](#introduction)
+- [Cluster Configuration](#cluster-configuration)
+- [Job Keys](#job-keys)
+- [Modifying Aurora Client Commands](#modifying-aurora-client-commands)
+- [Regular Jobs](#regular-jobs)
+    - [Creating and Running a Job](#creating-and-running-a-job)
+    - [Running a Command On a Running Job](#running-a-command-on-a-running-job)
+    - [Killing a Job](#killing-a-job)
+    - [Updating a Job](#updating-a-job)
+        - [Coordinated job updates](#coordinated-job-updates)
+        - [Client-orchestrated updates (deprecated)](#client-orchestrated-updates-deprecated)
+    - [Renaming a Job](#renaming-a-job)
+    - [Restarting Jobs](#restarting-jobs)
+- [Cron Jobs](#cron-jobs)
+- [Comparing Jobs](#comparing-jobs)
+- [Viewing/Examining Jobs](#viewingexamining-jobs)
+    - [Listing Jobs](#listing-jobs)
+    - [Inspecting a Job](#inspecting-a-job)
+    - [Versions](#versions)
+    - [Checking Your Quota](#checking-your-quota)
+    - [Finding a Job on Web UI](#finding-a-job-on-web-ui)
+    - [Getting Job Status](#getting-job-status)
+    - [Opening the Web UI](#opening-the-web-ui)
+    - [SSHing to a Specific Task Machine](#sshing-to-a-specific-task-machine)
+    - [Templating Command Arguments](#templating-command-arguments)
+
+Introduction
+------------
+
+Once you have written an `.aurora` configuration file that describes
+your Job and its parameters and functionality, you interact with Aurora
+using Aurora Client commands. This document describes all of these commands
+and how and when to use them. All Aurora Client commands start with
+`aurora`, followed by the name of the specific command and its
+arguments.
+
+*Job keys* are a very common argument to Aurora commands, as well as the
+gateway to useful information about a Job. Before using Aurora, you
+should read the next section which describes them in detail. The section
+after that briefly describes how you can modify the behavior of certain
+Aurora Client commands, linking to a detailed document about how to do
+that.
+
+This is followed by the Regular Jobs section, which describes the basic
+Client commands for creating, running, and manipulating Aurora Jobs.
+After that are sections on Comparing Jobs and Viewing/Examining Jobs. In
+other words, various commands for getting information and metadata about
+Aurora Jobs.
+
+Cluster Configuration
+---------------------
+
+The client must be able to find a configuration file that specifies available clusters. This file
+declares shorthand names for clusters, which are in turn referenced by job configuration files
+and client commands.
+
+The client will load at most two configuration files, making both of their defined clusters
+available. The first is intended to be a system-installed cluster file, using the path specified in
+the environment variable `AURORA_CONFIG_ROOT`, defaulting to `/etc/aurora/clusters.json` if the
+environment variable is not set. The second is a user-installed file, located at
+`~/.aurora/clusters.json`.
+
+A cluster configuration is formatted as JSON.  The simplest cluster configuration is one that
+communicates with a single (non-leader-elected) scheduler.  For example:
+
+```javascript
+[{
+  "name": "example",
+  "scheduler_uri": "localhost:55555",
+}]
+```
+
+A configuration for a leader-elected scheduler would contain something like:
+
+```javascript
+[{
+  "name": "example",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler"
+}]
+```
+
+For more details on cluster configuration see the
+[Client Cluster Configuration](/documentation/0.8.0/client-cluster-configuration/) documentation.
+
+Job Keys
+--------
+
+A job key is a unique system-wide identifier for an Aurora-managed
+Job, for example `cluster1/web-team/test/experiment204`. It is a 4-tuple
+consisting of, in order, *cluster*, *role*, *environment*, and
+*jobname*, separated by /s. Cluster is the name of an Aurora
+cluster. Role is the Unix service account under which the Job
+runs. Environment is a namespace component like `devel`, `test`,
+`prod`, or `stagingN`. Jobname is the Job's name.
+
+The combination of all four values uniquely specifies the Job. If any
+one value is different from that of another job key, the two job keys
+refer to different Jobs. For example, job key
+`cluster1/tyg/prod/workhorse` is different from
+`cluster1/tyg/prod/workcamel` is different from
+`cluster2/tyg/prod/workhorse` is different from
+`cluster2/foo/prod/workhorse` is different from
+`cluster1/tyg/test/workhorse`.
+
+Role names are user accounts existing on the slave machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
+Modifying Aurora Client Commands
+--------------------------------
+
+For certain Aurora Client commands, you can define hook methods that run
+either before or after an action that takes place during the command's
+execution, and that can depend on whether the action finished successfully or
+failed during execution. Basically, a hook is code that lets you extend the
+command's actions. The hook executes on the client side, specifically on
+the machine executing Aurora commands.
+
+Hooks can be associated with these Aurora Client commands:
+
+  - `job cancel-update`
+  - `job create`
+  - `job kill`
+  - `job restart`
+  - `job update`
+
+The process for writing and activating them is complex enough
+that we explain it in a devoted document, [Hooks for Aurora Client API](/documentation/0.8.0/hooks/).
+
+Regular Jobs
+------------
+
+This section covers Aurora commands related to running, killing,
+renaming, updating, and restarting a basic Aurora Job.
+
+### Creating and Running a Job
+
+    aurora job create <job key> <configuration file>
+
+Creates and then runs a Job with the specified job key based on a `.aurora` configuration file.
+The configuration file may also contain and activate hook definitions.
+
+### Running a Command On a Running Job
+
+    aurora task run CLUSTER/ROLE/ENV/NAME[/INSTANCES] <cmd>
+
+Runs a shell command on all machines currently hosting shards of a
+single Job.
+
+`run` supports the same command line wildcards used to populate a Job's
+commands; i.e. anything in the `{{mesos.*}}` and `{{thermos.*}}`
+namespaces.
+
+### Killing a Job
+
+    aurora job killall CLUSTER/ROLE/ENV/NAME
+
+Kills all Tasks associated with the specified Job, blocking until all
+are terminated. Defaults to killing all instances in the Job.
+
+The `<configuration file>` argument for `kill` is optional. Use it only
+if it contains hook definitions and activations that affect the
+kill command.
+
+### Updating a Job
+
+There are several sub-commands to manage job updates:
+
+    aurora update start <job key> <configuration file>
+    aurora update info <job key>
+    aurora update pause <job key>
+    aurora update resume <job key>
+    aurora update abort <job key>
+    aurora update list <cluster>
+
+When you `start` a job update, the command will return once it has sent the
+instructions to the scheduler.  At that point, you may view detailed
+progress for the update with the `info` subcommand, in addition to viewing
+graphical progress in the web browser.  You may also get a full listing of
+in-progress updates in a cluster with `list`.
+
+Once an update has been started, you can `pause` to keep the update but halt
+progress.  This can be useful for doing things like debugging a partially-updated
+job to determine whether you would like to proceed.  You can `resume` to
+proceed.
+
+You may `abort` a job update regardless of the state it is in. This will
+instruct the scheduler to completely abandon the job update and leave the job
+in the current (possibly partially-updated) state.
+
+#### Coordinated job updates
+
+Some Aurora services may benefit from having more control over updates by explicitly
+acknowledging ("heartbeating") job update progress. This may be helpful for mission-critical
+service updates where explicit job health monitoring is vital during the entire job update
+lifecycle. Such job updates would rely on an external service (or a custom client) periodically
+pulsing an active coordinated job update via a
+[pulseJobUpdate RPC](https://github.com/apache/aurora/blob/#{git_tag}/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+A coordinated update is defined by setting a positive
+[pulse_interval_secs](/documentation/0.8.0/configuration-reference/#updateconfig-objects) value in the job
+configuration file. If no pulses are received within the specified interval, the update will be blocked. A blocked
+update is unable to continue rolling forward (or rolling back) but retains its active status.
+It may only be unblocked by a fresh `pulseJobUpdate` call.
+
+NOTE: A coordinated update starts in `ROLL_FORWARD_AWAITING_PULSE` state and will not make any
+progress until the first pulse arrives. However, a paused update (`ROLL_FORWARD_PAUSED` or
+`ROLL_BACK_PAUSED`) is still considered active and upon resuming will immediately make progress
+provided the pulse interval has not expired.
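+
+A sketch of how this could look in the job's `UpdateConfig` (the values are illustrative only):
+
+```python
+update_config = UpdateConfig(
+  batch_size = 2,
+  watch_secs = 60,
+  # A positive pulse interval makes the update coordinated: it blocks unless
+  # pulsed via pulseJobUpdate at least every 60 seconds.
+  pulse_interval_secs = 60)
+```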
+
+#### Client-orchestrated updates (deprecated)
+
+*Note: This feature is deprecated and will be removed in 0.9.0.
+Please use aurora update instead.*
+
+    aurora job update CLUSTER/ROLE/ENV/NAME[/INSTANCES] <configuration file>
+    aurora job cancel-update CLUSTER/ROLE/ENV/NAME
+
+Given a running job, does a rolling update to reflect a new
+configuration version. Only updates Tasks in the Job with a changed
+configuration. You can further restrict the Tasks operated on by specifying
+specific instances that should be updated.
+
+You may want to run `aurora job diff` beforehand to validate which Tasks
+have different configurations.
+
+Jobs being updated are locked to be sure the update finishes without
+disruption. If the update terminates abnormally, the lock may stay
+around and cause failure of subsequent update attempts.
+`aurora job cancel-update` unlocks the Job specified by
+its `job_key` argument. Be sure you don't issue `job cancel-update` when
+another user is working with the specified Job.
+
+The `<configuration file>` argument for `job cancel-update` is optional. Use
+it only if it contains hook definitions and activations that affect the
+`cancel_update` command. The `<configuration file>` argument for
+`update` is required, but in addition to a new configuration it can be
+used to define and activate hooks for `job update`.
+
+### Renaming a Job
+
+Renaming is a tricky operation as downstream clients must be informed of
+the new name. A conservative approach
+to renaming suitable for production services is:
+
+1.  Modify the Aurora configuration file to change the role,
+    environment, and/or name as appropriate to the standardized naming
+    scheme.
+2.  Check that only these naming components have changed
+    with `aurora job diff`.
+
+        aurora job diff CLUSTER/ROLE/ENV/NAME <job_configuration>
+
+3.  Create the (identical) job at the new key. You may need to request a
+    temporary quota increase.
+
+        aurora job create CLUSTER/ROLE/ENV/NEW_NAME <job_configuration>
+
+4.  Migrate all clients over to the new job key. Update all links and
+    dashboards. Ensure that both job keys run identical versions of the
+    code while in this state.
+5.  After verifying that all clients have successfully moved over, kill
+    the old job.
+
+        aurora job killall CLUSTER/ROLE/ENV/NAME
+
+6.  If you received a temporary quota increase, be sure to let the
+    powers that be know you no longer need the additional capacity.
+
+### Restarting Jobs
+
+`restart` restarts all shards of the Job identified by a job key:
+
+    aurora job restart CLUSTER/ROLE/ENV/NAME[/INSTANCES]
+
+Restarts are controlled on the client side, so aborting
+the `job restart` command halts the restart operation.
+
+**Note**: `job restart` only applies its command line arguments; it neither
+uses nor is affected by `update.config`. Restarting
+does ***not*** involve a configuration change. To update the
+configuration, use `update.config`.
+
+The `--config` argument for restart is optional. Use it only
+if it contains hook definitions and activations that affect the
+`job restart` command.
+
+Cron Jobs
+---------
+
+You can manage cron jobs using the `aurora cron` command.  Please see
+[cron-jobs.md](/documentation/0.8.0/cron-jobs/) for more details.
+
+You will see various commands and options relating to cron jobs in
+`aurora -h` and similar. Ignore them, as they're not yet implemented.
+
+Comparing Jobs
+--------------
+
+    aurora job diff CLUSTER/ROLE/ENV/NAME <job configuration>
+
+Compares a job configuration against a running job. By default the diff
+is determined using `diff`, though you may choose an alternate
+ diff program by specifying the `DIFF_VIEWER` environment variable.
+
+Viewing/Examining Jobs
+----------------------
+
+Above we discussed creating, killing, and updating Jobs. Here we discuss
+how to view and examine Jobs.
+
+### Listing Jobs
+
+    aurora config list <job configuration>
+
+Lists all Jobs registered with the Aurora scheduler in the named cluster for the named role.
+
+### Inspecting a Job
+
+    aurora job inspect CLUSTER/ROLE/ENV/NAME <job configuration>
+
+`inspect` verifies that its specified job can be parsed from a
+configuration file, and displays the parsed configuration.
+
+### Checking Your Quota
+
+    aurora quota get CLUSTER/ROLE
+
+Prints the production quota allocated to the role at the given
+cluster.
+
+### Finding a Job on Web UI
+
+When you create a job, part of the output response contains a URL that goes
+to the job's scheduler UI page. For example:
+
+    vagrant@precise64:~$ aurora job create devcluster/www-data/prod/hello /vagrant/examples/jobs/hello_world.aurora
+    INFO] Creating job hello
+    INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+    INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+You can go to the scheduler UI page for this job via `http://precise64:8081/scheduler/www-data/prod/hello`.
+You can go to the overall scheduler UI page by going to the part of that URL that ends at `scheduler`: `http://precise64:8081/scheduler`.
+
+Once you click through to a role page, you see Jobs arranged
+separately by pending jobs, active jobs and finished jobs.
+Jobs are arranged by role, typically a service account for
+production jobs and user accounts for test or development jobs.
+
+### Getting Job Status
+
+    aurora job status <job_key>
+
+Returns the status of recent tasks associated with the
+`job_key` specified Job in its supplied cluster. Typically this includes
+a mix of active tasks (running or assigned) and inactive tasks
+(successful, failed, and lost.)
+
+### Opening the Web UI
+
+Use the Job's web UI scheduler URL or the `aurora status` command to find out on which
+machines individual tasks are scheduled. You can open the web UI via the
+`open` command line command if invoked from your machine:
+
+    aurora job open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+If only the cluster is specified, it goes directly to that cluster's
+scheduler main page. If the role is specified, it goes to the top-level
+role page. If the full job key is specified, it goes directly to the job
+page where you can inspect individual tasks.
+
+### SSHing to a Specific Task Machine
+
+    aurora task ssh <job_key> <shard number>
+
+You can have the Aurora client ssh directly to the machine that has been
+assigned a particular Job/shard number. This may be useful for quickly
+diagnosing issues such as performance issues or abnormal behavior on a
+particular machine.
+
+### Templating Command Arguments
+
+    aurora task run [-e] [-t THREADS] <job_key> -- <<command-line>>
+
+Given a job specification, run the supplied command on all hosts and
+return the output. You may use the standard Mustache templating rules:
+
+- `{{thermos.ports[name]}}` substitutes the specific named port of the
+  task assigned to this machine
+- `{{mesos.instance}}` substitutes the shard id of the job's task
+  assigned to this machine
+- `{{thermos.task_id}}` substitutes the task id of the job's task
+  assigned to this machine
+
+For example, the following type of pattern can be a powerful diagnostic
+tool:
+
+    aurora task run -t5 cluster1/tyg/devel/seizure -- \
+      'curl -s -m1 localhost:{{thermos.ports[http]}}/vars | grep uptime'
+
+By default, the command runs in the Task's sandbox. The `-e` option can
+run the command in the executor's sandbox. This is mostly useful for
+Aurora administrators.
+
+You can parallelize the runs by using the `-t` option.
diff --git a/source/documentation/0.8.0/committers.md b/source/documentation/0.8.0/committers.md
new file mode 100644
index 0000000..e6b1646
--- /dev/null
+++ b/source/documentation/0.8.0/committers.md
@@ -0,0 +1,79 @@
+Setting up your email account
+-----------------------------
+Once your Apache ID has been set up, you can configure your account, add ssh keys, and set up an
+email forwarding address at
+
+  http://id.apache.org
+
+Additional instructions for setting up your new committer email can be found at
+
+  http://www.apache.org/dev/user-email.html
+
+The recommended setup is to configure all services (mailing lists, JIRA, ReviewBoard) to send
+emails to your @apache.org email address.
+
+
+Creating a gpg key for releases
+-------------------------------
+In order to create a release candidate, you will need a gpg key published to an external key server,
+and that key will need to be added to our KEYS file as well.
+
+1. Create a key:
+
+               gpg --gen-key
+
+2. Add your gpg key to the Apache Aurora KEYS file:
+
+               git clone https://gitbox.apache.org/repos/asf/aurora.git
+               (gpg --list-sigs <KEY ID> && gpg --armor --export <KEY ID>) >> KEYS
+               git add KEYS && git commit -m "Adding gpg key for <APACHE ID>"
+               ./rbt post -o -g
+
+3. Publish the key to an external key server:
+
+               gpg --keyserver pgp.mit.edu --send-keys <KEY ID>
+
+4. Upload the updated KEYS file to the Apache Aurora svn dist locations listed below:
+
+               https://dist.apache.org/repos/dist/dev/aurora/KEYS
+               https://dist.apache.org/repos/dist/release/aurora/KEYS
+
+5. Add your key to git config for use with the release scripts:
+
+               git config --global user.signingkey <KEY ID>
+
+
+Creating a release
+------------------
+The following will guide you through the steps to create a release candidate, vote, and finally an
+official Apache Aurora release. Before starting, your gpg key should be in the KEYS file and you
+must have access to commit to the dist.a.o repositories.
+
+1. Ensure that all issues resolved for this release candidate are tagged with the correct Fix
+Version in Jira; the changelog script will use this to generate the CHANGELOG in step #2.
+
+2. Create a release candidate. This will automatically update the CHANGELOG and commit it, create a
+branch, and update the current version within the trunk. To create a minor version update and publish
+it, run
+
+               ./build-support/release/release-candidate -l m -p
+
+3. Update, if necessary, the draft email created from the `release-candidate` script in step #2 and
+send the [VOTE] email to the dev@ and private@ mailing lists. You can verify the release signature
+and checksums by running
+
+               ./build-support/release/verify-release-candidate
+
+4. Wait for the vote to complete. If the vote fails, address any issues, go back to step #1, and
+run again; this time use the -r flag to increment the release candidate version. This will
+automatically clean up the release candidate rc0 branch and source distribution.
+
+               ./build-support/release/release-candidate -l m -r 1 -p
+
+5. Once the vote has successfully passed, create the release
+
+               ./build-support/release/release
+
+6. Update the draft email created from the `release` script in step #5 to include the Apache IDs for
+all binding votes and send the [RESULT][VOTE] email to the dev@ and private@ mailing lists.
+
diff --git a/source/documentation/0.8.0/configuration-reference.md b/source/documentation/0.8.0/configuration-reference.md
new file mode 100644
index 0000000..5290eb9
--- /dev/null
+++ b/source/documentation/0.8.0/configuration-reference.md
@@ -0,0 +1,562 @@
+Aurora + Thermos Configuration Reference
+========================================
+
+- [Aurora + Thermos Configuration Reference](#aurora--thermos-configuration-reference)
+- [Introduction](#introduction)
+- [Process Schema](#process-schema)
+    - [Process Objects](#process-objects)
+      - [name](#name)
+      - [cmdline](#cmdline)
+      - [max_failures](#max_failures)
+      - [daemon](#daemon)
+      - [ephemeral](#ephemeral)
+      - [min_duration](#min_duration)
+      - [final](#final)
+- [Task Schema](#task-schema)
+    - [Task Object](#task-object)
+      - [name](#name-1)
+      - [processes](#processes)
+        - [constraints](#constraints)
+      - [resources](#resources)
+      - [max_failures](#max_failures-1)
+      - [max_concurrency](#max_concurrency)
+      - [finalization_wait](#finalization_wait)
+    - [Constraint Object](#constraint-object)
+    - [Resource Object](#resource-object)
+- [Job Schema](#job-schema)
+    - [Job Objects](#job-objects)
+    - [Services](#services)
+    - [UpdateConfig Objects](#updateconfig-objects)
+    - [HealthCheckConfig Objects](#healthcheckconfig-objects)
+    - [Announcer Objects](#announcer-objects)
+    - [Container Object](#container-object)
+- [Specifying Scheduling Constraints](#specifying-scheduling-constraints)
+- [Template Namespaces](#template-namespaces)
+    - [mesos Namespace](#mesos-namespace)
+    - [thermos Namespace](#thermos-namespace)
+- [Basic Examples](#basic-examples)
+    - [hello_world.aurora](#hello_worldaurora)
+    - [Environment Tailoring](#environment-tailoring)
+      - [hello_world_productionized.aurora](#hello_world_productionizedaurora)
+
+Introduction
+============
+
+Don't know where to start? The Aurora configuration schema is very
+powerful, and configurations can become quite complex for advanced use
+cases.
+
+For examples of simple configurations to get something up and running
+quickly, check out the [Tutorial](/documentation/0.8.0/tutorial/). When you feel comfortable with the basics, move
+on to the [Configuration Tutorial](/documentation/0.8.0/configuration-tutorial/) for more in-depth coverage of
+configuration design.
+
+For additional basic configuration examples, see [the end of this document](#basic-examples).
+
+Process Schema
+==============
+
+Process objects consist of required `name` and `cmdline` attributes. You can customize Process
+behavior with its optional attributes. Remember, Processes are handled by Thermos.
+
+### Process Objects
+
+  **Attribute Name**  | **Type**    | **Description**
+  ------------------- | :---------: | ---------------------------------
+   **name**           | String      | Process name (Required)
+   **cmdline**        | String      | Command line (Required)
+   **max_failures**   | Integer     | Maximum process failures (Default: 1)
+   **daemon**         | Boolean     | When True, this is a daemon process. (Default: False)
+   **ephemeral**      | Boolean     | When True, this is an ephemeral process. (Default: False)
+   **min_duration**   | Integer     | Minimum duration between process restarts in seconds. (Default: 15)
+   **final**          | Boolean     | When True, this process is a finalizing one that should run last. (Default: False)
+
+#### name
+
+The name is any valid UNIX filename string (specifically no
+slashes, NULLs or leading periods). Within a Task object, each Process name
+must be unique.
+
+#### cmdline
+
+The command line run by the process. The command line is invoked in a bash
+subshell, so can involve fully-blown bash scripts. However, nothing is
+supplied for command-line arguments so `$*` is unspecified.
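+
+For example, a minimal Process that simply echoes a greeting needs only
+these two required attributes:
+
+    hello_world_process = Process(name = 'hello_world', cmdline = 'echo hello world')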
+
+#### max_failures
+
+The maximum number of failures (non-zero exit statuses) this process can
+have before being marked permanently failed and not retried. If a
+process permanently fails, Thermos looks at the failure limit of the task
+containing the process (usually 1) to determine if the task has
+failed as well.
+
+Setting `max_failures` to 0 makes the process retry
+indefinitely until it achieves a successful (zero) exit status.
+It retries at most once every `min_duration` seconds to prevent
+an effective denial of service attack on the coordinating Thermos scheduler.
+
+#### daemon
+
+By default, Thermos processes are non-daemon. If `daemon` is set to True, a
+successful (zero) exit status does not prevent future process runs.
+Instead, the process reinvokes after `min_duration` seconds.
+However, the maximum failure limit still applies. A combination of
+`daemon=True` and `max_failures=0` causes a process to retry
+indefinitely regardless of exit status. This should be avoided
+for very short-lived processes because of the accumulation of
+checkpointed state for each process run. When running in Mesos
+specifically, `max_failures` is capped at 100.
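+
+As an illustrative sketch (the process name and command here are hypothetical),
+a process that should be re-invoked after every successful exit could be
+declared as:
+
+    # Re-run after each successful exit; max_failures still applies.
+    heartbeat = Process(
+      name = 'heartbeat',
+      cmdline = './send_heartbeat.sh',
+      daemon = True)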
+
+#### ephemeral
+
+By default, Thermos processes are non-ephemeral. If `ephemeral` is set to
+True, the process' status is not used to determine if its containing task
+has completed. For example, consider a task with a non-ephemeral
+webserver process and an ephemeral logsaver process
+that periodically checkpoints its log files to a centralized data store.
+The task is considered finished once the webserver process has
+completed, regardless of the logsaver's current status.
+
+#### min_duration
+
+Processes may succeed or fail multiple times during a single task's
+duration. Each of these is called a *process run*. `min_duration` is
+the minimum number of seconds the scheduler waits before running the
+same process.
+
+#### final
+
+Processes can be grouped into two classes: ordinary processes and
+finalizing processes. By default, Thermos processes are ordinary. They
+run as long as the task is considered healthy (i.e., no failure
+limits have been reached.) But once all regular Thermos processes
+finish or the task reaches a certain failure threshold, it
+moves into a "finalization" stage and runs all finalizing
+processes. These are typically processes necessary for cleaning up the
+task, such as log checkpointers, or perhaps e-mail notifications that
+the task completed.
+
+Finalizing processes may not depend upon ordinary processes or
+vice-versa, however finalizing processes may depend upon other
+finalizing processes and otherwise run as a typical process
+schedule.
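+
+As a sketch (process names and commands are hypothetical), a task might pair
+an ordinary server process with a finalizing log uploader that runs once the
+task leaves its healthy state:
+
+    server = Process(name = 'server', cmdline = './run_server.sh')
+    # Runs during the finalization stage, after all ordinary processes finish.
+    upload_logs = Process(
+      name = 'upload_logs',
+      cmdline = './upload_logs.sh',
+      final = True)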
+
+Task Schema
+===========
+
+Tasks fundamentally consist of a `name` and a list of Process objects stored as the
+value of the `processes` attribute. Processes can be further constrained with
+`constraints`. By default, `name`'s value inherits from the first Process in the
+`processes` list, so for simple `Task` objects with one Process, `name`
+can be omitted. In Mesos, `resources` is also required.
+
+### Task Object
+
+   **param**               | **type**                         | **description**
+   ---------               | :---------:                      | ---------------
+   ```name```              | String                           | Process name (Required) (Default: ```processes[0].name```)
+   ```processes```         | List of ```Process``` objects    | List of ```Process``` objects bound to this task. (Required)
+   ```constraints```       | List of ```Constraint``` objects | List of ```Constraint``` objects constraining processes.
+   ```resources```         | ```Resource``` object            | Resource footprint. (Required)
+   ```max_failures```      | Integer                          | Maximum process failures before being considered failed (Default: 1)
+   ```max_concurrency```   | Integer                          | Maximum number of concurrent processes (Default: 0, unlimited concurrency.)
+   ```finalization_wait``` | Integer                          | Amount of time allocated for finalizing processes, in seconds. (Default: 30)
+
+#### name
+`name` is a string denoting the name of this task. It defaults to the name of the first Process in
+the list of Processes associated with the `processes` attribute.
+
+#### processes
+
+`processes` is an unordered list of `Process` objects. To constrain the order
+in which they run, use `constraints`.
+
+##### constraints
+
+A list of `Constraint` objects. Currently it supports only one type,
+the `order` constraint. `order` is a list of process names
+that should run in the order given. For example,
+
+        process = Process(cmdline = "echo hello {{name}}")
+        task = Task(name = "echoes",
+                    processes = [process(name = "jim"), process(name = "bob")],
+                    constraints = [Constraint(order = ["jim", "bob"])])
+
+Constraints can be supplied ad-hoc and in duplicate. Not all
+Processes need be constrained, however Tasks with cycles are
+rejected by the Thermos scheduler.
+
+Use the `order` function as shorthand to generate `Constraint` lists.
+The following:
+
+        order(process1, process2)
+
+is shorthand for
+
+        [Constraint(order = [process1.name(), process2.name()])]
+
+#### resources
+
+Takes a `Resource` object, which specifies the amounts of CPU, memory, and disk space resources
+to allocate to the Task.
+
+#### max_failures
+
+`max_failures` is the number of times processes that are part of this
+Task can fail before the entire Task is marked for failure.
+
+For example:
+
+        template = Process(max_failures=10)
+        task = Task(
+          name = "fail",
+          processes = [
+             template(name = "failing", cmdline = "exit 1"),
+             template(name = "succeeding", cmdline = "exit 0")
+          ],
+          max_failures=2)
+
+The `failing` Process could fail 10 times before being marked as
+permanently failed, and the `succeeding` Process would succeed on the
+first run. The task would succeed despite only allowing for two failed
+processes. To be more specific, there would be 10 failed process runs
+yet 1 failed process.
+
+#### max_concurrency
+
+For Tasks with a number of expensive but otherwise independent
+processes, you may want to limit the amount of concurrency
+the Thermos scheduler provides rather than artificially constraining
+it via `order` constraints. For example, a test framework may
+generate a task with 100 test run processes, but wants to run it on
+a machine with only 4 cores. You can limit the amount of parallelism to
+4 by setting `max_concurrency=4` in your task configuration.
+
+For example, the following task spawns 180 Processes ("mappers")
+to compute individual elements of a 180 degree sine table, all dependent
+upon one final Process ("reducer") to tabulate the results:
+
+    def make_mapper(id):
+      return Process(
+        name = "mapper%03d" % id,
+        cmdline = "echo 'scale=50;s(%d*4*a(1)/180)' | bc -l > temp.sine_table.%03d" % (id, id))
+
+    def make_reducer():
+      return Process(name = "reducer",
+                     cmdline = "cat temp.* | nl > sine_table.txt && rm -f temp.*")
+
+    processes = map(make_mapper, range(180))
+
+    task = Task(
+      name = "mapreduce",
+      processes = processes + [make_reducer()],
+      constraints = [Constraint(order = [mapper.name(), 'reducer']) for mapper
+                     in processes],
+      max_concurrency = 8)
+
+#### finalization_wait
+
+Tasks have three active stages: `ACTIVE`, `CLEANING`, and `FINALIZING`. The
+`ACTIVE` stage is when ordinary processes run. This stage lasts as
+long as Processes are running and the Task is healthy. The moment either
+all Processes have finished successfully or the Task has reached a
+maximum Process failure limit, it goes into `CLEANING` stage and sends
+SIGTERMs to all currently running Processes and their process trees.
+Once all Processes have terminated, the Task goes into `FINALIZING` stage
+and invokes the schedule of all Processes with the "final" attribute set to True.
+
+This whole process from the end of `ACTIVE` stage to the end of `FINALIZING`
+must happen within `finalization_wait` seconds. If it does not
+finish during that time, all remaining Processes are sent SIGKILLs
+(or if they depend upon uncompleted Processes, are
+never invoked.)
+
+Client applications with higher priority may force a shorter
+finalization wait (e.g. through parameters to `thermos kill`), so this
+is mostly a best-effort signal.
+
+### Constraint Object
+
+Current constraint objects only support a single ordering constraint, `order`,
+which specifies its processes run sequentially in the order given. By
+default, all processes run in parallel when bound to a `Task` without
+ordering constraints.
+
+   param | type           | description
+   ----- | :----:         | -----------
+   order | List of String | List of processes by name (String) that should be run serially.
+
+### Resource Object
+
+Specifies the amount of CPU, RAM, and disk resources the task needs. See the
+[Resource Isolation document](/documentation/0.8.0/resource-isolation/) for suggested values and to understand how
+resources are allocated.
+
+  param      | type    | description
+  -----      | :----:  | -----------
+  ```cpu```  | Float   | Fractional number of cores required by the task.
+  ```ram```  | Integer | Bytes of RAM required by the task.
+  ```disk``` | Integer | Bytes of disk required by the task.
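+
+For example, a small task footprint might be declared as:
+
+    resources = Resources(cpu = 1.0, ram = 128 * MB, disk = 1 * GB)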
+
+
+Job Schema
+==========
+
+### Job Objects
+
+   name | type | description
+   ------ | :-------: | -------
+  ```task``` | Task | The Task object to bind to this job. Required.
+  ```name``` | String | Job name. (Default: inherited from the task attribute's name)
+  ```role``` | String | Job role account. Required.
+  ```cluster``` | String | Cluster in which this job is scheduled. Required.
+   ```environment``` | String | Job environment, default ```devel```. Must be one of ```prod```, ```devel```, ```test``` or ```staging<number>```.
+  ```contact``` | String | Best email address to reach the owner of the job. For production jobs, this is usually a team mailing list.
+  ```instances```| Integer | Number of instances (sometimes referred to as replicas or shards) of the task to create. (Default: 1)
+   ```cron_schedule``` | String | Cron schedule in cron format. May only be used with non-service jobs. See [Cron Jobs](/documentation/0.8.0/cron-jobs/) for more information. Default: None (not a cron job.)
+  ```cron_collision_policy``` | String | Policy to use when a cron job is triggered while a previous run is still active: KILL_EXISTING kills the previous run and schedules the new run; CANCEL_NEW lets the previous run continue and cancels the new run. (Default: KILL_EXISTING)
+  ```update_config``` | ```update_config``` object | Parameters for controlling the rate and policy of rolling updates.
+  ```constraints``` | dict | Scheduling constraints for the tasks. See the section on the [constraint specification language](#specifying-scheduling-constraints)
+  ```service``` | Boolean | If True, restart tasks regardless of success or failure. (Default: False)
+  ```max_task_failures``` | Integer | Maximum number of failures after which the task is considered to have failed (Default: 1). Set to -1 to allow for infinite failures.
+  ```priority``` | Integer | Preemption priority to give the task (Default 0). Tasks with higher priorities may preempt tasks at lower priorities.
+  ```production``` | Boolean |  Whether or not this is a production task backed by quota (Default: False). Production jobs may preempt any non-production job, and may only be preempted by production jobs in the same role and of higher priority. To run jobs at this level, the job role must have the appropriate quota. To grant quota to a particular role in production, operators use the ``aurora_admin set_quota`` command.
+  ```health_check_config``` | ```health_check_config``` object | Parameters for controlling a task's health checks via HTTP. Only used if a health port was assigned with a command line wildcard.
+  ```container``` | ```Container``` object | An optional container to run all processes inside of.
+
+### Services
+
+Jobs with the `service` flag set to True are called Services. The `Service`
+alias can be used as shorthand for `Job` with `service=True`.
+Services are differentiated from non-service Jobs in that tasks
+always restart on completion, whether successful or unsuccessful.
+Jobs without the service bit set only restart up to
+`max_task_failures` times and only if they terminated unsuccessfully
+either due to human error or machine failure.
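+
+For example, assuming a task named `hello_world_task`, the following two
+declarations are equivalent:
+
+    job = Job(task = hello_world_task, cluster = 'cluster1',
+              role = 'www-data', environment = 'prod',
+              name = 'hello_world', service = True)
+
+    job = Service(task = hello_world_task, cluster = 'cluster1',
+                  role = 'www-data', environment = 'prod',
+                  name = 'hello_world')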
+
+### UpdateConfig Objects
+
+Parameters for controlling the rate and policy of rolling updates.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+| ```restart_threshold```      | Integer  | Maximum number of seconds before a shard must move into the ```RUNNING``` state before considered a failure (Default: 60)
+| ```watch_secs```             | Integer  | Minimum number of seconds a shard must remain in ```RUNNING``` state before considered a success (Default: 45)
+| ```max_per_shard_failures``` | Integer  | Maximum number of restarts per shard during update. Increments total failure count when this limit is exceeded. (Default: 0)
+| ```max_total_failures```     | Integer  | Maximum number of shard failures to be tolerated in total during an update. Cannot be greater than or equal to the total number of tasks in a job. (Default: 0)
+| ```rollback_on_failure```    | boolean  | When False, prevents auto rollback of a failed update (Default: True)
+| ```wait_for_batch_completion```| boolean | When True, all threads from a given batch will be blocked from picking up new instances until the entire batch is updated. This essentially simulates the legacy sequential updater algorithm. (Default: False)
+| ```pulse_interval_secs```    | Integer  |  Indicates a [coordinated update](/documentation/0.8.0/client-commands/#coordinated-job-updates). If no pulses are received within the provided interval the update will be blocked. Beta-updater only. Will fail on submission when used with client updater. (Default: None)
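+
+As a sketch (the values are illustrative), a conservative update policy using
+the parameters above could be passed as the Job's `update_config` attribute:
+
+    update_config = UpdateConfig(
+      batch_size = 3,
+      watch_secs = 60,
+      max_per_shard_failures = 1,
+      max_total_failures = 0)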
+
+### HealthCheckConfig Objects
+
+Parameters for controlling a task's health checks via HTTP.
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```initial_interval_secs```    | Integer   | Initial delay for performing an HTTP health check. (Default: 15)
+| ```interval_secs```            | Integer   | Interval on which to check the task's health via HTTP. (Default: 10)
+| ```timeout_secs```             | Integer   | HTTP request timeout. (Default: 1)
+| ```max_consecutive_failures``` | Integer   | Maximum number of consecutive failures tolerated before considering a task unhealthy (Default: 0)
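+
+As a sketch (the values are illustrative), these parameters would be passed as
+the Job's `health_check_config` attribute:
+
+    health_check_config = HealthCheckConfig(
+      initial_interval_secs = 30,
+      interval_secs = 10,
+      max_consecutive_failures = 3)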
+
+### Announcer Objects
+
+If the `announce` field in the Job configuration is set, each task will be
+registered in the ServerSet `/aurora/role/environment/jobname` in the
+zookeeper ensemble configured by the executor.  If no Announcer object is specified,
+no announcement will take place.  For more information about ServerSets, see the [User Guide](/documentation/0.8.0/user-guide/).
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```primary_port```             | String    | Which named port to register as the primary endpoint in the ServerSet (Default: `http`)
+| ```portmap```                  | dict      | A mapping of additional endpoints to be announced in the ServerSet (Default: `{ 'aurora': '{{primary_port}}' }`)
+
+### Port aliasing with the Announcer `portmap`
+
+The primary endpoint registered in the ServerSet is the one allocated to the port
+specified by the `primary_port` in the `Announcer` object, by default
+the `http` port.  This port can be referenced from anywhere within a configuration
+as `{{thermos.ports[http]}}`.
+
+Without the port map, each named port would be allocated a unique port number.
+The `portmap` allows two different named ports to be aliased together.  The default
+`portmap` aliases the `aurora` port (i.e. `{{thermos.ports[aurora]}}`) to
+the `http` port.  Even though the two ports can be referenced independently,
+only one port is allocated by Mesos.  Any port referenced in a `Process` object
+but which is not in the portmap will be allocated dynamically by Mesos and announced as well.
+
+It is possible to use the portmap to alias names to static port numbers, e.g.
+`{'http': 80, 'https': 443, 'aurora': 'http'}`.  In this case, referencing
+`{{thermos.ports[aurora]}}` would look up `{{thermos.ports[http]}}` then
+find a static port 80.  No port would be requested of or allocated by Mesos.
+
+Static ports should be used cautiously as Aurora does nothing to prevent two
+tasks with the same static port allocations from being co-scheduled.
+External constraints such as slave attributes should be used to enforce such
+guarantees should they be needed.
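+
+A sketch of an `Announcer` with the static-port aliasing described above,
+set via the Job's `announce` attribute:
+
+    announce = Announcer(
+      primary_port = 'http',
+      # Announce 'http' on the static port 80 and alias 'aurora' to it.
+      portmap = {'http': 80, 'aurora': 'http'})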
+
+### Container Object
+
+*Note: The only container type currently supported is "docker".  Docker support is currently EXPERIMENTAL.*
+*Note: In order to correctly execute processes inside a job, the Docker container must have python 2.7 installed.*
+
+Describes the container the job's processes will run inside.
+
+  param          | type           | description
+  -----          | :----:         | -----------
+  ```docker```   | Docker         | A docker container to use.
+
+### Docker Object
+
+  param          | type           | description
+  -----          | :----:         | -----------
+  ```image```    | String         | The name of the docker image to execute.  If the image does not exist locally it will be pulled with ```docker pull```.
+
+
+Specifying Scheduling Constraints
+=================================
+
+Most users will not need to specify constraints explicitly, as the
+scheduler automatically inserts reasonable defaults that attempt to
+ensure reliability without impacting schedulability. For example, the
+scheduler inserts a `host: limit:1` constraint, ensuring
+that your shards run on different physical machines. Please do not
+set this field unless you are sure of what you are doing.
+
+In the `Job` object there is a map `constraints` from String to String
+allowing the user to tailor the schedulability of tasks within the job.
+
+Each slave in the cluster is assigned a set of string-valued
+key/value pairs called attributes. For example, consider the host
+`cluster1-aaa-03-sr2` and its following attributes (given in key:value
+format): `host:cluster1-aaa-03-sr2` and `rack:aaa`.
+
+The constraint map's key value is the attribute name in which we
+constrain Tasks within our Job. The value is how we constrain them.
+There are two types of constraints: *limit constraints* and *value
+constraints*.
+
+| constraint    | description
+| ------------- | --------------
+| Limit         | A string that specifies a limit for a constraint. Starts with ```'limit:``` followed by an Integer and a closing single quote, such as ```'limit:1'```.
+| Value         | A string that specifies a value for a constraint. To include a list of values, separate the values using commas. To negate the values of a constraint, start with a ```!```.
+
+You can also control machine diversity using constraints. The below
+constraint ensures that no more than two instances of your job may run
+on a single host. Think of this as a "group by" limit.
+
+    constraints = {
+      'host': 'limit:2',
+    }
+
+Likewise, you can use constraints to control rack diversity, e.g. at
+most one task per rack:
+
+    constraints = {
+      'rack': 'limit:1',
+    }
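+
+A value constraint, by contrast, restricts tasks to hosts whose attribute
+matches one of the listed values. For example (the rack names here are
+illustrative), to run only on racks `aaa` or `bbb`:
+
+    constraints = {
+      'rack': 'aaa,bbb',
+    }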
+
+Use these constraints sparingly as they can dramatically reduce Tasks' schedulability.
+
+Template Namespaces
+===================
+
+Currently, a few Pystachio namespaces have special semantics. Using them
+in your configuration allows you to tailor application behavior
+through environment introspection or to interact in special ways with the
+Aurora client or Aurora-provided services.
+
+### mesos Namespace
+
+The `mesos` namespace contains the `instance` variable that can be used
+to distinguish between Task replicas.
+
+| variable name     | type       | description
+| --------------- | :--------: | -------------
+| ```instance```    | Integer    | The instance number of the created task. A job with 5 replicas has instance numbers 0, 1, 2, 3, and 4.
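+
+For example, `{{mesos.instance}}` can be interpolated into a command line to
+give each replica its own output location (the path and script here are
+illustrative):
+
+    # Each instance writes into its own subdirectory of the sandbox.
+    backup = Process(
+      name = 'backup',
+      cmdline = 'mkdir -p output/{{mesos.instance}} && ./backup.sh output/{{mesos.instance}}')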
+
+### thermos Namespace
+
+The `thermos` namespace contains variables that work directly on the
+Thermos platform in addition to Aurora. This namespace is fully
+compatible with Tasks invoked via the `thermos` CLI.
+
+| variable      | type                     | description                        |
+| :----------:  | ---------                | ------------                       |
+| ```ports```   | map of string to Integer | A map of names to port numbers     |
+| ```task_id``` | string                   | The task ID assigned to this task. |
+
+The `thermos.ports` namespace is automatically populated by Aurora when
+invoking tasks on Mesos. When running the `thermos` command directly,
+these ports must be explicitly mapped with the `-P` option.
+
+For example, if `{{thermos.ports[http]}}` is specified in a `Process`
+configuration, it is automatically extracted and auto-populated by
+Aurora, but must be specified with, for example, `thermos -P http:12345`
+to map `http` to port 12345 when running via the CLI.
+
+Basic Examples
+==============
+
+These are provided to give a basic understanding of simple Aurora jobs.
+
+### hello_world.aurora
+
+Put the following in a file named `hello_world.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    import os
+    hello_world_process = Process(name = 'hello_world', cmdline = 'echo hello world')
+
+    hello_world_task = Task(
+      resources = Resources(cpu = 0.1, ram = 16 * MB, disk = 16 * MB),
+      processes = [hello_world_process])
+
+    hello_world_job = Job(
+      cluster = 'cluster1',
+      role = os.getenv('USER'),
+      task = hello_world_task)
+
+    jobs = [hello_world_job]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world hello_world.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world
+
+### Environment Tailoring
+
+#### hello_world_productionized.aurora
+
+Put the following in a file named `hello_world_productionized.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    include('hello_world.aurora')
+
+    production_resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 2 * GB)
+    staging_resources = Resources(cpu = 0.1, ram = 32 * MB, disk = 512 * MB)
+    hello_world_template = hello_world(
+        name = "hello_world-{{cluster}}"
+        task = hello_world(resources=production_resources))
+
+    jobs = [
+      # production jobs
+      hello_world_template(cluster = 'cluster1', instances = 25),
+      hello_world_template(cluster = 'cluster2', instances = 15),
+
+      # staging jobs
+      hello_world_template(
+        cluster = 'local',
+        instances = 1,
+        task = hello_world(resources=staging_resources)),
+    ]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world-cluster1 hello_world_productionized.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world-cluster1
diff --git a/source/documentation/0.8.0/configuration-tutorial.md b/source/documentation/0.8.0/configuration-tutorial.md
new file mode 100644
index 0000000..db8e800
--- /dev/null
+++ b/source/documentation/0.8.0/configuration-tutorial.md
@@ -0,0 +1,1125 @@
+Aurora Configuration Tutorial
+=============================
+
+How to write Aurora configuration files, including feature descriptions
+and best practices. When writing a configuration file, make use of
+`aurora job inspect`. It takes the same job key and configuration file
+arguments as `aurora job create` or `aurora job update`. It first ensures the
+configuration parses, then outputs it in human-readable form.
+
+You should read this after going through the general [Aurora Tutorial](/documentation/0.8.0/tutorial/).
+
+- [Aurora Configuration Tutorial](#aurora-configuration-tutorial)
+	- [The Basics](#the-basics)
+		- [Use Bottom-To-Top Object Ordering](#use-bottom-to-top-object-ordering)
+	- [An Example Configuration File](#an-example-configuration-file)
+	- [Defining Process Objects](#defining-process-objects)
+	- [Getting Your Code Into The Sandbox](#getting-your-code-into-the-sandbox)
+	- [Defining Task Objects](#defining-task-objects)
+		- [SequentialTask: Running Processes in Parallel or Sequentially](#sequentialtask-running-processes-in-parallel-or-sequentially)
+		- [SimpleTask](#simpletask)
+		- [Combining tasks](#combining-tasks)
+	- [Defining Job Objects](#defining-job-objects)
+	- [The jobs List](#the-jobs-list)
+	- [Templating](#templating)
+		- [Templating 1: Binding in Pystachio](#templating-1-binding-in-pystachio)
+		- [Structurals in Pystachio / Aurora](#structurals-in-pystachio--aurora)
+			- [Mustaches Within Structurals](#mustaches-within-structurals)
+		- [Templating 2: Structurals Are Factories](#templating-2-structurals-are-factories)
+			- [A Second Way of Templating](#a-second-way-of-templating)
+		- [Advanced Binding](#advanced-binding)
+			- [Bind Syntax](#bind-syntax)
+			- [Binding Complex Objects](#binding-complex-objects)
+				- [Lists](#lists)
+				- [Maps](#maps)
+				- [Structurals](#structurals)
+		- [Structural Binding](#structural-binding)
+	- [Configuration File Writing Tips And Best Practices](#configuration-file-writing-tips-and-best-practices)
+		- [Use As Few .aurora Files As Possible](#use-as-few-aurora-files-as-possible)
+		- [Avoid Boilerplate](#avoid-boilerplate)
+		- [Thermos Uses bash, But Thermos Is Not bash](#thermos-uses-bash-but-thermos-is-not-bash)
+			- [Bad](#bad)
+			- [Good](#good)
+		- [Rarely Use Functions In Your Configurations](#rarely-use-functions-in-your-configurations)
+			- [Bad](#bad-1)
+			- [Good](#good-1)
+
+The Basics
+----------
+
+To run a job on Aurora, you must specify a configuration file that tells
+Aurora what it needs to know to schedule the job, what Mesos needs to
+run the tasks the job is made up of, and what Thermos needs to run the
+processes that make up the tasks. This file must have
+a `.aurora` suffix.
+
+A configuration file defines a collection of objects, along with parameter
+values for their attributes. An Aurora configuration file contains the
+following three types of objects:
+
+- Job
+- Task
+- Process
+
+A configuration also specifies a list of `Job` objects assigned
+to the variable `jobs`.
+
+- jobs (list of defined Jobs to run)
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a type-checked dictionary
+templating library called *Pystachio*, a powerful tool for
+configuration specification and reuse. Pystachio objects are tailored
+via {{}}-surrounded templates.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the [*Aurora+Thermos Configuration
+Reference*](/documentation/0.8.0/configuration-reference/), without `import` statements - the
+Aurora config loader injects them automatically. Other than that, an `.aurora`
+file works like any other Python script.
+
+[*Aurora+Thermos Configuration Reference*](/documentation/0.8.0/configuration-reference/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+### Use Bottom-To-Top Object Ordering
+
+A well-structured configuration starts with structural templates (if
+any). Structural templates encapsulate in their attributes all the
+differences between Jobs in the configuration that are not directly
+manipulated at the `Job` level, but typically at the `Process` or `Task`
+level. For example, if certain processes are invoked with slightly
+different settings or input.
+
+After structural templates, define, in order, `Process`es, `Task`s, and
+`Job`s.
+
+Structural template names should be *UpperCamelCased* and their
+instantiations are typically *UPPER\_SNAKE\_CASED*. `Process`, `Task`,
+and `Job` names are typically *lower\_snake\_cased*. Indentation is typically 2
+spaces.
+
+An Example Configuration File
+-----------------------------
+
+The following is a typical configuration file. Don't worry if there are
+parts you don't understand yet, but you may want to refer back to this
+as you read about its individual parts. Note that names surrounded by
+curly braces {{}} are template variables, which the system replaces with
+bound values for the variables.
+
+    # --- templates here ---
+	class Profile(Struct):
+	  package_version = Default(String, 'live')
+	  java_binary = Default(String, '/usr/lib/jvm/java-1.7.0-openjdk/bin/java')
+	  extra_jvm_options = Default(String, '')
+	  parent_environment = Default(String, 'prod')
+	  parent_serverset = Default(String,
+                                 '/foocorp/service/bird/{{parent_environment}}/bird')
+
+	# --- processes here ---
+	main = Process(
+	  name = 'application',
+	  cmdline = '{{profile.java_binary}} -server -Xmx1792m '
+	            '{{profile.extra_jvm_options}} '
+	            '-jar application.jar '
+	            '-upstreamService {{profile.parent_serverset}}'
+	)
+
+	# --- tasks ---
+	base_task = SequentialTask(
+	  name = 'application',
+	  processes = [
+	    Process(
+	      name = 'fetch',
+	      cmdline = 'curl -O https://packages.foocorp.com/{{profile.package_version}}/application.jar'),
+	  ]
+	)
+
+        # not always necessary but often useful to have separate task
+        # resource classes
+        staging_task = base_task(resources =
+                         Resources(cpu = 1.0,
+                                   ram = 2048*MB,
+                                   disk = 1*GB))
+	production_task = base_task(resources =
+                            Resources(cpu = 4.0,
+                                      ram = 2560*MB,
+                                      disk = 10*GB))
+
+	# --- job template ---
+	job_template = Job(
+	  name = 'application',
+	  role = 'myteam',
+	  contact = 'myteam-team@foocorp.com',
+	  instances = 20,
+	  service = True,
+	  task = production_task
+	)
+
+	# -- profile instantiations (if any) ---
+	PRODUCTION = Profile()
+	STAGING = Profile(
+	  extra_jvm_options = '-Xloggc:gc.log',
+	  parent_environment = 'staging'
+	)
+
+	# -- job instantiations --
+	jobs = [
+          job_template(cluster = 'cluster1', environment = 'prod')
+	               .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster2', environment = 'prod')
+	                .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster1',
+                        environment = 'staging',
+			service = False,
+			task = staging_task,
+			instances = 2)
+			.bind(profile = STAGING),
+	]
+
+## Defining Process Objects
+
+Processes are handled by the Thermos system. A process is a single
+executable step run as a part of an Aurora task, which consists of a
+bash-executable statement.
+
+The key (and required) `Process` attributes are:
+
+-   `name`: Any string which is a valid Unix filename (no slashes,
+    NULLs, or leading periods). The `name` value must be unique relative
+    to other Processes in a `Task`.
+-   `cmdline`: A command line run in a bash subshell, so you can use
+    bash scripts. Nothing is supplied for command-line arguments,
+    so `$*` is unspecified.
+
+Many tiny processes make managing configurations more difficult. For
+example, the following is a bad way to define processes.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip'
+    )
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+Since `cmdline` runs in a bash subshell, you can chain commands
+with `&&` or `||`.
+
+When defining a `Task` that is just a list of Processes run in a
+particular order, use `SequentialTask`, as described in the [*Defining*
+`Task` *Objects*](#defining-task-objects) section. The following simplifies and combines the
+above multiple `Process` definitions into just two.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip && '
+                'unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+`Process` also has five optional attributes, each with a default value
+if one isn't specified in the configuration:
+
+-   `max_failures`: Defaulting to `1`, the maximum number of failures
+    (non-zero exit statuses) before this `Process` is marked permanently
+    failed and not retried. If a `Process` permanently fails, Thermos
+    checks the `Process` object's containing `Task` for the task's
+    failure limit (usually 1) to determine whether or not the `Task`
+    should be failed. Setting `max_failures` to `0` means that this
+    process will keep retrying until a successful (zero) exit status is
+    achieved. Retries happen at most once every `min_duration` seconds
+    to prevent effectively mounting a denial of service attack against
+    the coordinating scheduler.
+
+-   `daemon`: Defaulting to `False`, if `daemon` is set to `True`, a
+    successful (zero) exit status does not prevent future process runs.
+    Instead, the `Process` reinvokes after `min_duration` seconds.
+    However, the maximum failure limit (`max_failures`) still
+    applies. A combination of `daemon=True` and `max_failures=0` retries
+    a `Process` indefinitely regardless of exit status. This should
+    generally be avoided for very short-lived processes because of the
+    accumulation of checkpointed state for each process run. When
+    running in Aurora, `max_failures` is capped at
+    100.
+
+-   `ephemeral`: Defaulting to `False`, if `ephemeral` is `True`, the
+    `Process`' status is not used to determine if its bound `Task` has
+    completed. For example, consider a `Task` with a
+    non-ephemeral webserver process and an ephemeral logsaver process
+    that periodically checkpoints its log files to a centralized data
+    store. The `Task` is considered finished once the webserver process
+    finishes, regardless of the logsaver's current status.
+
+-   `min_duration`: Defaults to `15`. Processes may succeed or fail
+    multiple times during a single Task. Each result is called a
+    *process run* and this value is the minimum number of seconds the
+    scheduler waits before re-running the same process.
+
+-   `final`: Defaulting to `False`, this is a finalizing `Process` that
+    should run last. Processes can be grouped into two classes:
+    *ordinary* and *finalizing*. By default, Thermos Processes are
+    ordinary. They run as long as the `Task` is considered
+    healthy (i.e. hasn't reached a failure limit). But once all regular
+    Thermos Processes have either finished or the `Task` has reached a
+    certain failure threshold, Thermos moves into a *finalization* stage
+    and runs all finalizing Processes. These are typically necessary for
+    cleaning up after the `Task`, such as log checkpointers, or perhaps
+    e-mail notifications of a completed Task. Finalizing processes may
+    not depend upon ordinary processes or vice-versa, however finalizing
+    processes may depend upon other finalizing processes and will
+    otherwise run as a typical process schedule.
+
+## Getting Your Code Into The Sandbox
+
+When using Aurora, you need to get your executable code into its "sandbox", specifically
+the Task sandbox where the code executes for the Processes that make up that Task.
+
+Each Task has a sandbox created when the Task starts and garbage
+collected when it finishes. All of a Task's processes run in its
+sandbox, so processes can share state by using a shared current
+working directory.
+
+Typically, you save this code somewhere. You then need to define a Process
+in your `.aurora` configuration file that fetches the code from that somewhere
+to where the slave can see it. For a public cloud, that can be anywhere public on
+the Internet, such as S3. For a private cloud internal storage, you need to put in
+on an accessible HDFS cluster or similar storage.
+
+The template for this Process is:
+
+    <name> = Process(
+      name = '<name>'
+      cmdline = '<command to copy and extract code archive into current working directory>'
+    )
+
+Note: Be sure the extracted code archive has an executable.
+
+## Defining Task Objects
+
+Tasks are handled by Mesos. A task is a collection of processes that
+runs in a shared sandbox. It's the fundamental unit Aurora uses to
+schedule the datacenter; essentially what Aurora does is find places
+in the cluster to run tasks.
+
+The key (and required) parts of a Task are:
+
+-   `name`: A string giving the Task's name. By default, if a Task is
+    not given a name, it inherits the first name in its Process list.
+
+-   `processes`: An unordered list of Process objects bound to the Task.
+    The value of the optional `constraints` attribute affects the
+    contents as a whole. Currently, the only constraint, `order`, determines if
+    the processes run in parallel or sequentially.
+
+-   `resources`: A `Resource` object defining the Task's resource
+    footprint. A `Resource` object has three attributes:
+    -   `cpu`: A Float, the fractional number of cores the Task
+        requires.
+    -   `ram`: An Integer, RAM bytes the Task requires.
+    -   `disk`: An Integer, disk bytes the Task requires.
+
+A basic Task definition looks like:
+
+    Task(
+        name="hello_world",
+        processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+        resources=Resources(cpu = 1.0,
+                            ram = 1*GB,
+                            disk = 1*GB))
+
+There are four optional Task attributes:
+
+-   `constraints`: A list of `Constraint` objects that constrain the
+    Task's processes. Currently there is only one type, the `order`
+    constraint. For example the following requires that the processes
+    run in the order `foo`, then `bar`.
+
+        constraints = [Constraint(order=['foo', 'bar'])]
+
+    There is an `order()` function that takes `order('foo', 'bar', 'baz')`
+    and converts it into `[Constraint(order=['foo', 'bar', 'baz'])]`.
+    `order()` accepts Process name strings `('foo', 'bar')` or the processes
+    themselves, e.g. `foo=Process(name='foo', ...)`, `bar=Process(name='bar', ...)`,
+    `constraints=order(foo, bar)`
+
+    Note that Thermos rejects tasks with process cycles.
+
+-   `max_failures`: Defaulting to `1`, the number of failed processes
+    needed for the `Task` to be marked as failed. Note how this
+    interacts with individual Processes' `max_failures` values. Assume a
+    Task has two Processes and a `max_failures` value of `2`. So both
+    Processes must fail for the Task to fail. Now, assume each of the
+    Task's Processes has its own `max_failures` value of `10`. If
+    Process "A" fails 5 times before succeeding, and Process "B" fails
+    10 times and is then marked as failing, their parent Task succeeds.
+    Even though there were 15 individual failures by its Processes, only
+    1 of its Processes was finally marked as failing. Since 1 is less
+    than the 2 that is the Task's `max_failures` value, the Task does
+    not fail.
+
+-   `max_concurrency`: Defaulting to `0`, the maximum number of
+    concurrent processes in the Task. `0` specifies unlimited
+    concurrency. For Tasks with many expensive but otherwise independent
+    processes, you can limit the amount of concurrency Thermos schedules
+    instead of artificially constraining them through `order`
+    constraints. For example, a test framework may generate a Task with
+    100 test run processes, but runs it in a Task with
+    `resources.cpus=4`. Limit the amount of parallelism to 4 by setting
+    `max_concurrency=4`.
+
+-   `finalization_wait`: Defaulting to `30`, the number of seconds
+    allocated for finalizing the Task's processes. A Task starts in
+    `ACTIVE` state when Processes run and stays there as long as the Task
+    is healthy and Processes run. When all Processes finish successfully
+    or the Task reaches its maximum process failure limit, it goes into
+    `CLEANING` state. In `CLEANING`, it sends `SIGTERM`s to any still running
+    Processes. When all Processes terminate, the Task goes into
+    `FINALIZING` state and invokes the schedule of all processes whose
+    final attribute has a True value. Everything from the end of `ACTIVE`
+    to the end of `FINALIZING` must happen within `finalization_wait`
+    number of seconds. If not, all still running Processes are sent
+    `SIGKILL`s (or if dependent on yet to be completed Processes, are
+    never invoked).
+
+### SequentialTask: Running Processes in Parallel or Sequentially
+
+By default, a Task with several Processes runs them in parallel. There
+are two ways to run Processes sequentially:
+
+-   Include an `order` constraint in the Task definition's `constraints`
+    attribute whose arguments specify the processes' run order:
+
+        Task( ... processes=[process1, process2, process3],
+	          constraints = order(process1, process2, process3), ...)
+
+-   Use `SequentialTask` instead of `Task`; it automatically runs
+    processes in the order specified in the `processes` attribute. No
+    `constraint` parameter is needed:
+
+        SequentialTask( ... processes=[process1, process2, process3] ...)
+
+### SimpleTask
+
+For quickly creating simple tasks, use the `SimpleTask` helper. It
+creates a basic task from a provided name and command line using a
+default set of resources. For example, in a `.aurora` configuration
+file:
+
+    SimpleTask(name="hello_world", command="echo hello world")
+
+is equivalent to
+
+    Task(name="hello_world",
+         processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+         resources=Resources(cpu = 1.0,
+                             ram = 1*GB,
+                             disk = 1*GB))
+
+The simplest idiomatic Job configuration thus becomes:
+
+    import os
+    hello_world_job = Job(
+      task=SimpleTask(name="hello_world", command="echo hello world"),
+      role=os.getenv('USER'),
+      cluster="cluster1")
+
+When written to `hello_world.aurora`, you invoke it with a simple
+`aurora job create cluster1/$USER/test/hello_world hello_world.aurora`.
+
+### Combining tasks
+
+`Tasks.concat` (synonym: `concat_tasks`) and
+`Tasks.combine` (synonym: `combine_tasks`) merge multiple Task definitions
+into a single Task. It may be easier to define complex Jobs
+as smaller constituent Tasks. But since a Job only includes a single
+Task, the subtasks must be combined before using them in a Job.
+Smaller Tasks can also be reused between Jobs, instead of having to
+repeat their definition for multiple Jobs.
+
+With both methods, the merged Task takes the first Task's name. The
+difference between the two is the result Task's process ordering.
+
+-   `Tasks.combine` runs its subtasks' processes in no particular order.
+    The new Task's resource consumption is the sum of all its subtasks'
+    consumption.
+
+-   `Tasks.concat` runs its subtasks in the order supplied, with each
+    subtask's processes run serially between tasks. It is analogous to
+    the `order` constraint helper, except at the Task level instead of
+    the Process level. The new Task's resource consumption is the
+    maximum value specified by any subtask for each Resource attribute
+    (cpu, ram and disk).
+
+For example, given the following:
+
+    setup_task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper],
+      # It is important to note that {{Tasks.concat}} has
+      # no effect on the ordering of the processes within a task;
+      # hence the necessity of the {{order}} statement below
+      # (otherwise, the order in which {{download_interpreter}}
+      # and {{update_zookeeper}} run will be non-deterministic)
+      constraints=order(download_interpreter, update_zookeeper),
+      ...
+    )
+
+    run_task = SequentialTask(
+      ...
+      processes=[download_application, start_application],
+      ...
+    )
+
+    combined_task = Tasks.concat(setup_task, run_task)
+
+The `Tasks.concat` command merges the two Tasks into a single Task and
+ensures all processes in `setup_task` run before the processes
+in `run_task`. Conceptually, the task is reduced to:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper,
+                        download_application, start_application),
+      ...
+    )
+
+In the case of `Tasks.combine`, the two schedules run in parallel:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper) +
+                        order(download_application, start_application),
+      ...
+    )
+
+In the latter case, each of the two sequences may operate in parallel.
+Of course, this may not be the intended behavior (for example, if
+the `start_application` Process implicitly relies
+upon `download_interpreter`). Make sure you understand the difference
+between using one or the other.
+
+## Defining Job Objects
+
+A job is a group of identical tasks that Aurora can run in a Mesos cluster.
+
+A `Job` object is defined by the values of several attributes, some
+required and some optional. The required attributes are:
+
+-   `task`: Task object to bind to this job. Note that a Job can
+    only take a single Task.
+
+-   `role`: Job's role account; in other words, the user account to run
+    the job as on a Mesos cluster machine. A common value is
+    `os.getenv('USER')`; using a Python command to get the user who
+    submits the job request. The other common value is the service
+    account that runs the job, e.g. `www-data`.
+
+-   `environment`: Job's environment, typical values
+    are `devel`, `test`, or `prod`.
+
+-   `cluster`: Aurora cluster to schedule the job in, defined in
+    `/etc/aurora/clusters.json` or `~/.clusters.json`. You can specify
+    jobs where the only difference is the `cluster`, then at run time
+    only run the Job whose job key includes your desired cluster's name.
+
+You usually see a `name` parameter. By default, `name` inherits its
+value from the Job's associated Task object, but you can override this
+default. For these four parameters, a Job definition might look like:
+
+    foo_job = Job( name = 'foo', cluster = 'cluster1',
+              role = os.getenv('USER'), environment = 'prod',
+              task = foo_task)
+
+In addition to the required attributes, there are several optional
+attributes. The first (strongly recommended) optional attribute is:
+
+-   `contact`: An email address for the Job's owner. For production
+    jobs, it is usually a team mailing list.
+
+Two more attributes deal with how to handle failure of the Job's Task:
+
+-   `max_task_failures`: An integer, defaulting to `1`, of the maximum
+    number of Task failures after which the Job is considered failed.
+    `-1` allows for infinite failures.
+
+-   `service`: A boolean, defaulting to `False`, which if `True`
+    restarts tasks regardless of whether they succeeded or failed. In
+    other words, if `True`, after the Job's Task completes, it
+    automatically starts again. This is for Jobs you want to run
+    continuously, rather than doing a single run.
+
+Three attributes deal with configuring the Job's Task:
+
+-   `instances`: Defaulting to `1`, the number of
+    instances/replicas/shards of the Job's Task to create.
+
+-   `priority`: Defaulting to `0`, the Job's Task's preemption priority,
+    for which higher values may preempt Tasks from Jobs with lower
+    values.
+
+-   `production`: a Boolean, defaulting to `False`, specifying that this
+    is a production job backed by quota. Tasks from production Jobs may
+    preempt tasks from any non-production job, and may only be preempted
+    by tasks from production jobs in the same role with higher
+    priority. **WARNING**: To run Jobs at this level, the Job role must
+    have the appropriate quota. To grant quota to a particular role in
+    production, operators use the ``aurora_admin set_quota`` command.
+
+The final three Job attributes each take an object as their value.
+
+-   `update_config`: An `UpdateConfig`
+    object provides parameters for controlling the rate and policy of
+    rolling updates. The `UpdateConfig` parameters are:
+    -   `batch_size`: An integer, defaulting to `1`, specifying the
+        maximum number of shards to update in one iteration.
+    -   `restart_threshold`: An integer, defaulting to `60`, specifying
+        the maximum number of seconds a shard has to move into the
+        `RUNNING` state before it is considered a failure.
+    -   `watch_secs`: An integer, defaulting to `45`, specifying the
+        minimum number of seconds a shard must remain in the `RUNNING`
+        state before it is considered a success.
+    -   `max_per_shard_failures`: An integer, defaulting to `0`,
+        specifying the maximum number of restarts per shard during an
+        update. When the limit is exceeded, it increments the total
+        failure count.
+    -   `max_total_failures`: An integer, defaulting to `0`, specifying
+        the maximum number of shard failures tolerated during an update.
+        Cannot be equal to or greater than the job's total number of
+        tasks.
+-   `health_check_config`: A `HealthCheckConfig` object that provides
+    parameters for controlling a Task's health checks via HTTP. Only
+    used if a health port was assigned with a command line wildcard. The
+    `HealthCheckConfig` parameters are:
+    -   `initial_interval_secs`: An integer, defaulting to `15`,
+        specifying the initial delay for doing an HTTP health check.
+    -   `interval_secs`: An integer, defaulting to `10`, specifying the
+        number of seconds in the interval between checking the Task's
+        health.
+    -   `timeout_secs`: An integer, defaulting to `1`, specifying the
+        number of seconds the application has to respond to an HTTP health
+        check with `OK` before the check is considered a failure.
+    -   `max_consecutive_failures`: An integer, defaulting to `0`,
+        specifying the maximum number of consecutive failures before a
+        task is unhealthy.
+-   `constraints`: A `dict` Python object, specifying Task scheduling
+    constraints. Most users will not need to specify constraints, as the
+    scheduler automatically inserts reasonable defaults. Please do not
+    set this field unless you are sure of what you are doing. See the
+    section in the Aurora + Thermos Reference manual on [Specifying
+    Scheduling Constraints](/documentation/0.8.0/configuration-reference/) for more information.
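+
+To illustrate how the optional attributes fit together, here is a sketch of a
+service Job that sets several of them; the contact address, the specific
+values, and the `hello_world_task` Task object are hypothetical and assumed to
+be defined earlier in the same `.aurora` file:
+
+    # Hypothetical values for illustration only.
+    hello_world_service = Job(
+      name = 'hello_world',
+      cluster = 'cluster1',
+      role = os.getenv('USER'),
+      environment = 'prod',
+      contact = 'team@example.com',
+      instances = 3,
+      service = True,
+      update_config = UpdateConfig(batch_size = 1, watch_secs = 45),
+      health_check_config = HealthCheckConfig(initial_interval_secs = 15,
+                                              interval_secs = 10),
+      task = hello_world_task)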
+
+## The jobs List
+
+At the end of your `.aurora` file, you need to specify a list of the
+file's defined Jobs to run in the order listed. For example, the
+following runs first `job1`, then `job2`, then `job3`.
+
+    jobs = [job1, job2, job3]
+
+Templating
+----------
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a templating library called
+*Pystachio*, a powerful tool for configuration specification and reuse.
+
+[Aurora+Thermos Configuration Reference](/documentation/0.8.0/configuration-reference/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the *Aurora+Thermos Configuration
+Reference* without `import` statements - the Aurora config loader
+injects them automatically. Other than that, the `.aurora` format
+works like any other Python script.
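+
+For example (a sketch with hypothetical names), ordinary Python constructs such
+as list comprehensions can be mixed freely with Pystachio objects:
+
+    # Plain Python generating Pystachio objects; the names are hypothetical.
+    echo_processes = [
+      Process(name = 'echo_%d' % i, cmdline = 'echo step %d' % i)
+      for i in range(3)
+    ]
+
+    echo_task = Task(
+      name = 'echo_steps',
+      processes = echo_processes,
+      constraints = order(*echo_processes))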
+
+### Templating 1: Binding in Pystachio
+
+Pystachio uses the visually distinctive {{}} to indicate template
+variables. These are often called "mustache variables" after the
+similarly appearing variables in the Mustache templating system and
+because the curly braces resemble mustaches.
+
+If you are familiar with the Mustache system, note that templates in Pystachio
+differ in significant ways: they have no nesting, joining, or
+inheritance semantics. On the other hand, templates are evaluated
+iteratively, which affords some level of indirection.
+
+Let's start with the simplest template: text with one
+variable, in this case `name`:
+
+    Hello {{name}}
+
+If we evaluate this as is, we'd get back:
+
+    Hello
+
+If a template variable doesn't have a value, when evaluated it's
+replaced with nothing. If we add a binding to give it a value:
+
+    { "name" : "Tom" }
+
+We'd get back:
+
+    Hello Tom
+
+Every Pystachio object has an associated `.bind` method that can bind
+values to {{}} variables. Bindings are not immediately evaluated.
+Instead, they are evaluated only when the interpolated value of the
+object is necessary, e.g. for performing equality or serializing a
+message over the wire.
+
+Objects with and without mustache templated variables behave
+differently:
+
+    >>> Float(1.5)
+    Float(1.5)
+
+    >>> Float('{{x}}.5')
+    Float({{x}}.5)
+
+    >>> Float('{{x}}.5').bind(x = 1)
+    Float(1.5)
+
+    >>> Float('{{x}}.5').bind(x = 1) == Float(1.5)
+    True
+
+    >>> contextual_object = String('{{metavar{{number}}}}').bind(
+    ... metavar1 = "first", metavar2 = "second")
+
+    >>> contextual_object
+    String({{metavar{{number}}}})
+
+    >>> contextual_object.bind(number = 1)
+    String(first)
+
+    >>> contextual_object.bind(number = 2)
+    String(second)
+
+You usually bind simple key to value pairs, but you can also bind three
+other objects: lists, dictionaries, and structurals. These will be
+described in detail later.
+
+### Structurals in Pystachio / Aurora
+
+Most Aurora/Thermos users don't ever (knowingly) interact with `String`,
+`Float`, or `Integer` Pystachio objects directly. Instead they interact
+with derived structural (`Struct`) objects that are collections of
+fundamental and structural objects. The structural object components are
+called *attributes*. Aurora's most used structural objects are `Job`,
+`Task`, and `Process`:
+
+    class Process(Struct):
+      cmdline = Required(String)
+      name = Required(String)
+      max_failures = Default(Integer, 1)
+      daemon = Default(Boolean, False)
+      ephemeral = Default(Boolean, False)
+      min_duration = Default(Integer, 5)
+      final = Default(Boolean, False)
+
+Construct default objects by following the object's type with (). If you
+want an attribute to have a value different from its default, include
+the attribute name and value inside the parentheses.
+
+    >>> Process()
+    Process(daemon=False, max_failures=1, ephemeral=False,
+      min_duration=5, final=False)
+
+Attribute values can be template variables, which then receive specific
+values when creating the object.
+
+    >>> Process(cmdline = 'echo {{message}}')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo {{message}}, final=False)
+
+    >>> Process(cmdline = 'echo {{message}}').bind(message = 'hello world')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+A powerful binding property is that all of an object's children inherit its
+bindings:
+
+    >>> List(Process)([
+    ... Process(name = '{{prefix}}_one'),
+    ... Process(name = '{{prefix}}_two')
+    ... ]).bind(prefix = 'hello')
+    ProcessList(
+      Process(daemon=False, name=hello_one, max_failures=1, ephemeral=False, min_duration=5, final=False),
+      Process(daemon=False, name=hello_two, max_failures=1, ephemeral=False, min_duration=5, final=False)
+      )
+
+Remember that an Aurora Job contains Tasks which contain Processes. A
+Job level binding is inherited by its Tasks and all their Processes.
+Similarly a Task level binding is available to that Task and its
+Processes but is *not* visible at the Job level (inheritance is a
+one-way street.)
+
+#### Mustaches Within Structurals
+
+When you define a `Struct` schema, one powerful, but confusing, feature
+is that all of that structure's attributes are Mustache variables within
+the enclosing scope *once they have been populated*.
+
+For example, when `Process` is defined above, all its attributes such as
+`{{name}}`, `{{cmdline}}`, `{{max_failures}}`, etc., are immediately
+defined as Mustache variables, implicitly bound into the `Process`, and
+inherited by all child objects once they are defined.
+
+Thus, you can do the following:
+
+    >>> Process(name = "installer", cmdline = "echo {{name}} is running")
+    Process(daemon=False, name=installer, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo installer is running, final=False)
+
+WARNING: This binding only takes place in one direction. For example,
+the following does NOT work and does not set the `Process` `name`
+attribute's value.
+
+    >>> Process().bind(name = "installer")
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5, final=False)
+
+The following is also not possible and results in an infinite loop that
+attempts to resolve `Process.name`.
+
+    >>> Process(name = '{{name}}').bind(name = 'installer')
+
+Do not confuse Structural attributes with bound Mustache variables.
+Attributes are implicitly converted to Mustache variables but not vice
+versa.
+
+### Templating 2: Structurals Are Factories
+
+#### A Second Way of Templating
+
+A second templating method is as powerful as the one just described and is
+often confused with it. This method is due to the automatic conversion of
+Struct attributes to Mustache variables, as described above.
+
+Suppose you create a Process object:
+
+    >>> p = Process(name = "process_one", cmdline = "echo hello world")
+
+    >>> p
+    Process(daemon=False, name=process_one, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+This `Process` object, "`p`", can be used wherever a `Process` object is
+needed. It can also be reused by changing the value(s) of its
+attribute(s). Here we change its `name` attribute from `process_one` to
+`process_two`.
+
+    >>> p(name = "process_two")
+    Process(daemon=False, name=process_two, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+Template creation is a common use for this technique:
+
+    >>> Daemon = Process(daemon = True)
+    >>> logrotate = Daemon(name = 'logrotate', cmdline = './logrotate conf/logrotate.conf')
+    >>> mysql = Daemon(name = 'mysql', cmdline = 'bin/mysqld --safe-mode')
+
+### Advanced Binding
+
+As described above, `.bind()` binds simple strings or numbers to
+Mustache variables. In addition to Structural types formed by combining
+atomic types, Pystachio has two container types; `List` and `Map` which
+can also be bound via `.bind()`.
+
+#### Bind Syntax
+
+The `bind()` function can take Python dictionaries or `kwargs`
+interchangeably (when `**kwargs` appears in a function definition, `kwargs`
+receives a Python dictionary containing all keyword arguments after the
+formal parameter list).
+
+    >>> String('{{foo}}').bind(foo = 'bar') == String('{{foo}}').bind({'foo': 'bar'})
+    True
+
+Bindings done "closer" to the object in question take precedence:
+
+    >>> p = Process(name = '{{context}}_process')
+    >>> t = Task().bind(context = 'global')
+    >>> t(processes = [p, p.bind(context = 'local')])
+    Task(processes=ProcessList(
+      Process(daemon=False, name=global_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5),
+      Process(daemon=False, name=local_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5)
+    ))
+
+#### Binding Complex Objects
+
+##### Lists
+
+    >>> fibonacci = List(Integer)([1, 1, 2, 3, 5, 8, 13])
+    >>> String('{{fib[4]}}').bind(fib = fibonacci)
+    String(5)
+
+##### Maps
+
+    >>> first_names = Map(String, String)({'Kent': 'Clark', 'Wayne': 'Bruce', 'Prince': 'Diana'})
+    >>> String('{{first[Kent]}}').bind(first = first_names)
+    String(Clark)
+
+##### Structurals
+
+    >>> String('{{p.cmdline}}').bind(p = Process(cmdline = "echo hello world"))
+    String(echo hello world)
+
+### Structural Binding
+
+Use structural templates when binding more than two or three individual
+values at the Job or Task level. For fewer values, standard
+key-to-string binding is sufficient.
+
+Structural binding is a very powerful pattern and is most useful in
+Aurora/Thermos for doing Structural configuration. For example, you can
+define a job profile. The following profile uses `HDFS`, the Hadoop
+Distributed File System, to designate a file's location. `HDFS` does
+not come with Aurora, so you'll need to either install it separately
+or change the way the dataset is designated.
+
+    class Profile(Struct):
+      version = Required(String)
+      environment = Required(String)
+      dataset = Default(String, 'hdfs://home/aurora/data/{{environment}}')
+
+    PRODUCTION = Profile(version = 'live', environment = 'prod')
+    DEVEL = Profile(version = 'latest',
+                    environment = 'devel',
+                    dataset = 'hdfs://home/aurora/data/test')
+    TEST = Profile(version = 'latest', environment = 'test')
+
+    JOB_TEMPLATE = Job(
+      name = 'application',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = '{{profile.environment}}',
+      task = SequentialTask(
+        name = 'task',
+        resources = Resources(cpu = 2, ram = 4*GB, disk = 8*GB),
+        processes = [
+          Process(name = 'main',
+                  cmdline = 'java -jar application.jar -hdfsPath {{profile.dataset}}')
+        ]
+       )
+     )
+
+    jobs = [
+      JOB_TEMPLATE(instances = 100).bind(profile = PRODUCTION),
+      JOB_TEMPLATE.bind(profile = DEVEL),
+      JOB_TEMPLATE.bind(profile = TEST),
+     ]
+
+In this case, a custom structural "Profile" is created to self-document
+the configuration to some degree. This also allows some schema
+"type-checking" and default self-substitution, e.g. in
+`Profile.dataset` above.
+
+So rather than a `.bind()` with a half-dozen substituted variables, you
+can bind a single object that has sensible defaults stored in a single
+place.
+
+Configuration File Writing Tips And Best Practices
+--------------------------------------------------
+
+### Use As Few .aurora Files As Possible
+
+When creating your `.aurora` configuration, try to keep all versions of
+a particular job within the same `.aurora` file. For example, if you
+have separate jobs for `cluster1`, `cluster1` staging, `cluster1`
+testing, and `cluster2`, keep them as close together as possible.
+
+Constructs shared across multiple jobs owned by your team (e.g.
+team-level defaults or structural templates) can be split into separate
+`.aurora` files and included via the `include` directive, as shown below.
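+
+A sketch of what this might look like; the shared file name and the
+`TEAM_TASK_TEMPLATE` object it defines are hypothetical:
+
+    # team_templates.aurora (hypothetical) defines templates shared by the team.
+    include('team_templates.aurora')
+
+    # Objects defined in the included file are available in this file.
+    staging_task = TEAM_TASK_TEMPLATE(name = 'staging_task')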
+
+### Avoid Boilerplate
+
+If you see repetition or find yourself copy and pasting any parts of
+your configuration, it's likely an opportunity for templating. Take the
+example below:
+
+`redundant.aurora` contains:
+
+    download = Process(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'rm -rf Python-2.7.3 && tar xjf Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    build = Process(
+      name = 'build',
+      cmdline = 'pushd Python-2.7.3 && ./configure && make && popd',
+      max_failures = 1)
+
+    email = Process(
+      name = 'email',
+      cmdline = 'echo Success | mail feynman@tmc.com',
+      max_failures = 5,
+      min_duration = 1)
+
+    build_python = Task(
+      name = 'build_python',
+      processes = [download, unpack, build, email],
+      constraints = [Constraint(order = ['download', 'unpack', 'build', 'email'])])
+
+As you'll notice, there's a lot of repetition in the `Process`
+definitions. For example, almost every process sets `max_failures`
+to 5 and `min_duration` to 1. This is an opportunity for factoring
+into a common process template.
+
+Furthermore, the Python version is repeated everywhere. This can be
+bound via structural templating as described in the [Advanced Binding](#advanced-binding)
+section.
+
+`less_redundant.aurora` contains:
+
+    class Python(Struct):
+      version = Required(String)
+      base = Default(String, 'Python-{{version}}')
+      package = Default(String, '{{base}}.tar.bz2')
+
+    ReliableProcess = Process(
+      max_failures = 5,
+      min_duration = 1)
+
+    download = ReliableProcess(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/{{python.version}}/{{python.package}}')
+
+    unpack = ReliableProcess(
+      name = 'unpack',
+      cmdline = 'rm -rf {{python.base}} && tar xjf {{python.package}}')
+
+    build = ReliableProcess(
+      name = 'build',
+      cmdline = 'pushd {{python.base}} && ./configure && make && popd',
+      max_failures = 1)
+
+    email = ReliableProcess(
+      name = 'email',
+      cmdline = 'echo Success | mail {{role}}@foocorp.com')
+
+    build_python = SequentialTask(
+      name = 'build_python',
+      processes = [download, unpack, build, email]).bind(python = Python(version = "2.7.3"))
+
+### Thermos Uses bash, But Thermos Is Not bash
+
+#### Bad
+
+Many tiny Processes make for a configuration that is harder to manage.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'rcp user@my_machine:my_application .'
+    )
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+#### Good
+
+Each `cmdline` runs in a bash subshell, so you have the full power of
+bash. Chaining commands with `&&` or `||` is almost always the right
+thing to do.
+
+Also for Tasks that are simply a list of processes that run one after
+another, consider using the `SequentialTask` helper which applies a
+linear ordering constraint for you.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'rcp user@my_machine:my_application . && unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+### Rarely Use Functions In Your Configurations
+
+90% of the time you define a function in a `.aurora` file, you're
+probably Doing It Wrong(TM).
+
+#### Bad
+
+    def get_my_task(name, user, cpu, ram, disk):
+      return Task(
+        name = name,
+        user = user,
+        processes = [STAGE_PROCESS, RUN_PROCESS],
+        constraints = order(STAGE_PROCESS, RUN_PROCESS),
+        resources = Resources(cpu = cpu, ram = ram, disk = disk)
+      )
+
+    task_one = get_my_task('task_one', 'feynman', 1.0, 32*MB, 1*GB)
+    task_two = get_my_task('task_two', 'feynman', 2.0, 64*MB, 1*GB)
+
+#### Good
+
+This one is more idiomatic. Forced keyword arguments prevent accidents,
+e.g. constructing a task with `32*MB` when you mean 32 MB of RAM and not
+disk. Less proliferation of task-construction techniques makes for an
+easier-to-read, quicker-to-understand, and more composable
+configuration.
+
+    TASK_TEMPLATE = SequentialTask(
+      user = 'wickman',
+      processes = [STAGE_PROCESS, RUN_PROCESS],
+    )
+
+    task_one = TASK_TEMPLATE(
+      name = 'task_one',
+      resources = Resources(cpu = 1.0, ram = 32*MB, disk = 1*GB) )
+
+    task_two = TASK_TEMPLATE(
+      name = 'task_two',
+      resources = Resources(cpu = 2.0, ram = 64*MB, disk = 1*GB)
+    )
diff --git a/source/documentation/0.8.0/contributing.md b/source/documentation/0.8.0/contributing.md
new file mode 100644
index 0000000..14d93a2
--- /dev/null
+++ b/source/documentation/0.8.0/contributing.md
@@ -0,0 +1,76 @@
+Get the Source Code
+-------------------
+First things first, you'll need the source! The Aurora source is available from Apache git:
+
+    git clone https://gitbox.apache.org/repos/asf/aurora
+
+Read the Style Guides
+---------------------
+Aurora's codebase is primarily Java and Python and conforms to the Twitter Commons styleguides for
+both languages.
+
+- [Java Style Guide](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/styleguide.md)
+- [Python Style Guide](https://github.com/twitter/commons/blob/master/src/python/twitter/common/styleguide.md)
+
+Find Something to Do
+--------------------
+There are issues in [Jira](https://issues.apache.org/jira/browse/AURORA) with the
+["newbie" label](https://issues.apache.org/jira/issues/?jql=project%20%3D%20AURORA%20AND%20labels%20%3D%20newbie%20and%20resolution%3Dunresolved)
+that are good starting places for new Aurora contributors; pick one of these and dive in! Once
+you've got a patch, the next step is to post a review.
+
+Getting your ReviewBoard Account
+--------------------------------
+Go to https://reviews.apache.org and create an account.
+
+Setting up your ReviewBoard Environment
+---------------------------------------
+Run `./rbt status`. The first time this runs it will bootstrap and you will be asked to log in.
+Subsequent runs will cache your login credentials.
+
+Submitting a Patch for Review
+-----------------------------
+Post a review with `rbt`, fill out the fields in your browser and hit Publish.
+
+    ./rbt post -o
+
+Once you've done this, you probably want to mark the associated Jira issue as Reviewable.
+
+Updating an Existing Review
+---------------------------
+Incorporate review feedback, make some more commits, update your existing review, fill out the
+fields in your browser and hit Publish.
+
+    ./rbt post -o -r <RB_ID>
+
+Merging Your Own Review (Committers)
+------------------------------------
+Once you have shipits from the right committers, merge your changes in a single commit and mark
+the review as submitted. The typical workflow is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>  # Verify the automatically-generated commit message looks sane,
+                            # editing if necessary.
+    git show master         # Verify everything looks sane
+    git push origin master
+    ./rbt close <RB_ID>
+
+Note that even if you're developing using feature branches you will not use `git merge` - each
+commit will be an atomic change accompanied by a ReviewBoard entry.
+
+Merging Someone Else's Review
+-----------------------------
+Sometimes you'll need to merge someone else's RB. The typical workflow for this is
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>
+    git show master  # Verify everything looks sane, author is correct
+    git push origin master
+
+Cleaning Up
+-----------
+Your patch has landed, congratulations! The last thing you'll want to do before moving on to your
+next fix is to clean up your Jira and ReviewBoard entries: the former should be marked as
+"Resolved" and the latter as "Submitted".
diff --git a/source/documentation/0.8.0/cron-jobs.md b/source/documentation/0.8.0/cron-jobs.md
new file mode 100644
index 0000000..8304c8a
--- /dev/null
+++ b/source/documentation/0.8.0/cron-jobs.md
@@ -0,0 +1,131 @@
+# Cron Jobs
+
+Aurora supports execution of scheduled jobs on a Mesos cluster using cron-style syntax.
+
+- [Overview](#overview)
+- [Collision Policies](#collision-policies)
+	- [KILL_EXISTING](#kill_existing)
+	- [CANCEL_NEW](#cancel_new)
+- [Failure recovery](#failure-recovery)
+- [Interacting with cron jobs via the Aurora CLI](#interacting-with-cron-jobs-via-the-aurora-cli)
+	- [cron schedule](#cron-schedule)
+	- [cron deschedule](#cron-deschedule)
+	- [cron start](#cron-start)
+	- [job killall, job restart, job kill](#job-killall-job-restart-job-kill)
+- [Technical Note About Syntax](#technical-note-about-syntax)
+- [Caveats](#caveats)
+	- [Failovers](#failovers)
+	- [Collision policy is best-effort](#collision-policy-is-best-effort)
+	- [Timezone Configuration](#timezone-configuration)
+
+## Overview
+
+A job is identified as a cron job by the presence of a
+`cron_schedule` attribute containing a cron-style schedule in the
+[`Job`](/documentation/0.8.0/configuration-reference/#job-objects) object. Examples of cron schedules
+include "every 5 minutes" (`*/5 * * * *`), "Fridays at 17:00" (`* 17 * * FRI`), and
+"the 1st and 15th day of the month at 03:00" (`0 3 1,15 *`).
+
+Example (available in the [Vagrant environment](/documentation/0.8.0/vagrant/)):
+
+    $ cat /vagrant/examples/job/cron_hello_world.aurora
+    # cron_hello_world.aurora
+    # A cron job that runs every 5 minutes.
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]
+
+## Collision Policies
+
+The `cron_collision_policy` field specifies the scheduler's behavior when a new cron job is
+triggered while an older run hasn't finished. The scheduler has two policies available,
+[KILL_EXISTING](#kill_existing) and [CANCEL_NEW](#cancel_new).
+
+### KILL_EXISTING
+
+The default policy - on a collision, the old instances are killed and instances with the current
+configuration are started.
+
+### CANCEL_NEW
+
+On a collision the new run is cancelled.
+
+Note that the use of this flag is likely a code smell - interrupted cron jobs should be able
+to recover their progress on a subsequent invocation, otherwise they risk having their work queue
+grow faster than they can process it.
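+
+For example, the `cron_hello_world` job above could opt into this policy by
+setting the attribute (a sketch; the policy is given as a string, matching the
+configuration reference):
+
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        cron_collision_policy = 'CANCEL_NEW',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]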
+
+## Failure recovery
+
+Unlike with services, which Aurora will always re-execute regardless of exit status, instances of
+cron jobs retry according to the `max_task_failures` attribute of the
+[Task](/documentation/0.8.0/configuration-reference/#task-objects) object. To get "run-until-failure" semantics,
+set `max_task_failures` to `-1`.
+
+## Interacting with cron jobs via the Aurora CLI
+
+Most interaction with cron jobs takes place using the `cron` subcommand. See `aurora cron -h`
+for up-to-date usage instructions.
+
+### cron schedule
+Schedules a new cron job on the Aurora cluster for later runs, or replaces the existing cron template
+with a new one. Only future runs will be affected; any existing active tasks are left intact.
+
+    $ aurora cron schedule devcluster/www-data/test/cron_hello_world /vagrant/examples/jobs/cron_hello_world.aurora
+
+### cron deschedule
+Deschedules a cron job, preventing future runs but allowing current runs to complete.
+
+    $ aurora cron deschedule devcluster/www-data/test/cron_hello_world
+
+### cron start
+Start a cron job immediately, outside of its normal cron schedule.
+
+    $ aurora cron start devcluster/www-data/test/cron_hello_world
+
+### job killall, job restart, job kill
+Cron jobs create instances running on the cluster that you can interact with like normal Aurora
+tasks with `job kill` and `job restart`.
+
+## Technical Note About Syntax
+
+`cron_schedule` uses a restricted subset of BSD crontab syntax. While the
+execution engine currently uses Quartz, the schedule parsing is custom and supports a subset of FreeBSD
+[crontab(5)](http://www.freebsd.org/cgi/man.cgi?crontab(5)) syntax. See
+[the source](https://github.com/apache/aurora/blob/master/src/main/java/org/apache/aurora/scheduler/cron/CrontabEntry.java#L106-L124)
+for details.
+
+## Caveats
+
+### Failovers
+No failover recovery. Aurora does not record the latest minute it fired
+triggers for across failovers. Therefore it's possible to miss triggers
+on failover. Note that this behavior may change in the future.
+
+It's necessary to sync time between schedulers with something like `ntpd`.
+Clock skew could cause double or missed triggers in the case of a failover.
+
+### Collision policy is best-effort
+Aurora aims to always have *at least one copy* of a given instance running at a time - it's
+an AP system, meaning it chooses Availability and Partition Tolerance at the expense of
+Consistency.
+
+If your collision policy is `CANCEL_NEW` and a task has terminated but
+Aurora has not yet noticed this, Aurora will go ahead and create your new
+task.
+
+If your collision policy is `KILL_EXISTING` and a task was marked `LOST`
+but not yet garbage collected, Aurora will go ahead and create your new task without
+attempting to kill the old one (outside the GC interval).
+
+### Timezone Configuration
+Cron timezone is configured independently of JVM timezone with the `-cron_timezone` flag and
+defaults to UTC.
diff --git a/source/documentation/0.8.0/deploying-aurora-scheduler.md b/source/documentation/0.8.0/deploying-aurora-scheduler.md
new file mode 100644
index 0000000..e2d17ce
--- /dev/null
+++ b/source/documentation/0.8.0/deploying-aurora-scheduler.md
@@ -0,0 +1,289 @@
+# Deploying the Aurora Scheduler
+
+When setting up your cluster, you will install the scheduler on a small number (usually 3 or 5) of
+machines.  This guide helps you get the scheduler set up and troubleshoot some common hurdles.
+
+- [Installing Aurora](#installing-aurora)
+  - [Creating the Distribution .zip File (Optional)](#creating-the-distribution-zip-file-optional)
+  - [Installing Aurora](#installing-aurora-1)
+- [Configuring Aurora](#configuring-aurora)
+  - [A Note on Configuration](#a-note-on-configuration)
+  - [Replicated Log Configuration](#replicated-log-configuration)
+  - [Initializing the Replicated Log](#initializing-the-replicated-log)
+  - [Storage Performance Considerations](#storage-performance-considerations)
+  - [Network considerations](#network-considerations)
+  - [Considerations for running jobs in docker](#considerations-for-running-jobs-in-docker)
+  - [Security Considerations](#security-considerations)
+- [Running Aurora](#running-aurora)
+  - [Maintaining an Aurora Installation](#maintaining-an-aurora-installation)
+  - [Monitoring](#monitoring)
+  - [Running stateful services](#running-stateful-services)
+    - [Dedicated attribute](#dedicated-attribute)
+      - [Syntax](#syntax)
+      - [Example](#example)
+- [Common problems](#common-problems)
+  - [Replicated log not initialized](#replicated-log-not-initialized)
+    - [Symptoms](#symptoms)
+    - [Solution](#solution)
+  - [Scheduler not registered](#scheduler-not-registered)
+    - [Symptoms](#symptoms-1)
+    - [Solution](#solution-1)
+  - [Tasks are stuck in PENDING forever](#tasks-are-stuck-in-pending-forever)
+    - [Symptoms](#symptoms-2)
+    - [Solution](#solution-2)
+
+## Installing Aurora
+The Aurora scheduler is a standalone Java server. As part of the build process it creates a bundle
+of all its dependencies, with the notable exceptions of the JVM and libmesos. Each target server
+should have a JVM (Java 7 or higher) and libmesos (0.22.0) installed.
+
+### Creating the Distribution .zip File (Optional)
+To create a distribution for installation you will need build tools installed. On Ubuntu this can be
+done with `sudo apt-get install build-essential default-jdk`.
+
+    git clone http://gitbox.apache.org/repos/asf/aurora.git
+    cd aurora
+    ./gradlew distZip
+
+Copy the generated `dist/distributions/aurora-scheduler-*.zip` to each node that will run a scheduler.
+
+### Installing Aurora
+Extract the aurora-scheduler zip file. The example configurations assume it is extracted to
+`/usr/local/aurora-scheduler`.
+
+    sudo unzip dist/distributions/aurora-scheduler-*.zip -d /usr/local
+    sudo ln -nfs "$(ls -dt /usr/local/aurora-scheduler-* | head -1)" /usr/local/aurora-scheduler
+
+## Configuring Aurora
+
+### A Note on Configuration
+Like Mesos, Aurora uses command-line flags for runtime configuration. As such the Aurora
+"configuration file" is typically a `scheduler.sh` shell script of the form.
+
+    #!/bin/bash
+    AURORA_HOME=/usr/local/aurora-scheduler
+
+    # Flags controlling the JVM.
+    JAVA_OPTS=(
+      -Xmx2g
+      -Xms2g
+      # GC tuning, etc.
+    )
+
+    # Flags controlling the scheduler.
+    AURORA_FLAGS=(
+      -http_port=8081
+      # Log configuration, etc.
+    )
+
+    # Environment variables controlling libmesos
+    export JAVA_HOME=...
+    export GLOG_v=1
+    export LIBPROCESS_PORT=8083
+
+    JAVA_OPTS="${JAVA_OPTS[*]}" exec "$AURORA_HOME/bin/aurora-scheduler" "${AURORA_FLAGS[@]}"
+
+That way Aurora's current flags are visible in `ps` and in the `/vars` admin endpoint.
+
+Examples are available under `examples/scheduler/`. For a list of available Aurora flags and their
+documentation run
+
+    /usr/local/aurora-scheduler/bin/aurora-scheduler -help
+
+### Replicated Log Configuration
+All Aurora state is persisted to a replicated log. This includes all jobs Aurora is running,
+where in the cluster they are being run, and the configuration for running them, as
+well as other information such as metadata needed to reconnect to the Mesos master, resource
+quotas, and any locks in place.
+
+Aurora schedulers use ZooKeeper to discover log replicas and elect a leader. Only one scheduler is
+leader at a given time - the other schedulers follow log writes and prepare to take over as leader
+but do not communicate with the Mesos master. Either 3 or 5 schedulers are recommended in a
+production deployment depending on failure tolerance and they must have persistent storage.
+
+In a cluster with `N` schedulers, the flag `-native_log_quorum_size` should be set to
+`floor(N/2) + 1`. So in a cluster with 1 scheduler it should be set to `1`, in a cluster with 3 it
+should be set to `2`, and in a cluster of 5 it should be set to `3`.
+
+  Number of schedulers (N) | ```-native_log_quorum_size``` setting (```floor(N/2) + 1```)
+  ------------------------ | -------------------------------------------------------------
+  1                        | 1
+  3                        | 2
+  5                        | 3
+  7                        | 4
+
+*Incorrectly setting this flag will cause data corruption to occur!*
+
+See [this document](/documentation/0.8.0/storage-config/#scheduler-storage-configuration-flags) for more replicated
+log and storage configuration options.
+
+### Initializing the Replicated Log
+Before you start Aurora you will also need to initialize the log on a majority of the schedulers.
+
+    mesos-log initialize --path="/path/to/native/log"
+
+The `--path` flag should match the `--native_log_file_path` flag to the scheduler.
+Failing to do this will result in the following message when you try to start the scheduler.
+
+    Replica in EMPTY status received a broadcasted recover request
+
+### Storage Performance Considerations
+
+See [this document](/documentation/0.8.0/scheduler-storage/) for scheduler storage performance considerations.
+
+### Network considerations
+The Aurora scheduler listens on 2 ports - an HTTP port used for client RPCs and a web UI,
+and a libprocess (HTTP+Protobuf) port used to communicate with the Mesos master and for the log
+replication protocol. These can be left unconfigured (the scheduler publishes all selected ports
+to ZooKeeper) or explicitly set in the startup script as follows:
+
+    # ...
+    AURORA_FLAGS=(
+      # ...
+      -http_port=8081
+      # ...
+    )
+    # ...
+    export LIBPROCESS_PORT=8083
+    # ...
+
+### Considerations for running jobs in docker containers
+*Note: Docker support is currently EXPERIMENTAL.*
+
+In order for Aurora to launch jobs using docker containers, a few extra configuration options
+must be set.  The [docker containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/)
+must be enabled on the mesos slaves by launching them with the `--containerizers=docker,mesos` option.
+
+By default, Aurora will configure Mesos to copy the file specified in `-thermos_executor_path`
+into the container's sandbox.  If using a wrapper script to launch the thermos executor,
+specify the path to the wrapper in that argument. In addition, the path to the executor pex itself
+must be included in the `-thermos_executor_resources` option. Doing so will ensure that both the
+wrapper script and executor are correctly copied into the sandbox. Finally, ensure the wrapper
+script does not access resources outside of the sandbox, as when the script is run from within a
+docker container those resources will not exist.
+
+A scheduler flag, `-global_container_mounts` allows mounting paths from the host (i.e., the slave)
+into all containers on that host. The format is a comma-separated list of `host_path:container_path[:mode]`
+tuples. For example `-global_container_mounts=/opt/secret_keys_dir:/mnt/secret_keys_dir:ro` mounts
+`/opt/secret_keys_dir` from the slaves into all launched containers. Valid modes are `ro` and `rw`.
+
+In order to correctly execute processes inside a job, the docker container must have python 2.7
+installed.
+
+## Running Aurora
+Configure a supervisor like [Monit](http://mmonit.com/monit/) or
+[supervisord](http://supervisord.org/) to run the created `scheduler.sh` file and restart it
+whenever it fails. Aurora expects to be restarted by an external process when it fails. Aurora
+supports an active health checking protocol on its admin HTTP interface - if a `GET /health` times
+out or returns anything other than `200 OK` the scheduler process is unhealthy and should be
+restarted.
+
+For example, monit can be configured with
+
+    if failed port 8081 send "GET /health HTTP/1.0\r\n" expect "OK\n" with timeout 2 seconds for 10 cycles then restart
+
+assuming you set `-http_port=8081`.
+
+## Security Considerations
+
+See [security.md](/documentation/0.8.0/security/).
+
+### Maintaining an Aurora Installation
+
+### Monitoring
+Please see our dedicated [monitoring guide](/documentation/0.8.0/monitoring/) for in-depth discussion on monitoring.
+
+### Running stateful services
+Aurora is best suited to run stateless applications, but it also accommodates stateful services
+like databases, or services that otherwise need to always run on the same machines.
+
+#### Dedicated attribute
+The Mesos slave has the `--attributes` command line argument which can be used to mark a slave with
+static attributes (not to be confused with `--resources`, which are dynamic and accounted).
+
+Aurora makes these attributes available for matching with scheduling
+[constraints](/documentation/0.8.0/configuration-reference/#specifying-scheduling-constraints).  Most of these
+constraints are arbitrary and available for custom use.  There is one exception, though: the
+`dedicated` attribute.  Aurora treats this specially: only matching jobs are allowed to run on these
+machines, and those jobs will only be scheduled on these machines.
+
+##### Syntax
+The dedicated attribute has semantic meaning. The format is `$role(/.*)?`. When a job is created,
+the scheduler requires that the `$role` component matches the `role` field in the job
+configuration, and will reject the job creation otherwise.  The remainder of the attribute is
+free-form. We've developed the idiom of formatting this attribute as `$role/$job`, but do not
+enforce this.
+
+##### Example
+Consider the following slave command line:
+
+    mesos-slave --attributes="host:$HOST;rack:$RACK;dedicated:db_team/redis" ...
+
+And this job configuration:
+
+    Service(
+      name = 'redis',
+      role = 'db_team',
+      constraints = {
+        'dedicated': 'db_team/redis'
+      },
+      ...
+    )
+
+The job configuration is indicating that it should only be scheduled on slaves with the attribute
+`dedicated:db_team/redis`.  Additionally, Aurora will prevent any tasks that do _not_ have that
+constraint from running on those slaves.
+
+
+## Common problems
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions here to help get you moving.
+
+### Replicated log not initialized
+
+#### Symptoms
+- Scheduler RPCs and web interface claim `Storage is not READY`
+- Scheduler log repeatedly prints messages like
+
+  ```
+  I1016 16:12:27.234133 26081 replica.cpp:638] Replica in EMPTY status
+  received a broadcasted recover request
+  I1016 16:12:27.234256 26084 recover.cpp:188] Received a recover response
+  from a replica in EMPTY status
+  ```
+
+#### Solution
+When you create a new cluster, you need to inform a quorum of schedulers that they are safe to
+consider their database to be empty by [initializing](#initializing-the-replicated-log) the
+replicated log. This is done to prevent the scheduler from modifying the cluster state in the event
+of multiple simultaneous disk failures or, more likely, misconfiguration of the replicated log path.
+
+### Scheduler not registered
+
+#### Symptoms
+Scheduler log contains
+
+    Framework has not been registered within the tolerated delay.
+
+#### Solution
+Double-check that the scheduler is configured correctly to reach the master. If you are registering
+the master in ZooKeeper, make sure the command line argument to the master:
+
+    --zk=zk://$ZK_HOST:2181/mesos/master
+
+is the same as the one on the scheduler:
+
+    -mesos_master_address=zk://$ZK_HOST:2181/mesos/master
+
+### Tasks are stuck in `PENDING` forever
+
+#### Symptoms
+The scheduler is registered, and [receiving offers](/documentation/0.8.0/monitoring/#scheduler_resource_offers),
+but tasks are perpetually shown as `PENDING - Constraint not satisfied: host`.
+
+#### Solution
+Check that your slaves are configured with `host` and `rack` attributes.  Aurora requires that
+slaves are tagged with these two common failure domains to ensure that it can safely place tasks
+such that jobs are resilient to failure.
+
+See our [vagrant example](examples/vagrant/upstart/mesos-slave.conf) for details.
diff --git a/source/documentation/0.8.0/design/command-hooks.md b/source/documentation/0.8.0/design/command-hooks.md
new file mode 100644
index 0000000..3f3f70f
--- /dev/null
+++ b/source/documentation/0.8.0/design/command-hooks.md
@@ -0,0 +1,102 @@
+# Command Hooks for the Aurora Client
+
+## Introduction/Motivation
+
+We've got hooks in the client that surround API calls. These are
+pretty awkward, because they don't correlate with user actions. For
+example, suppose we wanted a policy that said users weren't allowed to
+kill all instances of a production job at once.
+
+Right now, all that we could hook would be the "killJob" api call. But
+kill (at least in newer versions of the client) normally runs in
+batches. If a user called killall, what we would see on the API level
+is a series of "killJob" calls, each of which specified a batch of
+instances. We wouldn't be able to distinguish between really killing
+all instances of a job (which is forbidden under this policy), and
+carefully killing in batches (which is permitted). In each case, the
+hook would just see a series of API calls, and couldn't find out what
+the actual command being executed was!
+
+For most policy enforcement, what we really want to be able to do is
+look at and vet the commands that a user is performing, not the API
+calls that the client uses to implement those commands.
+
+So I propose that we add a new kind of hook, one that surrounds noun/verb
+commands. A hook will register itself to handle a collection of (noun,
+verb) pairs. Whenever any of those noun/verb commands are invoked, the
+hook's methods will be called around the execution of the verb. A
+pre-hook will have the ability to reject a command, preventing the
+verb from being executed.
+
+## Registering Hooks
+
+These hooks will be registered via configuration plugins. A configuration plugin
+can register hooks using an API. Hooks registered this way are, effectively,
+hardwired into the client executable.
+
+The order of execution of hooks is unspecified: they may be called in
+any order. There is no way to guarantee that one hook will execute
+before some other hook.
+
+
+### Global Hooks
+
+Hooks registered by the Python call are called _global_ hooks,
+because they will run for all configurations, whether or not they
+specify any hooks in the configuration file.
+
+In the implementation, hooks are registered in the module
+`apache.aurora.client.cli.command_hooks`, using the class
+`GlobalCommandHookRegistry`. A global hook can be registered by calling
+`GlobalCommandHookRegistry.register_command_hook` in a configuration plugin.
+
+### The API
+
+    class CommandHook(object):
+      @property
+      def name(self):
+        """Returns a name for the hook."""
+
+      def get_nouns(self):
+        """Return the nouns that have verbs that should invoke this hook."""
+
+      def get_verbs(self, noun):
+        """Return the verbs for a particular noun that should invoke his hook."""
+
+      @abstractmethod
+      def pre_command(self, noun, verb, context, commandline):
+        """Execute a hook before invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        Returns: True if the command should be allowed to proceed; False if the command
+        should be rejected.
+        """
+
+      def post_command(self, noun, verb, context, commandline, result):
+        """Execute a hook after invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        * result: the result code returned by the verb.
+        Returns: nothing
+        """
+
+    class GlobalCommandHookRegistry(object):
+      @classmethod
+      def register_command_hook(cls, hook):
+        pass
+
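+To make the proposal concrete, here is a sketch of a hook that vetoes
+`job killall`; it assumes the API above, and the import path and the
+configuration plugin performing the registration are hypothetical:
+
+    # The import path for CommandHook is assumed; see the API described above.
+    from apache.aurora.client.cli.command_hooks import (
+        CommandHook, GlobalCommandHookRegistry)
+
+    class BlockKillAllHook(CommandHook):
+      @property
+      def name(self):
+        return 'block_killall'
+
+      def get_nouns(self):
+        return ['job']
+
+      def get_verbs(self, noun):
+        return ['killall'] if noun == 'job' else []
+
+      def pre_command(self, noun, verb, context, commandline):
+        # Returning False rejects the command before the verb executes.
+        print('job killall is not permitted by policy')
+        return False
+
+      def post_command(self, noun, verb, context, commandline, result):
+        pass
+
+    # Called from a configuration plugin to install this as a global hook.
+    GlobalCommandHookRegistry.register_command_hook(BlockKillAllHook())
+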
+### Skipping Hooks
+
+To skip a hook, a user uses a command-line option, `--skip-hooks`. The option can either
+specify specific hooks to skip, or "all":
+
+* `aurora --skip-hooks=all job create east/bozo/devel/myjob` will create a job
+  without running any hooks.
+* `aurora --skip-hooks=test,iq job create east/bozo/devel/myjob` will create a job,
+  and will skip only the hooks named "test" and "iq".
diff --git a/source/documentation/0.8.0/developing-aurora-client.md b/source/documentation/0.8.0/developing-aurora-client.md
new file mode 100644
index 0000000..4475890
--- /dev/null
+++ b/source/documentation/0.8.0/developing-aurora-client.md
@@ -0,0 +1,91 @@
+Getting Started
+===============
+
+The client is written in Python, and uses the
+[Pants](http://pantsbuild.github.io/python-readme.html) build tool.
+
+Client Configuration
+====================
+
+The client uses a configuration file that specifies available clusters. More information about the
+contents of this file can be found in the
+[Client Cluster Configuration](/documentation/0.8.0/client-cluster-configuration/) documentation. Information about
+how the client locates this file can be found in the
+[Client Commands](/documentation/0.8.0/client-commands/#cluster-configuration) documentation.
+
+Building and Testing the Client
+===============================
+
+Building and testing the client code are both done using Pants. The relevant targets to know about
+are:
+
+   * Build a client executable: `./pants binary src/main/python/apache/aurora/client/cli:aurora`
+   * Test client code: `./pants test src/test/python/apache/aurora/client/cli:all`
+
+Running/Debugging the Client
+============================
+
+For manually testing client changes against a cluster, we use [Vagrant](https://www.vagrantup.com/).
+To start a virtual cluster, you need to install Vagrant, and then run `vagrant up` from the root of
+the aurora workspace. This will create a vagrant host named "devcluster", with a mesos master, a set
+of mesos slaves, and an aurora scheduler.
+
+If you have changes you would like to test in your local cluster, you'll need to rebuild the client:
+
+    vagrant ssh -c 'aurorabuild client'
+
+Once this completes, the `aurora` command will reflect your changes.
+
+Running/Debugging the Client in PyCharm
+=======================================
+
+It's possible to use PyCharm to run and debug both the client and client tests in an IDE. In order
+to do this, first run:
+
+    build-support/python/make-pycharm-virtualenv
+
+This script will configure a virtualenv with all of our Python requirements. Once the script
+completes it will emit instructions for configuring PyCharm:
+
+    Your PyCharm environment is now set up.  You can open the project root
+    directory with PyCharm.
+
+    Once the project is loaded:
+      - open project settings
+      - click 'Project Interpreter'
+      - click the cog in the upper-right corner
+      - click 'Add Local'
+      - select 'build-support/python/pycharm.venv/bin/python'
+      - click 'OK'
+
+### Running/Debugging Tests
+
+After following these instructions, you should now be able to run/debug tests directly from the IDE
+by right-clicking on a test (or test class) and choosing to run or debug:
+
+[![Debug Client Test](images/debug-client-test.png)](images/debug-client-test.png)
+
+If you've set a breakpoint, you can see the run will now stop and let you debug:
+
+[![Debugging Client Test](images/debugging-client-test.png)](images/debugging-client-test.png)
+
+### Running/Debugging the Client
+
+Actually running and debugging the client is unfortunately a bit more complex. You'll need to create
+a Run configuration:
+
+* Go to Run → Edit Configurations
+* Click the + icon to add a new configuration.
+* Choose python and name the configuration 'client'.
+* Set the script path to `/your/path/to/aurora/src/main/python/apache/aurora/client/cli/client.py`
+* Set the script parameters to the command you want to run (e.g. `job status <job key>`)
+* Expand the Environment section and click the ellipsis to add a new environment variable
+* Click the + at the bottom to add a new variable named AURORA_CONFIG_ROOT whose value is the
+  path where your cluster configuration can be found. For example, to talk to the scheduler
+  running in the vagrant image, it would be set to `/your/path/to/aurora/examples/vagrant` (this
+  is the directory where our example clusters.json is found).
+* You should now be able to run and debug this configuration!
+
+Making thrift schema changes
+============================
+See [this document](/documentation/0.8.0/thrift-deprecation/) for any thrift related changes.
diff --git a/source/documentation/0.8.0/developing-aurora-scheduler.md b/source/documentation/0.8.0/developing-aurora-scheduler.md
new file mode 100644
index 0000000..1ccaf57
--- /dev/null
+++ b/source/documentation/0.8.0/developing-aurora-scheduler.md
@@ -0,0 +1,112 @@
+Java code in the aurora repo is built with [Gradle](http://gradle.org).
+
+Getting Started
+===============
+You will need Java 7 installed and on your `PATH` or unzipped somewhere with `JAVA_HOME` set. Then
+
+    ./gradlew tasks
+
+will bootstrap the build system and show available tasks. This can take a while the first time you
+run it but subsequent runs will be much faster due to cached artifacts.
+
+Running the Tests
+-----------------
+Aurora has a comprehensive unit test suite. To run the tests use
+
+    ./gradlew build
+
+Gradle will only re-run tests when their dependencies have changed. To force a re-run of all
+tests use
+
+    ./gradlew clean build
+
+Running the build with code quality checks
+------------------------------------------
+To speed up development iteration, the plain gradle commands will not run static analysis tools.
+However, you should run these before posting a review diff, and **always** run this before pushing a
+commit to origin/master.
+
+    ./gradlew build -Pq
+
+Creating a bundle for deployment
+--------------------------------
+Gradle can create a zip file containing Aurora, all of its dependencies, and a launch script with
+
+    ./gradlew distZip
+
+or a tar file containing the same files with
+
+    ./gradlew distTar
+
+The output file will be written to `dist/distributions/aurora-scheduler.zip` or
+`dist/distributions/aurora-scheduler.tar`.
+
+Developing Aurora Java code
+===========================
+
+Setting up an IDE
+-----------------
+Gradle can generate project files for your IDE. To generate an IntelliJ IDEA project run
+
+    ./gradlew idea
+
+and import the generated `aurora.ipr` file.
+
+Adding or Upgrading a Dependency
+--------------------------------
+New dependencies can be added from Maven central by adding a `compile` dependency to `build.gradle`.
+For example, to add a dependency on `com.example`'s `example-lib` 1.0 add this block:
+
+    compile 'com.example:example-lib:1.0'
+
+NOTE: Anyone thinking about adding a new dependency should first familiarize themselves with the
+Apache Foundation's third-party licensing
+[policy](http://www.apache.org/legal/resolved.html#category-x).
+
+Developing Aurora UI
+======================
+
+Installing bower (optional)
+----------------------------
+Third party JS libraries used in Aurora (located at 3rdparty/javascript/bower_components) are
+managed by bower, a JS dependency manager. Bower is only required if you plan to add, remove or
+update JS libraries. Bower can be installed using the following command:
+
+    npm install -g bower
+
+Bower depends on node.js and npm. The easiest way to install node on a mac is via brew:
+
+    brew install node
+
+For more node.js installation options refer to https://github.com/joyent/node/wiki/Installation.
+
+More info on installing and using bower can be found at: http://bower.io/. Once installed, you can
+use the following commands to view and modify the bower repo at
+3rdparty/javascript/bower_components
+
+    bower list
+    bower install <library name>
+    bower remove <library name>
+    bower update <library name>
+    bower help
+
+Developing the Aurora Build System
+==================================
+
+Bootstrapping Gradle
+--------------------
+The following files were autogenerated by `gradle wrapper` using gradle 1.8's
+[Wrapper](http://www.gradle.org/docs/1.8/dsl/org.gradle.api.tasks.wrapper.Wrapper.html) plugin and
+should not be modified directly:
+
+    ./gradlew
+    ./gradlew.bat
+    ./gradle/wrapper/gradle-wrapper.jar
+    ./gradle/wrapper/gradle-wrapper.properties
+
+To upgrade Gradle unpack the new version somewhere, run `/path/to/new/gradle wrapper` in the
+repository root and commit the changed files.
+
+Making thrift schema changes
+============================
+See [this document](/documentation/0.8.0/thrift-deprecation/) for any thrift related changes.
diff --git a/source/documentation/0.8.0/hooks.md b/source/documentation/0.8.0/hooks.md
new file mode 100644
index 0000000..63dbdb9
--- /dev/null
+++ b/source/documentation/0.8.0/hooks.md
@@ -0,0 +1,246 @@
+# Hooks for Aurora Client API
+
+- [Introduction](#introduction)
+- [Hook Types](#hook-types)
+- [Execution Order](#execution-order)
+- [Hookable Methods](#hookable-methods)
+- [Activating and Using Hooks](#activating-and-using-hooks)
+- [.aurora Config File Settings](#aurora-config-file-settings)
+- [Command Line](#command-line)
+- [Hooks Protocol](#hooks-protocol)
+  - [pre_ Methods](#pre_-methods)
+  - [err_ Methods](#err_-methods)
+  - [post_ Methods](#post_-methods)
+- [Generic Hooks](#generic-hooks)
+- [Hooks Process Checklist](#hooks-process-checklist)
+
+## Introduction
+
+You can execute hook methods around Aurora API Client methods when they are called by the Aurora Command Line commands.
+
+Explaining how hooks work is a bit tricky because of some indirection about what they apply to. Basically, a hook is code that executes when a particular Aurora Client API method runs, letting you extend the method's actions. The hook executes on the client side, specifically on the machine executing Aurora commands.
+
+The catch is that hooks are associated with Aurora Client API methods, which users don't directly call. Instead, users call Aurora Command Line commands, which call Client API methods during their execution. Since which hooks run depends on which Client API methods get called, you will need to know which Command Line commands call which API methods. Later on, there is a table showing the various associations.
+
+**Terminology Note**: From now on, "method(s)" refer to Client API methods, and "command(s)" refer to Command Line commands.
+
+## Hook Types
+
+Hooks have three basic types, differing by when they run with respect to their associated method.
+
+`pre_<method_name>`: When its associated method is called, the `pre_` hook executes first, then the called method. If the `pre_` hook fails, the method never runs. Later code that expected the method to succeed may be affected by this, which can result in the Aurora client terminating.
+
+Note that a `pre_` hook can error-trap internally so it does not
+return `False`. Designers/contributors of new `pre_` hooks should
+consider whether or not to error-trap them. You can error trap at the
+highest level very generally and always pass the `pre_` hook by
+returning `True`. For example:
+
+```python
+def pre_create(...):
+  do_something()  # if do_something fails with an exception, the create_job is not attempted!
+  return True
+
+# However...
+def pre_create(...):
+  try:
+    do_something()  # may cause exception
+  except Exception:  # generic error trap will catch it
+    pass  # and ignore the exception
+  return True  # create_job will run in any case!
+```
+
+`post_<method_name>`: A `post_` hook executes after its associated method successfully finishes running. If it fails, the already executed method is unaffected. A `post_` hook's error is trapped, and any later operations are unaffected.
+
+`err_<method_name>`: Executes only when its associated method returns a status other than OK or throws an exception. If an `err_` hook fails, the already executed method is unaffected. An `err_` hook's error is trapped, and any later operations are unaffected.
+
+## Execution Order
+
+A command with `pre_`, `post_`, and `err_` hooks defined and activated for its called method executes in the following order when the method successfully executes:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and successfully finishes
+6. `post_` method hook runs
+7. Command code executes
+8. Command execution ends
+
+The following is what happens when, for the same command and hooks, the method associated with the command suffers an error and does not successfully finish executing:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and fails
+6. `err_` method hook runs
+7. Command code executes (if `err_` method does not end the command execution)
+8. Command execution ends
+
+Note that the `post_` and `err_` hooks for the same method can never both run for a single execution of that method.
+
+## Hookable Methods
+
+You can associate `pre_`, `post_`, and `err_` hooks with the following methods. Since you do not directly interact with the methods, but rather the Aurora Command Line commands that call them, for each method we also list the command(s) that can call the method. Note that a different method or methods may be called by a command depending on how the command's other code executes. Similarly, multiple commands can call the same method. We also list the methods' argument signatures, which are used by their associated hooks. <a name="Chart"></a>
+
+  Aurora Client API Method | Client API Method Argument Signature | Aurora Command Line Command
+  -------------------------| ------------------------------------- | ---------------------------
+  ```cancel_update``` | ```self```, ```job_key``` | ```job cancel-update```
+  ```create_job``` | ```self```, ```config``` | ```job create```, ```runtask```
+  ```restart``` | ```self```, ```job_key```, ```shards```, ```update_config```, ```health_check_interval_seconds``` | ```job restart```
+  ```update_job``` | ```self```, ```config```, ```health_check_interval_seconds=3```, ```shards=None``` | ```job update```
+  ```kill_job``` | ```self```, ```job_key```, ```shards=None``` |  ```job kill```
+  ```start_cronjob``` | ```self```, ```job_key``` | ```cron start```
+  ```start_job_update``` | ```self```, ```config```, ```instances=None``` | ```update start```
+
+Some specific examples:
+
+* `pre_create_job` executes when a `create_job` method is called, and before the `create_job` method itself executes.
+
+* `post_cancel_update` executes after a `cancel_update` method has successfully finished running.
+
+* `err_kill_job` executes when the `kill_job` method is called, but doesn't successfully finish running.
+
+## Activating and Using Hooks
+
+By default, hooks are inactive. If you do not want to use hooks, you do not need to make any changes to your code. If you do want to use hooks, you will need to alter your `.aurora` config file to activate them both for the configuration as a whole and for individual `Job`s. And, of course, you will need to define in your config file what happens when a particular hook executes.
+
+## .aurora Config File Settings
+
+You can define a top-level `hooks` variable in any `.aurora` config file. `hooks` is a list of all objects that define hooks used by `Job`s defined in that config file. If you do not want to define any hooks for a configuration, `hooks` is optional.
+
+    hooks = [Object_with_defined_hooks1, Object_with_defined_hooks2]
+
+Be careful when assembling a config file using `include` on multiple smaller config files. If there are multiple files that assign a value to `hooks`, only the last assignment made will stick. For example, if `x.aurora` has `hooks = [a, b, c]` and `y.aurora` has `hooks = [d, e, f]` and `z.aurora` has, in this order, `include x.aurora` and `include y.aurora`, the `hooks` value will be `[d, e, f]`.
+
+Also, for any `Job` that you want to use hooks with, its `Job` definition in the `.aurora` config file must set an `enable_hooks` flag to `True` (it defaults to `False`). By default, hooks are disabled and you must enable them for `Job`s of your choice.
+
+To summarize, to use hooks for a particular job, you must both activate hooks for your config file as a whole, and for that job. Activating hooks only for individual jobs won't work, nor will only activating hooks for your config file as a whole. You must also specify the hooks' defining object in the `hooks` variable.
+
+Recall that `.aurora` config files are written in Pystachio. So the following turns on hooks for production jobs at cluster1 and cluster2, but leaves them off for similar jobs with a defined user role. Of course, you also need to list the objects that define the hooks in your config file's `hooks` variable.
+
+```python
+jobs = [
+        Job(enable_hooks = True, cluster = c, env = 'prod') for c in ('cluster1', 'cluster2')
+       ]
+jobs.extend(
+   Job(cluster = c, env = 'prod', role = getpass.getuser()) for c in ('cluster1', 'cluster2'))
+   # Hooks disabled for these jobs
+```
+
+## Command Line
+
+All Aurora Command Line commands now accept an `.aurora` config file as an optional parameter (some, of course, accept it as a required parameter). Whenever a command has a `.aurora` file parameter, any hooks specified and activated in the `.aurora` file can be used. For example:
+
+    aurora job restart cluster1/role/env/app myapp.aurora
+
+The command activates any hooks specified and activated in `myapp.aurora`. For the `restart` command, that is the only thing the `myapp.aurora` parameter does. If the command were instead the following, no hooks could run for `restart`, since there is no `.aurora` config file to specify any.
+
+    aurora job restart cluster1/role/env/app
+
+## Hooks Protocol
+
+Any object defined in the `.aurora` config file can define hook methods. You should define your hook methods within a class, and then use the class name as a value in the `hooks` list in your config file.
+
+Note that you can define other methods in the class that its hook methods can call; all the logic of a hook does not have to be in its definition.
+
+The following example defines a class containing a `pre_kill_job` hook definition that calls another method defined in the class.
+
+```python
+# Defines a method pre_kill_job
+class KillConfirmer(object):
+  def confirm(self, msg):
+    return raw_input(msg).lower() == 'yes'
+
+  def pre_kill_job(self, job_key, shards=None):
+    shards = ('shards %s' % shards) if shards is not None else 'all shards'
+    return self.confirm('Are you sure you want to kill %s (%s)? (yes/no): '
+                        % (job_key, shards))
+```
+
+### pre_ Methods
+
+`pre_` methods have the signature:
+
+    pre_<API method name>(self, <associated method's signature>)
+
+`pre_` methods have the same signature as their associated method, with the addition of `self` as the first parameter. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `pre_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+If this method returns False, the API command call aborts.
+
+### err_ Methods
+
+`err_` methods have the signature:
+
+    err_<API method name>(self, exc, <associated method's signature>)
+
+`err_` methods have the same signature as their associated method, with the addition of a first parameter `self` and a second parameter `exc`. `exc` is either a result with responseCode other than `ResponseCode.OK` or an `Exception`. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `err_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+`err_` method return codes are ignored.
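+
+For example, a hypothetical hook that simply reports failures of `create_job` (the class and
+message here are illustrative, not part of Aurora) might look like:
+
+```python
+class FailureReporter(object):
+  def err_create_job(self, exc, config):
+    # exc is either a non-OK result or an Exception; this hook's return value is ignored.
+    print('create_job failed: %s' % exc)
+```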
+
+### post_ Methods
+
+`post_` methods have the signature:
+
+    post_<API method name>(self, result, <associated method signature>)
+
+`post_` method parameters are `self`, then `result`, followed by the same parameter signature as their associated method. `result` is the result of the associated method call. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `post_` methods, you can use the `*` and `**` syntax to designate that all unspecified arguments are passed in a list to the `*`ed parameter and all unspecified named arguments with values are passed as name/value pairs to the `**`ed parameter.
+
+`post_` method return codes are ignored.
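+
+As a sketch (again with illustrative names), a `post_` hook that records successful job creations
+could be:
+
+```python
+class CreateAuditor(object):
+  def post_create_job(self, result, config):
+    # result is whatever create_job returned; this hook's return value is ignored.
+    print('create_job succeeded with result: %s' % result)
+```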
+
+## Generic Hooks
+
+There are seven Aurora API Methods which any of the three hook types can attach to. Thus, there are 21 possible hook/method combinations for a single `.aurora` config file. Say that you define `pre_` and `post_` hooks for the `restart` method. That leaves 19 undefined hook/method combinations; `err_restart` and the 3 `pre_`, `post_`, and `err_` hooks for each of the other 6 hookable methods. You can define what happens when any of these otherwise undefined 19 hooks execute via a generic hook, whose signature is:
+
+```python
+generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw)
+```
+
+where:
+
+* `hook_config` is a named tuple of `config` (the Pystachio `config` object) and `job_key`.
+
+* `event` is one of `pre`, `err`, or `post`, indicating which type of hook the generic hook is standing in for. For example, assume no specific hooks were defined for the `restart` API command. If `generic_hook` is defined and activated, and `restart` is called, `generic_hook` will effectively run as `pre_restart`, `post_restart`, and `err_restart`. You can use a selection statement on this value so that `generic_hook` will act differently based on whether it is standing in for a `pre_`, `post_`, or `err_` hook.
+
+* `method_name` is the Client API method name whose execution is causing this execution of the `generic_hook`.
+
+* `*args`, `**kw` are the API method's positional and keyword arguments respectively.
+* `result_or_err` is a tri-state parameter taking one of these three values:
+  1. None for `pre_` hooks
+  2. `result` for `post_` hooks
+  3. `exc` for `err_` hooks
+
+Example:
+
+```python
+# Overrides the standard do-nothing generic_hook by adding a log writing operation.
+from twitter.common import log
+
+class Logger(object):
+  '''Adds to the log every time a hookable API method is called.'''
+  def generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw):
+    log.info('%s: %s_%s of %s'
+             % (self.__class__.__name__, event, method_name, hook_config.job_key))
+```
+
+## Hooks Process Checklist
+
+1. In your `.aurora` config file, add a `hooks` variable. Note that you may want to define a `.aurora` file only for hook definitions and then include this file in any other config files in which you want to use the same hooks.
+
+```python
+hooks = []
+```
+
+2. In the `hooks` variable, list all objects that define hooks used by `Job`s defined in this config:
+
+```python
+hooks = [Object_hook_definer1, Object_hook_definer2]
+```
+
+3. For each job that uses hooks in this config file, add `enable_hooks = True` to the `Job` definition. Note that this is necessary even if you only want to use the generic hook.
+
+4. Write your `pre_`, `post_`, and `err_` hook definitions as part of an object definition in your `.aurora` config file.
+
+5. If desired, write your `generic_hook` definition as part of an object definition in your `.aurora` config file. Remember, the object must be listed as a member of `hooks`.
+
+6. If your Aurora command line command does not otherwise take an `.aurora` config file argument, add the appropriate `.aurora` file as an argument in order to define and activate the configuration's hooks.
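+
+Putting the checklist together, a minimal sketch of a single config file (the job parameters are
+illustrative; reuse the `KillConfirmer` class from the earlier example or your own hook classes):
+
+```python
+class KillConfirmer(object):
+  def pre_kill_job(self, job_key, shards=None):
+    return raw_input('Really kill %s? (yes/no): ' % job_key).lower() == 'yes'
+
+hooks = [KillConfirmer]            # steps 1-2: declare the hook-defining objects
+
+jobs = [
+  Job(enable_hooks = True,         # step 3: opt this job in to hooks
+      cluster = 'cluster1',
+      env = 'prod'),
+]
+```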
diff --git a/publish/documentation/latest/images/CPUavailability.png b/source/documentation/0.8.0/images/CPUavailability.png
similarity index 100%
copy from publish/documentation/latest/images/CPUavailability.png
copy to source/documentation/0.8.0/images/CPUavailability.png
Binary files differ
diff --git a/publish/documentation/latest/images/HelloWorldJob.png b/source/documentation/0.8.0/images/HelloWorldJob.png
similarity index 100%
copy from publish/documentation/latest/images/HelloWorldJob.png
copy to source/documentation/0.8.0/images/HelloWorldJob.png
Binary files differ
diff --git a/publish/documentation/latest/images/RoleJobs.png b/source/documentation/0.8.0/images/RoleJobs.png
similarity index 100%
copy from publish/documentation/latest/images/RoleJobs.png
copy to source/documentation/0.8.0/images/RoleJobs.png
Binary files differ
diff --git a/publish/documentation/latest/images/ScheduledJobs.png b/source/documentation/0.8.0/images/ScheduledJobs.png
similarity index 100%
copy from publish/documentation/latest/images/ScheduledJobs.png
copy to source/documentation/0.8.0/images/ScheduledJobs.png
Binary files differ
diff --git a/publish/documentation/latest/images/TaskBreakdown.png b/source/documentation/0.8.0/images/TaskBreakdown.png
similarity index 100%
copy from publish/documentation/latest/images/TaskBreakdown.png
copy to source/documentation/0.8.0/images/TaskBreakdown.png
Binary files differ
diff --git a/publish/documentation/latest/images/aurora_hierarchy.png b/source/documentation/0.8.0/images/aurora_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/aurora_hierarchy.png
copy to source/documentation/0.8.0/images/aurora_hierarchy.png
Binary files differ
diff --git a/source/documentation/0.8.0/images/debug-client-test.png b/source/documentation/0.8.0/images/debug-client-test.png
new file mode 100644
index 0000000..8b67a10
--- /dev/null
+++ b/source/documentation/0.8.0/images/debug-client-test.png
Binary files differ
diff --git a/source/documentation/0.8.0/images/debugging-client-test.png b/source/documentation/0.8.0/images/debugging-client-test.png
new file mode 100644
index 0000000..df7b7e3
--- /dev/null
+++ b/source/documentation/0.8.0/images/debugging-client-test.png
Binary files differ
diff --git a/publish/documentation/latest/images/killedtask.png b/source/documentation/0.8.0/images/killedtask.png
similarity index 100%
copy from publish/documentation/latest/images/killedtask.png
copy to source/documentation/0.8.0/images/killedtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/lifeofatask.png b/source/documentation/0.8.0/images/lifeofatask.png
similarity index 100%
copy from publish/documentation/latest/images/lifeofatask.png
copy to source/documentation/0.8.0/images/lifeofatask.png
Binary files differ
diff --git a/publish/documentation/latest/images/runningtask.png b/source/documentation/0.8.0/images/runningtask.png
similarity index 100%
copy from publish/documentation/latest/images/runningtask.png
copy to source/documentation/0.8.0/images/runningtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/stderr.png b/source/documentation/0.8.0/images/stderr.png
similarity index 100%
copy from publish/documentation/latest/images/stderr.png
copy to source/documentation/0.8.0/images/stderr.png
Binary files differ
diff --git a/publish/documentation/latest/images/stdout.png b/source/documentation/0.8.0/images/stdout.png
similarity index 100%
copy from publish/documentation/latest/images/stdout.png
copy to source/documentation/0.8.0/images/stdout.png
Binary files differ
diff --git a/publish/documentation/latest/images/storage_hierarchy.png b/source/documentation/0.8.0/images/storage_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/storage_hierarchy.png
copy to source/documentation/0.8.0/images/storage_hierarchy.png
Binary files differ
diff --git a/source/documentation/0.8.0/index.html.md b/source/documentation/0.8.0/index.html.md
new file mode 100644
index 0000000..f34ce3a
--- /dev/null
+++ b/source/documentation/0.8.0/index.html.md
@@ -0,0 +1,33 @@
+## Introduction
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run long-running services that take advantage of Apache Mesos' scalability, fault-tolerance, and resource isolation. This documentation has been organized into sections with three audiences in mind:
+ 
+ * Users: General information about the project and how to run an Aurora job.
+ * Operators: For those who wish to manage and fine-tune an Aurora cluster.
+ * Developers: All the information you need to start modifying Aurora and contributing back to the project.
+
+We encourage you to ask questions on the [Aurora developer list](http://aurora.apache.org/community/) or the `#aurora` IRC channel on `irc.freenode.net`.
+
+## Users
+ * [Install Aurora on virtual machines on your private machine](/documentation/0.8.0/vagrant/)
+ * [Hello World Tutorial](/documentation/0.8.0/tutorial/)
+ * [User Guide](/documentation/0.8.0/user-guide/)
+ * [Configuration Tutorial](/documentation/0.8.0/configuration-tutorial/)
+ * [Aurora + Thermos Reference](/documentation/0.8.0/configuration-reference/)
+ * [Command Line Client](/documentation/0.8.0/client-commands/)
+ * [Cron Jobs](/documentation/0.8.0/cron-jobs/)
+
+## Operators
+ * [Deploy Aurora](/documentation/0.8.0/deploying-aurora-scheduler/)
+ * [Monitoring](/documentation/0.8.0/monitoring/)
+ * [Hooks for Aurora Client API](/documentation/0.8.0/hooks/)
+ * [Scheduler Storage](/documentation/0.8.0/storage/)
+ * [Scheduler Storage and Maintenance](/documentation/0.8.0/storage-config/)
+ * [SLA Measurement](/documentation/0.8.0/sla/)
+ * [Resource Isolation and Sizing](/documentation/0.8.0/resource-isolation/)
+ * [Generating test resources](/documentation/0.8.0/test-resource-generation/)
+
+## Developers
+ * [Contributing to the project](contributing/)
+ * [Developing the Aurora Scheduler](/documentation/0.8.0/developing-aurora-scheduler/)
+ * [Developing the Aurora Client](/documentation/0.8.0/developing-aurora-client/)
+ * [Committers Guide](/documentation/0.8.0/committers/)
diff --git a/source/documentation/0.8.0/monitoring.md b/source/documentation/0.8.0/monitoring.md
new file mode 100644
index 0000000..3cb2a79
--- /dev/null
+++ b/source/documentation/0.8.0/monitoring.md
@@ -0,0 +1,181 @@
+# Monitoring your Aurora cluster
+
+Before you start running important services in your Aurora cluster, it's important to set up
+monitoring and alerting of Aurora itself.  Most of your monitoring can be against the scheduler,
+since it will give you a global view of what's going on.
+
+## Reading stats
+The scheduler exposes a *lot* of instrumentation data via its HTTP interface. You can get a quick
+peek at the first few of these in our vagrant image:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars | head'
+    async_tasks_completed 1004
+    attribute_store_fetch_all_events 15
+    attribute_store_fetch_all_events_per_sec 0.0
+    attribute_store_fetch_all_nanos_per_event 0.0
+    attribute_store_fetch_all_nanos_total 3048285
+    attribute_store_fetch_all_nanos_total_per_sec 0.0
+    attribute_store_fetch_one_events 3391
+    attribute_store_fetch_one_events_per_sec 0.0
+    attribute_store_fetch_one_nanos_per_event 0.0
+    attribute_store_fetch_one_nanos_total 454690753
+
+These values are served as `Content-Type: text/plain`, with each line containing a space-separated metric
+name and value. Values may be integers, doubles, or strings (note: strings are static, others
+may be dynamic).
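+
+If you are scraping this endpoint yourself, the plain-text form is trivial to parse. A minimal
+sketch (Python 2; adjust the URL for your scheduler):
+
+```python
+import urllib2
+
+def read_vars(url='http://localhost:8081/vars'):
+  """Return the scheduler stats as a dict, converting numeric values where possible."""
+  stats = {}
+  for line in urllib2.urlopen(url).read().splitlines():
+    name, _, value = line.partition(' ')
+    try:
+      stats[name] = float(value)   # integer and double stats
+    except ValueError:
+      stats[name] = value          # static string stats
+  return stats
+```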
+
+If your monitoring infrastructure prefers JSON, the scheduler exports that as well:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars.json | python -mjson.tool | head'
+    {
+        "async_tasks_completed": 1009,
+        "attribute_store_fetch_all_events": 15,
+        "attribute_store_fetch_all_events_per_sec": 0.0,
+        "attribute_store_fetch_all_nanos_per_event": 0.0,
+        "attribute_store_fetch_all_nanos_total": 3048285,
+        "attribute_store_fetch_all_nanos_total_per_sec": 0.0,
+        "attribute_store_fetch_one_events": 3409,
+        "attribute_store_fetch_one_events_per_sec": 0.0,
+        "attribute_store_fetch_one_nanos_per_event": 0.0,
+
+This will be the same data as above, served with `Content-Type: application/json`.
+
+## Viewing live stat samples on the scheduler
+The scheduler uses the Twitter commons stats library, which keeps an internal time-series database
+of exported variables - nearly everything in `/vars` is available for instant graphing.  This is
+useful for debugging, but is not a replacement for an external monitoring system.
+
+You can view these graphs on a scheduler at `/graphview`.  It supports some composition and
+aggregation of values, which can be invaluable when triaging a problem.  For example, if you have
+the scheduler running in vagrant, check out these links:
+[simple graph](http://192.168.33.7:8081/graphview?query=jvm_uptime_secs)
+[complex composition](http://192.168.33.7:8081/graphview?query=rate\(scheduler_log_native_append_nanos_total\)%2Frate\(scheduler_log_native_append_events\)%2F1e6)
+
+### Counters and gauges
+Among numeric stats, there are two fundamental types of stats exported: _counters_ and _gauges_.
+Counters are guaranteed to be monotonically-increasing for the lifetime of a process, while gauges
+may decrease in value.  Aurora uses counters to represent things like the number of times an event
+has occurred, and gauges to capture things like the current length of a queue.  Counters are a
+natural fit for accurate composition into [rate ratios](http://en.wikipedia.org/wiki/Rate_ratio)
+(useful for sample-resistant latency calculation), while gauges are not.
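+
+For example, the replicated-log write latency described later in this page is exactly such a
+ratio. Given two samples of the relevant counters taken a known interval apart, it might be
+computed like this (a sketch; the sampling machinery is up to your monitoring system):
+
+```python
+def rate(prev, curr, interval_secs):
+  """Windowed rate of a monotonically-increasing counter."""
+  return (curr - prev) / float(interval_secs)
+
+def append_latency_nanos(prev_vars, curr_vars, interval_secs):
+  """Average nanoseconds per replicated log append over the sampling window."""
+  nanos = rate(prev_vars['scheduler_log_native_append_nanos_total'],
+               curr_vars['scheduler_log_native_append_nanos_total'], interval_secs)
+  events = rate(prev_vars['scheduler_log_native_append_events'],
+                curr_vars['scheduler_log_native_append_events'], interval_secs)
+  return nanos / events if events else 0.0
+```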
+
+# Alerting
+
+## Quickstart
+If you are looking for just bare-minimum alerting to get something in place quickly, set up alerting
+on `framework_registered` and `task_store_LOST`. These will give you a decent picture of overall
+health.
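+
+A bare-bones check along those lines might look like the following sketch, where `samples` holds
+the `/vars` dicts gathered from each scheduler and `prev_lost` is the `task_store_LOST` value from
+the previous polling pass (the threshold is illustrative; see the next section):
+
+```python
+def quickstart_alerts(samples, prev_lost, lost_threshold=10):
+  alerts = []
+  leaders = sum(s.get('framework_registered', 0) for s in samples)
+  if leaders != 1:
+    alerts.append('expected exactly one registered leader, saw %d' % leaders)
+  lost = max(s.get('task_store_LOST', 0) for s in samples)
+  if lost - prev_lost > lost_threshold:
+    alerts.append('task_store_LOST grew by %d since the last check' % (lost - prev_lost))
+  return alerts
+```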
+
+## A note on thresholds
+One of the most difficult things in monitoring is choosing alert thresholds. With many of these
+stats, there is no value we can offer as a threshold that will be guaranteed to work for you. It
+will depend on the size of your cluster, number of jobs, churn of tasks in the cluster, etc. We
+recommend you start with a strict value after viewing a small amount of collected data, and then
+adjust thresholds as you see fit. Feel free to ask us if you would like to validate that your alerts
+and thresholds make sense.
+
+## Important stats
+
+### `jvm_uptime_secs`
+Type: integer counter
+
+The number of seconds the JVM process has been running. Comes from
+[RuntimeMXBean#getUptime()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/RuntimeMXBean.html#getUptime\(\))
+
+Detecting resets (decreasing values) on this stat will tell you that the scheduler is failing to
+stay alive.
+
+Look at the scheduler logs to identify the reason the scheduler is exiting.
+
+### `system_load_avg`
+Type: double gauge
+
+The current load average of the system for the last minute. Comes from
+[OperatingSystemMXBean#getSystemLoadAverage()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/OperatingSystemMXBean.html?is-external=true#getSystemLoadAverage\(\)).
+
+A high sustained value suggests that the scheduler machine may be over-utilized.
+
+Use standard unix tools like `top` and `ps` to track down the offending process(es).
+
+### `process_cpu_cores_utilized`
+Type: double gauge
+
+The current number of CPU cores in use by the JVM process. This should not exceed the number of
+logical CPU cores on the machine. Derived from
+[OperatingSystemMXBean#getProcessCpuTime()](http://docs.oracle.com/javase/7/docs/jre/api/management/extension/com/sun/management/OperatingSystemMXBean.html)
+
+A high sustained value indicates that the scheduler is overworked. Due to current internal design
+limitations, if this value is sustained at `1`, there is a good chance the scheduler is under water.
+
+There are two main inputs that tend to drive this figure: task scheduling attempts and status
+updates from Mesos.  You may see activity in the scheduler logs to give an indication of where
+time is being spent.  Beyond that, it really takes good familiarity with the code to effectively
+triage this.  We suggest engaging with an Aurora developer.
+
+### `task_store_LOST`
+Type: integer gauge
+
+The number of tasks stored in the scheduler that are in the `LOST` state, and have been rescheduled.
+
+If this value is increasing at a high rate, it is a sign of trouble.
+
+There are many sources of `LOST` tasks in Mesos: the scheduler, master, slave, and executor can all
+trigger this.  The first step is to look in the scheduler logs for `LOST` to identify where the
+state changes are originating.
+
+### `scheduler_resource_offers`
+Type: integer counter
+
+The number of resource offers that the scheduler has received.
+
+For a healthy scheduler, this value must be increasing over time.
+
+Assuming the scheduler is up and otherwise healthy, you will want to check if the master thinks it
+is sending offers. You should also look at the master's web interface to see if it has a large
+number of outstanding offers that it is waiting to have returned.
+
+### `framework_registered`
+Type: binary integer counter
+
+Will be `1` for the leading scheduler that is registered with the Mesos master, `0` for passive
+schedulers.
+
+A sustained period without a `1` (or where `sum() != 1`) warrants investigation.
+
+If there is no leading scheduler, look in the scheduler and master logs for why.  If there are
+multiple schedulers claiming leadership, this suggests a split brain and warrants filing a critical
+bug.
+
+### `rate(scheduler_log_native_append_nanos_total)/rate(scheduler_log_native_append_events)`
+Type: rate ratio of integer counters
+
+This composes two counters to compute a windowed figure for the latency of replicated log writes.
+
+A hike in this value suggests disk bandwidth contention.
+
+Look in scheduler logs for any reported oddness with saving to the replicated log. Also use
+standard tools like `vmstat` and `iotop` to identify whether the disk has become slow or
+over-utilized. We suggest using a dedicated disk for the replicated log to mitigate this.
+
+### `timed_out_tasks`
+Type: integer counter
+
+Tracks the number of times the scheduler has given up while waiting
+(for `-transient_task_state_timeout`) to hear back about a task that is in a transient state
+(e.g. `ASSIGNED`, `KILLING`), and has moved to `LOST` before rescheduling.
+
+This value is currently known to increase occasionally when the scheduler fails over
+([AURORA-740](https://issues.apache.org/jira/browse/AURORA-740)). However, any large spike in this
+value warrants investigation.
+
+The scheduler will log when it times out a task. You should trace the task ID of the timed out
+task into the master, slave, and/or executors to determine where the message was dropped.
+
+### `http_500_responses_events`
+Type: integer counter
+
+The total number of HTTP 500 status responses sent by the scheduler. Includes API and asset serving.
+
+An increase warrants investigation.
+
+Look in scheduler logs to identify why the scheduler returned a 500; there should be a stack trace.
diff --git a/source/documentation/latest/resource-isolation.md b/source/documentation/0.8.0/resource-isolation.md
similarity index 100%
rename from source/documentation/latest/resource-isolation.md
rename to source/documentation/0.8.0/resource-isolation.md
diff --git a/source/documentation/latest/scheduler-storage.md b/source/documentation/0.8.0/scheduler-storage.md
similarity index 100%
copy from source/documentation/latest/scheduler-storage.md
copy to source/documentation/0.8.0/scheduler-storage.md
diff --git a/source/documentation/0.8.0/security.md b/source/documentation/0.8.0/security.md
new file mode 100644
index 0000000..8fcbadb
--- /dev/null
+++ b/source/documentation/0.8.0/security.md
@@ -0,0 +1,271 @@
+Aurora integrates with [Apache Shiro](http://shiro.apache.org/) to provide security
+controls for its API. In addition to providing some useful features out of the box, Shiro
+also allows Aurora cluster administrators to adapt the security system to their organization’s
+existing infrastructure.
+
+- [Enabling Security](#enabling-security)
+- [Authentication](#authentication)
+	- [HTTP Basic Authentication](#http-basic-authentication)
+		- [Server Configuration](#server-configuration)
+		- [Client Configuration](#client-configuration)
+	- [HTTP SPNEGO Authentication (Kerberos)](#http-spnego-authentication-kerberos)
+		- [Server Configuration](#server-configuration-1)
+		- [Client Configuration](#client-configuration-1)
+- [Authorization](#authorization)
+	- [Using an INI file to define security controls](#using-an-ini-file-to-define-security-controls)
+		- [Caveats](#caveats)
+- [Implementing a Custom Realm](#implementing-a-custom-realm)
+	- [Packaging a realm module](#packaging-a-realm-module)
+- [Known Issues](#known-issues)
+
+# Enabling Security
+
+There are two major components of security:
+[authentication and authorization](http://en.wikipedia.org/wiki/Authentication#Authorization).  A
+cluster administrator may choose the approach used for each, and may also implement custom
+mechanisms for either.  Later sections describe the options available.
+
+# Authentication
+
+The scheduler must be configured with instructions for how to process authentication
+credentials at a minimum.  There are currently two built-in authentication schemes -
+[HTTP Basic Authentication](http://en.wikipedia.org/wiki/Basic_access_authentication), and
+[SPNEGO](http://en.wikipedia.org/wiki/SPNEGO) (Kerberos).
+
+## HTTP Basic Authentication
+
+Basic Authentication is a very quick way to add *some* security.  It is supported
+by all major browsers and HTTP client libraries with minimal work.  However,
+before relying on Basic Authentication you should be aware of the [security
+considerations](http://tools.ietf.org/html/rfc2617#section-4).
+
+### Server Configuration
+
+At a minimum you need to set the following command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_realm_modules=INI_AUTHNZ
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+[users]
+sally = apple, admin
+
+[roles]
+admin = *
+```
+
+The details of the security.ini file are explained below. Note that this file contains plaintext,
+unhashed passwords.
+
+### Client Configuration
+
+To configure the client for HTTP Basic authentication, add an entry to ~/.netrc with your credentials
+
+```
+% cat ~/.netrc
+# ...
+
+machine aurora.example.com
+login sally
+password apple
+
+# ...
+```
+
+No changes are required to `clusters.json`.
+
+## HTTP SPNEGO Authentication (Kerberos)
+
+### Server Configuration
+At a minimum you need to set the following command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=NEGOTIATE
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+% cat path/to/security.ini
+[users]
+sally = _, admin
+
+[roles]
+admin = *
+```
+
+What's going on here? First, Aurora must be configured to request Kerberos credentials when presented with an
+unauthenticated request. This is achieved by setting
+
+```
+-http_authentication_mechanism=NEGOTIATE
+```
+
+Next, a Realm module must be configured to **authenticate** the current request using the Kerberos
+credentials that were requested. Aurora ships with a realm module that can do this
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN[,...]
+```
+
+The Kerberos5Realm requires a keytab file and a server principal name. The principal name will usually
+be in the form `HTTP/aurora.example.com@EXAMPLE.COM`.
+
+```
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+```
+
+The Kerberos5 realm module is authentication-only. For scheduler security to work you must also
+enable a realm module that provides an Authorizer implementation. For example, to do this using the
+IniShiroRealmModule:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+```
+
+You can then configure authorization using a security.ini file as described below
+(the password field is ignored). You must configure the realm module with the path to this file:
+
+```
+-shiro_ini_path=path/to/security.ini
+```
+
+### Client Configuration
+To use Kerberos on the client-side you must build Kerberos-enabled client binaries. Do this with
+
+```
+./pants binary src/main/python/apache/aurora/client/cli:kaurora
+./pants binary src/main/python/apache/aurora/admin:kaurora_admin
+```
+
+You must also configure each cluster where you've enabled Kerberos on the scheduler
+to use Kerberos authentication. Do this by setting `auth_mechanism` to `KERBEROS`
+in `clusters.json`.
+
+```
+% cat ~/.aurora/clusters.json
+{
+    "devcluser": {
+        "auth_mechanism": "KERBEROS",
+        ...
+    },
+    ...
+}
+```
+
+# Authorization
+Given a means to authenticate the entity a client claims they are, we need to define what privileges they have.
+
+## Using an INI file to define security controls
+
+The simplest security configuration for Aurora is an INI file on the scheduler.  For small
+clusters, or clusters where the users and access controls change relatively infrequently, this is
+likely the preferred approach.  However you may want to avoid this approach if access permissions
+are rapidly changing, or if your access control information already exists in another system.
+
+You can enable INI-based configuration with the following scheduler command line arguments:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_ini_path=path/to/security.ini
+```
+
+*note* As the argument name reveals, this is using Shiro’s
+[IniRealm](http://shiro.apache.org/configuration.html#Configuration-INIConfiguration) behind
+the scenes.
+
+The INI file will contain two sections - users and roles.  Here’s an example for what might
+be in security.ini:
+
+```
+[users]
+sally = apple, admin
+jim = 123456, accounting
+becky = letmein, webapp
+larry = 654321,accounting
+steve = password
+
+[roles]
+admin = *
+accounting = thrift.AuroraAdmin:setQuota
+webapp = thrift.AuroraSchedulerManager:*:webapp
+```
+
+The users section defines user credentials and the role(s) they are members of.  These lines
+are of the format `<user> = <password>[, <role>...]`.  As you probably noticed, the passwords are
+in plaintext and as a result read access to this file should be restricted.
+
+In this configuration, each user has different privileges for actions in the cluster because
+of the roles they are a part of:
+
+* admin is granted all privileges
+* accounting may adjust the amount of resource quota for any role
+* webapp represents a collection of jobs that represents a service, and its members may create and modify any jobs owned by it
+
+### Caveats
+You might find documentation on the Internet suggesting there are additional sections in `shiro.ini`,
+like `[main]` and `[urls]`. These are not supported by Aurora as it uses a different mechanism to configure
+those parts of Shiro. Think of Aurora's `security.ini` as a subset with only `[users]` and `[roles]` sections.
+
+# Implementing a Custom Realm
+
+Since Aurora’s security is backed by [Apache Shiro](https://shiro.apache.org), you can implement a
+custom [Realm](http://shiro.apache.org/realm.html) to define organization-specific security behavior.
+
+In addition to using Shiro's standard APIs to implement a Realm you can link against Aurora to
+access the type-safe Permissions Aurora uses. See the Javadoc for `org.apache.aurora.scheduler.spi`
+for more information.
+
+## Packaging a realm module
+Package your custom Realm(s) with a Guice module that exposes a `Set<Realm>` multibinding.
+
+```java
+package com.example;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.multibindings.Multibinder;
+import org.apache.shiro.realm.Realm;
+
+public class MyRealmModule extends AbstractModule {
+  @Override
+  public void configure() {
+    Realm myRealm = new MyRealm();
+
+    Multibinder.newSetBinder(binder(), Realm.class).addBinding().toInstance(myRealm);
+  }
+
+  static class MyRealm implements Realm {
+    // Realm implementation.
+  }
+}
+```
+
+To use your module in the scheduler, include it as a realm module based on its fully-qualified
+class name:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ,com.example.MyRealmModule
+```
+
+# Known Issues
+
+While the APIs and SPIs we ship with are stable as of 0.8.0, we are aware of several incremental
+improvements. Please follow, vote, or send patches.
+
+Relevant tickets:
+* [AURORA-343](https://issues.apache.org/jira/browse/AURORA-343): HTTPS support
+* [AURORA-1248](https://issues.apache.org/jira/browse/AURORA-1248): Client retries 4xx errors
+* [AURORA-1279](https://issues.apache.org/jira/browse/AURORA-1279): Remove kerberos-specific build targets
+* [AURORA-1293](https://issues.apache.org/jira/browse/AURORA-1291): Consider defining a JSON format in place of INI
+* [AURORA-1179](https://issues.apache.org/jira/browse/AURORA-1179): Supported hashed passwords in security.ini
+* [AURORA-1295](https://issues.apache.org/jira/browse/AURORA-1295): Support security for the ReadOnlyScheduler service
diff --git a/source/documentation/0.8.0/sla.md b/source/documentation/0.8.0/sla.md
new file mode 100644
index 0000000..3d56e95
--- /dev/null
+++ b/source/documentation/0.8.0/sla.md
@@ -0,0 +1,176 @@
+Aurora SLA Measurement
+--------------
+
+- [Overview](#overview)
+- [Metric Details](#metric-details)
+  - [Platform Uptime](#platform-uptime)
+  - [Job Uptime](#job-uptime)
+  - [Median Time To Assigned (MTTA)](#median-time-to-assigned-\(mtta\))
+  - [Median Time To Running (MTTR)](#median-time-to-running-\(mttr\))
+- [Limitations](#limitations)
+
+## Overview
+
+The primary goal of the feature is collection and monitoring of Aurora job SLA (Service Level
+Agreements) metrics that define a contractual relationship between the Aurora/Mesos platform
+and hosted services.
+
+The Aurora SLA feature currently supports stat collection only for service (non-cron)
+production jobs (`"production = True"` in your `.aurora` config).
+
+Counters that track SLA measurements are computed periodically within the scheduler.
+The individual instance metrics are refreshed every minute (configurable via
+`sla_stat_refresh_interval`). The instance counters are subsequently aggregated by
+relevant grouping types before being exported to the scheduler `/vars` endpoint (when using `vagrant`
+that would be `http://192.168.33.7:8081/vars`).
+
+## Metric Details
+
+### Platform Uptime
+
+*Aggregate amount of time a job spends in a non-runnable state due to platform unavailability
+or scheduling delays. This metric tracks Aurora/Mesos uptime performance and reflects on any
+system-caused downtime events (tasks LOST or DRAINED). Any user-initiated task kills/restarts
+will not degrade this metric.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_platform_uptime_percent`
+* Per cluster - `sla_cluster_platform_uptime_percent`
+
+**Units:** percent
+
+A fault in the task environment may cause Aurora and Mesos to have different views of the task state
+or to lose track of the task's existence. In such cases, the service task is marked as LOST and
+rescheduled by Aurora. For example, this may happen when the task stays in ASSIGNED or STARTING
+for too long or the Mesos slave becomes unhealthy (or disappears completely). The time between
+task entering LOST and its replacement reaching RUNNING state is counted towards platform downtime.
+
+Another example of a platform downtime event is the administrator-requested task rescheduling. This
+happens during planned Mesos slave maintenance when all slave tasks are marked as DRAINED and
+rescheduled elsewhere.
+
+To accurately calculate Platform Uptime, we must separate platform incurred downtime from user
+actions that put a service instance in a non-operational state. It is simpler to isolate
+user-incurred downtime and treat all other downtime as platform incurred.
+
+Currently, a user can cause a healthy service (task) downtime in only two ways: via `killTasks`
+or `restartShards` RPCs. For both, their affected tasks leave an audit state transition trail
+relevant to uptime calculations. By applying a special "SLA meaning" to exposed task state
+transition records, we can build a deterministic downtime trace for every given service instance.
+
+A task going through a state transition carries one of three possible SLA meanings
+(see [SlaAlgorithm.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/sla/SlaAlgorithm.java) for
+sla-to-task-state mapping):
+
+* Task is UP: starts a period where the task is considered to be up and running from the Aurora
+  platform standpoint.
+
+* Task is DOWN: starts a period where the task cannot reach the UP state for some
+  non-user-related reason. Counts towards instance downtime.
+
+* Task is REMOVED from SLA: starts a period where the task is not expected to be UP due to
+  user initiated action or failure. We ignore this period for the uptime calculation purposes.
+
+This metric is recalculated over the last sampling period (last minute) to account for
+any UP/DOWN/REMOVED events. It ignores any UP/DOWN events not immediately adjacent to the
+sampling interval as well as adjacent REMOVED events.
+
+### Job Uptime
+
+*Percentage of the job instances considered to be in RUNNING state for the specified duration
+relative to request time. This is a purely application-side metric that considers the aggregate
+uptime of all RUNNING instances. Any user- or platform-initiated restarts directly affect
+this metric.*
+
+**Collection scope:** We currently expose job uptime values at 5 pre-defined
+percentiles (50th, 75th, 90th, 95th and 99th):
+
+* `sla_<job_key>_job_uptime_50_00_sec`
+* `sla_<job_key>_job_uptime_75_00_sec`
+* `sla_<job_key>_job_uptime_90_00_sec`
+* `sla_<job_key>_job_uptime_95_00_sec`
+* `sla_<job_key>_job_uptime_99_00_sec`
+
+**Units:** seconds
+
+You can also get customized real-time stats from the aurora client. See `aurora sla -h` for
+more details.
+
+### Median Time To Assigned (MTTA)
+
+*Median time a job spends waiting for its tasks to be assigned to a host. This is a combined
+metric that helps track the dependency of scheduling performance on the requested resources
+(user scope) as well as the internal scheduler bin-packing algorithm efficiency (platform scope).*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtta_ms`
+* Per cluster - `sla_cluster_mtta_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in
+[ResourceAggregates.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java):
+  * By CPU:
+    * `sla_cpu_small_mtta_ms`
+    * `sla_cpu_medium_mtta_ms`
+    * `sla_cpu_large_mtta_ms`
+    * `sla_cpu_xlarge_mtta_ms`
+    * `sla_cpu_xxlarge_mtta_ms`
+  * By RAM:
+    * `sla_ram_small_mtta_ms`
+    * `sla_ram_medium_mtta_ms`
+    * `sla_ram_large_mtta_ms`
+    * `sla_ram_xlarge_mtta_ms`
+    * `sla_ram_xxlarge_mtta_ms`
+  * By DISK:
+    * `sla_disk_small_mtta_ms`
+    * `sla_disk_medium_mtta_ms`
+    * `sla_disk_large_mtta_ms`
+    * `sla_disk_xlarge_mtta_ms`
+    * `sla_disk_xxlarge_mtta_ms`
+
+**Units:** milliseconds
+
+MTTA only considers instances that have already reached ASSIGNED state and ignores those
+that are still PENDING. This ensures straggler instances (e.g. with unreasonable resource
+constraints) do not affect metric curves.
+
+### Median Time To Running (MTTR)
+
+*Median time a job waits for its tasks to reach RUNNING state. This is a comprehensive metric
+reflecting the overall time it takes for Aurora/Mesos to start executing user content.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mttr_ms`
+* Per cluster - `sla_cluster_mttr_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in
+[ResourceAggregates.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java):
+  * By CPU:
+    * `sla_cpu_small_mttr_ms`
+    * `sla_cpu_medium_mttr_ms`
+    * `sla_cpu_large_mttr_ms`
+    * `sla_cpu_xlarge_mttr_ms`
+    * `sla_cpu_xxlarge_mttr_ms`
+  * By RAM:
+    * `sla_ram_small_mttr_ms`
+    * `sla_ram_medium_mttr_ms`
+    * `sla_ram_large_mttr_ms`
+    * `sla_ram_xlarge_mttr_ms`
+    * `sla_ram_xxlarge_mttr_ms`
+  * By DISK:
+    * `sla_disk_small_mttr_ms`
+    * `sla_disk_medium_mttr_ms`
+    * `sla_disk_large_mttr_ms`
+    * `sla_disk_xlarge_mttr_ms`
+    * `sla_disk_xxlarge_mttr_ms`
+
+**Units:** milliseconds
+
+MTTR only considers instances in RUNNING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+## Limitations
+
+* The availability of Aurora SLA metrics is bound by the scheduler availability.
+
+* All metrics are calculated at a pre-defined interval (currently set at 1 minute).
+  Scheduler restarts may result in missed collections.
\ No newline at end of file
diff --git a/source/documentation/0.8.0/storage-config.md b/source/documentation/0.8.0/storage-config.md
new file mode 100644
index 0000000..aabbc35
--- /dev/null
+++ b/source/documentation/0.8.0/storage-config.md
@@ -0,0 +1,142 @@
+# Storage Configuration And Maintenance
+
+- [Overview](#overview)
+- [Scheduler storage configuration flags](#scheduler-storage-configuration-flags)
+  - [Mesos replicated log configuration flags](#mesos-replicated-log-configuration-flags)
+    - [-native_log_quorum_size](#-native_log_quorum_size)
+    - [-native_log_file_path](#-native_log_file_path)
+    - [-native_log_zk_group_path](#-native_log_zk_group_path)
+  - [Backup configuration flags](#backup-configuration-flags)
+    - [-backup_interval](#-backup_interval)
+    - [-backup_dir](#-backup_dir)
+    - [-max_saved_backups](#-max_saved_backups)
+- [Recovering from a scheduler backup](#recovering-from-a-scheduler-backup)
+  - [Summary](#summary)
+  - [Preparation](#preparation)
+  - [Cleanup and re-initialize Mesos replicated log](#cleanup-and-re-initialize-mesos-replicated-log)
+  - [Restore from backup](#restore-from-backup)
+  - [Cleanup](#cleanup)
+
+## Overview
+
+This document summarizes Aurora storage configuration and maintenance details and is
+intended for use by anyone deploying and/or maintaining Aurora.
+
+For a high level overview of the Aurora storage architecture refer to [this document](/documentation/0.8.0/storage/).
+
+## Scheduler storage configuration flags
+
+Below is a summary of scheduler storage configuration flags that either don't have default values
+or require attention before deploying in a production environment.
+
+### Mesos replicated log configuration flags
+
+#### -native_log_quorum_size
+Defines the Mesos replicated log quorum size. See
+[the replicated log configuration document](/documentation/0.8.0/deploying-aurora-scheduler/#replicated-log-configuration)
+on how to choose the right value.
+
+#### -native_log_file_path
+Location of the Mesos replicated log files. Consider allocating a dedicated disk (preferably SSD)
+for Mesos replicated log files to ensure optimal storage performance.
+
+#### -native_log_zk_group_path
+ZooKeeper path used for Mesos replicated log quorum discovery.
+
+See [code](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/log/mesos/MesosLogStreamModule.java) for
+other available Mesos replicated log configuration options and default values.
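+
+For example, a hypothetical three-scheduler deployment might pass flags along these lines (the
+values are purely illustrative; choose them per the guidance above):
+
+```
+-native_log_quorum_size=2
+-native_log_file_path=/var/lib/aurora/db
+-native_log_zk_group_path=/aurora/replicated-log
+```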
+
+### Backup configuration flags
+
+Configuration options for the Aurora scheduler backup manager.
+
+#### -backup_interval
+The interval on which the scheduler writes local storage backups.  The default is every hour.
+
+#### -backup_dir
+Directory to write backups to.
+
+#### -max_saved_backups
+Maximum number of backups to retain before deleting the oldest backup(s).
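+
+For example (values are illustrative; the defaults noted above may already suit your deployment):
+
+```
+-backup_dir=/var/lib/aurora/backups
+-max_saved_backups=48
+```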
+
+## Recovering from a scheduler backup
+
+- [Overview](#overview)
+- [Preparation](#preparation)
+- [Assess Mesos replicated log damage](#assess-mesos-replicated-log-damage)
+- [Restore from backup](#restore-from-backup)
+- [Cleanup](#cleanup)
+
+**Be sure to read the entire page before attempting to restore from a backup, as it may have
+unintended consequences.**
+
+### Summary
+
+The restoration procedure replaces the existing (possibly corrupted) Mesos replicated log with an
+earlier, backed up, version and requires all schedulers to be taken down temporarily while
+restoring. Once completed, the scheduler state resets to what it was when the backup was created.
+This means any jobs/tasks created or updated after the backup are unknown to the scheduler and will
+be killed shortly after the cluster restarts. All other tasks continue operating as normal.
+
+Usually, it is a bad idea to restore a backup that is not extremely recent (i.e. older than a few
+hours). This is because the scheduler will expect the cluster to look exactly as the backup does,
+so any tasks that have been rescheduled since the backup was taken will be killed.
+
+### Preparation
+
+Follow these steps to prepare the cluster for restoring from a backup:
+
+* Stop all scheduler instances
+
+* Consider blocking external traffic on a port defined in `-http_port` for all schedulers to
+prevent users from interacting with the scheduler during the restoration process. This will help
+troubleshooting by reducing the scheduler log noise and prevent users from making changes that will
+be erased after the backup snapshot is restored.
+
+* The next steps are required to put the scheduler into a partially disabled state where it can still
+accept storage recovery requests but cannot schedule tasks or change task states. This may be
+accomplished by updating the following scheduler configuration options:
+  * Set `-mesos_master_address` to a non-existent zk address. This will prevent the scheduler from
+    registering with Mesos. E.g.: `-mesos_master_address=zk://localhost:2181`
+  * Set `-max_registration_delay` to a sufficiently long interval to prevent registration timeouts
+    and, as a result, scheduler suicide. E.g.: `-max_registration_delay=360min`
+  * Make sure the `-gc_executor_path` option is not set, to prevent accidental task GC. This is
+    important as the scheduler will attempt to reconcile the cluster state and will kill all tasks when
+    restarted with an empty Mesos replicated log.
+
+* Restart all schedulers
+
+### Cleanup and re-initialize Mesos replicated log
+
+Get rid of the corrupted files and re-initialize the Mesos replicated log:
+
+* Stop schedulers
+* Delete all files under `-native_log_file_path` on all schedulers
+* Initialize Mesos replica's log file: `mesos-log initialize <-native_log_file_path>`
+* Restart schedulers
+
+### Restore from backup
+
+At this point the scheduler is ready to rehydrate from the backup:
+
+* Identify the leading scheduler by:
+  * running `aurora_admin get_scheduler <cluster>` - if scheduler is responsive
+  * examining scheduler logs
+  * or examining Zookeeper registration under the path defined by `-zk_endpoints`
+    and `-serverset_path`
+
+* Locate the desired backup file, copy it to the leading scheduler and stage recovery by running
+the following command on the leader:
+`aurora_admin scheduler_stage_recovery <cluster> scheduler-backup-<yyyy-MM-dd-HH-mm>`
+
+* At this point, the recovery snapshot is staged and available for manual verification/modification
+via `aurora_admin scheduler_print_recovery_tasks` and `scheduler_delete_recovery_tasks` commands.
+See `aurora_admin help <command>` for usage details.
+
+* Commit recovery. This instructs the scheduler to overwrite the existing Mesos replicated log with
+the provided backup snapshot and initiate a mandatory failover:
+`aurora_admin scheduler_commit_recovery <cluster>`
+
+### Cleanup
+Undo any modification done during [Preparation](#preparation) sequence.
+
diff --git a/source/documentation/0.8.0/storage.md b/source/documentation/0.8.0/storage.md
new file mode 100644
index 0000000..e204390
--- /dev/null
+++ b/source/documentation/0.8.0/storage.md
@@ -0,0 +1,88 @@
+# Aurora Scheduler Storage
+
+- [Overview](#overview)
+- [Reads, writes, modifications](#reads-writes-modifications)
+  - [Read lifecycle](#read-lifecycle)
+  - [Write lifecycle](#write-lifecycle)
+- [Atomicity, consistency and isolation](#atomicity-consistency-and-isolation)
+- [Population on restart](#population-on-restart)
+
+## Overview
+
+The Aurora scheduler maintains data that needs to be persisted to survive failovers and restarts.
+For example:
+
+* Task configurations and scheduled task instances
+* Job update configurations and update progress
+* Production resource quotas
+* Mesos resource offer host attributes
+
+Aurora solves its persistence needs by leveraging the Mesos implementation of a Paxos replicated
+log [[1]](https://ramcloud.stanford.edu/~ongaro/userstudy/paxos.pdf)
+[[2]](http://en.wikipedia.org/wiki/State_machine_replication) with a key-value
+[LevelDB](https://github.com/google/leveldb) storage as the persistence medium.
+
+Conceptually, it can be represented by the following major components:
+
+* Volatile storage: in-memory cache of all available data. Implemented via in-memory
+[H2 Database](http://www.h2database.com/html/main.html) and accessed via
+[MyBatis](http://mybatis.github.io/mybatis-3/).
+* Log manager: interface between Aurora storage and Mesos replicated log. The default schema format
+is [thrift](https://github.com/apache/thrift). Data is stored in serialized binary form.
+* Snapshot manager: all data is periodically persisted in Mesos replicated log in a single snapshot.
+This helps establish periodic recovery checkpoints and speeds up volatile storage recovery on
+restart.
+* Backup manager: as a precaution, snapshots are periodically written out into backup files.
+This solves a [disaster recovery problem](/documentation/0.8.0/storage-config/#recovering-from-a-scheduler-backup)
+in case of a complete loss or corruption of Mesos log files.
+
+![Storage hierarchy](images/storage_hierarchy.png)
+
+## Reads, writes, modifications
+
+All services in Aurora access data via a set of predefined store interfaces (aka stores) logically
+grouped by the type of data they serve. Every interface defines a specific set of operations allowed
+on the data, thus abstracting out the storage access and the actual persistence implementation. The
+latter is especially important given the general immutability of persisted data. With the Mesos
+replicated log as the underlying persistence solution, data can be read and written easily but not
+modified. All modifications are simulated by saving new versions of modified objects. This feature
+and general performance considerations justify the existence of the volatile in-memory store.
+
+### Read lifecycle
+
+There are two types of reads available in Aurora: consistent and weakly-consistent. The difference
+is explained [below](#atomicity-consistency-and-isolation).
+
+All reads are served from volatile storage, making them generally cheap operations from a
+performance standpoint. The majority of the volatile stores are backed by the in-memory H2
+database. This allows for rich schema definitions, queries, and relationships that key-value
+storage is unable to match.
+
+### Write lifecycle
+
+Writes are more involved operations since, in addition to updating the volatile store, data has to
+be appended to the replicated log. Data is not available for reads until fully acknowledged by both
+the replicated log and volatile storage.
+
+## Atomicity, consistency and isolation
+
+Aurora uses [write-ahead logging](http://en.wikipedia.org/wiki/Write-ahead_logging) to ensure
+consistency between replicated and volatile storage. In Aurora, data is first written into the
+replicated log and only then updated in the volatile store.
+
+Aurora storage uses read-write locks to serialize data mutations and provide a consistent view of
+the available data. The `Storage` interface exposes three major types of operations:
+
+* `consistentRead` - access is guarded by the reader's lock and provides a consistent view on read
+* `weaklyConsistentRead` - access is lock-less. Delivers the best performance under contention but
+may result in stale reads
+* `write` - access is fully serialized by the writer's lock. Operation success requires both the
+volatile and replicated writes to succeed.
+
+The consistency of the volatile store is enforced via H2 transactional isolation.
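+
+To make the write-ahead ordering and locking concrete, here is a minimal Python sketch
+(illustrative only, not the scheduler's actual Java implementation; a list and a dict stand in for
+the replicated log and the H2-backed volatile store, and one lock stands in for the RW lock pair):
+
+```python
+import threading
+
+class SketchStorage(object):
+  """Illustrative only: not Aurora code."""
+
+  def __init__(self):
+    self._lock = threading.RLock()
+    self._log = []          # replicated log (append-only)
+    self._volatile = {}     # volatile in-memory store
+
+  def write(self, key, value):
+    # Write-ahead: append to the replicated log first, then apply the
+    # change to the volatile store; both must succeed.
+    with self._lock:
+      self._log.append((key, value))
+      self._volatile[key] = value
+
+  def consistent_read(self, key):
+    # Takes the lock, so it never observes a half-applied write.
+    with self._lock:
+      return self._volatile.get(key)
+
+  def weakly_consistent_read(self, key):
+    # Lock-less: best behavior under contention, but may return stale data.
+    return self._volatile.get(key)
+```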
+
+## Population on restart
+
+Any time a scheduler restarts, it restores its volatile state from the most recent position recorded
+in the replicated log by restoring the snapshot and replaying individual log entries on top to fully
+recover the state up to the last write.
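+
+As an illustration of that sequence, here is a hypothetical Python sketch, assuming the snapshot is
+expressed as a mapping and the subsequent log entries as an ordered list of key/value writes:
+
+```python
+def recover(snapshot, log_entries):
+  """Illustrative recovery: start from the snapshot's state, then replay
+  every log entry recorded after the snapshot, in order."""
+  state = dict(snapshot)
+  for key, value in log_entries:   # entries written after the snapshot
+    state[key] = value
+  return state
+
+# Example: a snapshot taken at some checkpoint plus two later writes.
+volatile_state = recover({'quota/www-data': '10 cores'},
+                         [('task/1', 'RUNNING'), ('quota/www-data', '12 cores')])
+```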
+
diff --git a/source/documentation/0.8.0/test-resource-generation.md b/source/documentation/0.8.0/test-resource-generation.md
new file mode 100644
index 0000000..c906dcf
--- /dev/null
+++ b/source/documentation/0.8.0/test-resource-generation.md
@@ -0,0 +1,25 @@
+# Generating test resources
+
+## Background
+The Aurora source repository and distributions contain several
+[binary files](https://github.com/apache/aurora/blob/#{git_tag}/src/test/resources/org/apache/thermos/root/checkpoints) to
+qualify the backwards-compatibility of thermos with checkpoint data. Since
+thermos persists state to disk, to be read by other components (the GC executor
+and the thermos observer), it is important that we have tests that prevent
+regressions affecting the ability to parse previously-written data.
+
+## Generating test files
+The files included represent persisted checkpoints that exercise different
+features of thermos. The existing files should not be modified unless
+we are accepting backwards incompatibility, such as with a major release.
+
+It is not practical to write source code to generate these files on the fly,
+as source would be vulnerable to drift (e.g. due to refactoring) in ways
+that would undermine the goal of ensuring backwards compatibility.
+
+The most common reason to add a new checkpoint file would be to provide
+coverage for new thermos features that alter the data format. This is
+accomplished by writing and running a
+[job configuration](/documentation/0.8.0/configuration-reference/) that exercises the feature, and
+copying the checkpoint file from the sandbox directory; by default this is
+`/var/run/thermos/checkpoints/<aurora task id>` (see the sketch below).
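+
+As a rough illustration (the task id and destination below are hypothetical; substitute the real
+task id and the checkpoint resource directory for your change):
+
+```python
+import shutil
+
+# Hypothetical values, for illustration only.
+task_id = 'www-data-devel-hello_world-0-example'
+src = '/var/run/thermos/checkpoints/%s' % task_id
+dst = 'src/test/resources/org/apache/thermos/root/checkpoints/%s' % task_id
+
+shutil.copy(src, dst)   # use shutil.copytree instead if the path is a directory
+```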
diff --git a/source/documentation/0.8.0/thrift-deprecation.md b/source/documentation/0.8.0/thrift-deprecation.md
new file mode 100644
index 0000000..49e4915
--- /dev/null
+++ b/source/documentation/0.8.0/thrift-deprecation.md
@@ -0,0 +1,50 @@
+# Thrift API Changes
+
+## Overview
+Aurora uses [Apache Thrift](https://thrift.apache.org/) for representing structured data in
+client/server RPC protocol as well as for internal data storage. While Thrift is capable of
+correctly handling additions and renames of the existing members, field removals must be done
+carefully to ensure backwards compatibility and provide a predictable deprecation cycle. This
+document describes general guidelines for making Thrift schema changes to the existing fields in
+[api.thrift](https://github.com/apache/aurora/blob/#{git_tag}/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+It is highly recommended to go through the
+[Thrift: The Missing Guide](http://diwakergupta.github.io/thrift-missing-guide/) first to refresh on
+basic Thrift schema concepts.
+
+## Checklist
+Every existing Thrift schema modification is unique in its requirements and must be analyzed
+carefully to identify its scope and expected consequences. The following checklist may help in that
+analysis:
+
+* Is this a new field/struct? If yes, go ahead.
+* Is this a pure field/struct rename without any type/structure change? If yes, go ahead and rename.
+* Anything else? Read further to make sure your change is properly planned.
+
+## Deprecation cycle
+Any time a breaking change (e.g.: field replacement or removal) is required, the following cycle
+must be followed:
+
+### vCurrent
+The change is applied in a way that does not prevent a scheduler/client at this version from
+communicating with a scheduler/client at vCurrent-1.
+* Do not remove or rename the old field
+* Add a new field as an eventual replacement of the old one and implement a dual read/write
+anywhere the old field is used (see the sketch after this list)
+* Check [storage.thrift](https://github.com/apache/aurora/blob/#{git_tag}/api/src/main/thrift/org/apache/aurora/gen/storage.thrift) to see if the
+affected struct is stored in Aurora scheduler storage. If so, you most likely need to backfill
+existing data to ensure both fields are populated eagerly on startup.
+See [StorageBackfill.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/storage/StorageBackfill.java)
+* Add a deprecation JIRA ticket to the vCurrent+1 release candidate
+* Add a TODO for the deprecated field mentioning the JIRA ticket
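+
+As a rough sketch of what "dual read/write" can look like (the `owner`/`owner_role` field names
+below are purely hypothetical and not taken from api.thrift):
+
+```python
+class Identity(object):
+  """Stand-in for a Thrift-generated struct; field names are hypothetical."""
+  def __init__(self, role):
+    self.role = role
+
+class TaskConfig(object):
+  def __init__(self, owner=None, owner_role=None):
+    self.owner = owner            # new field
+    self.owner_role = owner_role  # deprecated field, kept for one release
+
+def read_owner(task_config):
+  # Dual read: prefer the new field, fall back to the deprecated one.
+  if task_config.owner is not None:
+    return task_config.owner
+  return Identity(role=task_config.owner_role)
+
+def write_owner(task_config, owner):
+  # Dual write: keep both fields populated until vCurrent+1 drops the old one.
+  task_config.owner = owner
+  task_config.owner_role = owner.role
+```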
+
+### vCurrent+1
+Finalize the change by removing the deprecated fields from the Thrift schema.
+* Drop any dual read/write routines added in the previous version
+* Remove the deprecated Thrift field
+
+## Testing
+It's always advisable to test your changes in the local vagrant environment to build more
+confidence that your change is backwards compatible. It's easy to simulate different
+client/scheduler versions by playing with the `aurorabuild` command. See [this document](/documentation/0.8.0/vagrant/)
+for more.
+
diff --git a/source/documentation/0.8.0/tutorial.md b/source/documentation/0.8.0/tutorial.md
new file mode 100644
index 0000000..876bfb8
--- /dev/null
+++ b/source/documentation/0.8.0/tutorial.md
@@ -0,0 +1,265 @@
+Aurora Tutorial
+---------------
+
+- [Introduction](#introduction)
+- [Setup: Install Aurora](#setup-install-aurora)
+- [The Script](#the-script)
+- [Aurora Configuration](#aurora-configuration)
+- [What's Going On In That Configuration File?](#whats-going-on-in-that-configuration-file)
+- [Creating the Job](#creating-the-job)
+- [Watching the Job Run](#watching-the-job-run)
+- [Cleanup](#cleanup)
+- [Next Steps](#next-steps)
+
+## Introduction
+
+This tutorial shows how to use the Aurora scheduler to run (and
+"`printf-debug`") a hello world program on Mesos. The operational
+hierarchy is:
+
+- Aurora manages and schedules jobs for Mesos to run.
+- Mesos manages the individual tasks that make up a job.
+- Thermos manages the individual processes that make up a task.
+
+This is the recommended first document for new Aurora users to read to start
+getting up to speed on the system.
+
+To get help, email questions to the Aurora Developer List,
+[dev@aurora.apache.org](mailto:dev@aurora.apache.org).
+
+## Setup: Install Aurora
+
+You use the Aurora client and web UI to interact with Aurora jobs. To
+install it locally, see [vagrant.md](/documentation/0.8.0/vagrant/). The remainder of this
+Tutorial assumes you are running Aurora using Vagrant.  Unless otherwise stated,
+all commands are to be run from the root of the aurora repository clone.
+
+## The Script
+
+Our "hello world" application is a simple Python script that loops
+forever, displaying the time every few seconds. Copy the code below and
+put it in a file named `hello_world.py` in the root of your Aurora repository clone (Note:
+this directory is the same as `/vagrant` inside the Vagrant VMs).
+
+The script has an intentional bug, which we will explain later on.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+import sys
+import time
+
+def main(argv):
+  SLEEP_DELAY = 10
+  # Python ninjas - ignore this blatant bug.
+  for i in xrang(100):
+    print("Hello world! The time is now: %s. Sleeping for %d secs" % (
+      time.asctime(), SLEEP_DELAY))
+    sys.stdout.flush()
+    time.sleep(SLEEP_DELAY)
+
+if __name__ == "__main__":
+  main(sys.argv)
+```
+
+## Aurora Configuration
+
+Once we have our script/program, we need to create a *configuration
+file* that tells Aurora how to manage and launch our Job. Save the below
+code in the file `hello_world.aurora`.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+pkg_path = '/vagrant/hello_world.py'
+
+# we use a trick here to make the configuration change with
+# the contents of the file, for simplicity.  in a normal setting, packages would be
+# versioned, and the version number would be changed in the configuration.
+import hashlib
+with open(pkg_path, 'rb') as f:
+  pkg_checksum = hashlib.md5(f.read()).hexdigest()
+
+# copy hello_world.py into the local sandbox
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s . && echo %s && chmod +x hello_world.py' % (pkg_path, pkg_checksum))
+
+# run the script
+hello_world = Process(
+  name = 'hello_world',
+  cmdline = 'python hello_world.py')
+
+# describe the task
+hello_world_task = SequentialTask(
+  processes = [install, hello_world],
+  resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB))
+
+jobs = [
+  Service(cluster = 'devcluster',
+          environment = 'devel',
+          role = 'www-data',
+          name = 'hello_world',
+          task = hello_world_task)
+]
+```
+
+For more about Aurora configuration files, see the [Configuration
+Tutorial](/documentation/0.8.0/configuration-tutorial/) and the [Aurora + Thermos
+Reference](/documentation/0.8.0/configuration-reference/) (preferably after finishing this
+tutorial).
+
+## What's Going On In That Configuration File?
+
+More than you might think.
+
+1. From a "big picture" viewpoint, it first defines two
+Processes. Then it defines a Task that runs the two Processes in the
+order specified in the Task definition, as well as specifying what
+computational and memory resources are available for them.  Finally,
+it defines a Job that will schedule the Task on available and suitable
+machines. This Job is the sole member of a list of Jobs; you can
+specify more than one Job in a config file.
+
+2. At the Process level, it specifies how to get your code into the
+local sandbox in which it will run. It then specifies how the code is
+actually run once the second Process starts.
+
+## Creating the Job
+
+We're ready to launch our job! To do so, we use the Aurora Client to
+issue a Job creation request to the Aurora scheduler.
+
+Many Aurora Client commands take a *job key* argument, which uniquely
+identifies a Job. A job key consists of four parts, each separated by a
+"/". The four parts are  `<cluster>/<role>/<environment>/<jobname>`
+in that order. When comparing two job keys, if any of the
+four parts is different from its counterpart in the other key, then the
+two job keys identify two separate jobs. If all four values are
+identical, the job keys identify the same job.
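+
+For illustration only (this is not part of the Aurora client), a job key decomposes like this:
+
+```python
+def parse_job_key(job_key):
+  """Split a job key into its four components; raises if any part is missing."""
+  cluster, role, environment, jobname = job_key.split('/')
+  return {'cluster': cluster, 'role': role,
+          'environment': environment, 'jobname': jobname}
+
+print(parse_job_key('devcluster/www-data/devel/hello_world'))
+# {'cluster': 'devcluster', 'role': 'www-data',
+#  'environment': 'devel', 'jobname': 'hello_world'}
+```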
+
+`/etc/aurora/clusters.json` within the Aurora scheduler has the available
+cluster names. For Vagrant, from the top-level of your Aurora repository clone,
+do:
+
+    $ vagrant ssh
+
+Followed by:
+
+    vagrant@precise64:~$ cat /etc/aurora/clusters.json
+
+You'll see something like:
+
+```javascript
+[{
+  "name": "devcluster",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler",
+  "auth_mechanism": "UNAUTHENTICATED"
+}]
+```
+
+Use a `name` value for your job key's cluster value.
+
+Role names are user accounts existing on the slave machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
+The Aurora Client command that actually runs our Job is `aurora job create`. It creates a Job as
+specified by its job key and configuration file arguments and runs it.
+
+    aurora job create <cluster>/<role>/<environment>/<jobname> <config_file>
+
+Or for our example:
+
+    aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+This returns:
+
+    $ vagrant ssh
+    Welcome to Ubuntu 12.04 LTS (GNU/Linux 3.2.0-23-generic x86_64)
+
+     * Documentation:  https://help.ubuntu.com/
+    Welcome to your Vagrant-built virtual machine.
+    Last login: Fri Jan  3 02:18:55 2014 from 10.0.2.2
+    vagrant@precise64:~$ aurora job create devcluster/www-data/devel/hello_world \
+        /vagrant/hello_world.aurora
+     INFO] Creating job hello_world
+     INFO] Response from scheduler: OK (message: 1 new tasks pending for job
+      www-data/devel/hello_world)
+     INFO] Job url: http://precise64:8081/scheduler/www-data/devel/hello_world
+
+## Watching the Job Run
+
+Now that our job is running, let's see what it's doing. Access the
+scheduler web interface at `http://$scheduler_hostname:$scheduler_port/scheduler`,
+or, when using `vagrant`, at `http://192.168.33.7:8081/scheduler`.
+First we see what Jobs are scheduled:
+
+![Scheduled Jobs](images/ScheduledJobs.png)
+
+Click on your user name, which in this case is `www-data`, and you will see the Jobs associated
+with that role:
+
+![Role Jobs](images/RoleJobs.png)
+
+If you click on your `hello_world` Job, you'll see:
+
+![hello_world Job](images/HelloWorldJob.png)
+
+Oops, looks like our first job didn't quite work! The task failed, so we have
+to figure out what went wrong.
+
+Access the page for our Task by clicking on its host.
+
+![Task page](images/TaskBreakdown.png)
+
+Once there, we see that the
+`hello_world` process failed. The Task page captures the standard error and
+standard output streams and makes them available. Clicking through
+to `stderr` on the failed `hello_world` process, we see what happened.
+
+![stderr page](images/stderr.png)
+
+It looks like we made a typo in our Python script. We wanted `xrange`,
+not `xrang`. Edit the `hello_world.py` script to use the correct function and
+we will try again.
+
+    aurora job update devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+This time, the task comes up, we inspect the page, and see that the
+`hello_world` process is running.
+
+![Running Task page](images/runningtask.png)
+
+We then inspect the output by clicking on `stdout` and see our process'
+output:
+
+![stdout page](images/stdout.png)
+
+## Cleanup
+
+Now that we're done, we kill the job using the Aurora client:
+
+    vagrant@precise64:~$ aurora job killall devcluster/www-data/devel/hello_world
+     INFO] Killing tasks for job: devcluster/www-data/devel/hello_world
+     INFO] Response from scheduler: OK (message: Tasks killed.)
+     INFO] Job url: http://precise64:8081/scheduler/www-data/devel/hello_world
+    vagrant@precise64:~$
+
+The job page now shows the `hello_world` tasks as completed.
+
+![Killed Task page](images/killedtask.png)
+
+## Next Steps
+
+Now that you've finished this Tutorial, you should read or do the following:
+
+- [The Aurora Configuration Tutorial](/documentation/0.8.0/configuration-tutorial/), which provides more examples
+  and best practices for writing Aurora configurations. You should also look at
+  the [Aurora + Thermos Configuration Reference](/documentation/0.8.0/configuration-reference/).
+- The [Aurora User Guide](/documentation/0.8.0/user-guide/) provides an overview of how Aurora, Mesos, and
+  Thermos work "under the hood".
+- Explore the Aurora Client - use `aurora -h`, and read the
+  [Aurora Client Commands](/documentation/0.8.0/client-commands/) document.
diff --git a/source/documentation/0.8.0/user-guide.md b/source/documentation/0.8.0/user-guide.md
new file mode 100644
index 0000000..a9510f8
--- /dev/null
+++ b/source/documentation/0.8.0/user-guide.md
@@ -0,0 +1,355 @@
+Aurora User Guide
+-----------------
+
+- [Overview](#overview)
+- [Job Lifecycle](#job-lifecycle)
+	- [Life Of A Task](#life-of-a-task)
+	- [PENDING to RUNNING states](#pending-to-running-states)
+	- [Task Updates](#task-updates)
+	- [HTTP Health Checking and Graceful Shutdown](#http-health-checking-and-graceful-shutdown)
+		- [Tearing a task down](#tearing-a-task-down)
+	- [Giving Priority to Production Tasks: PREEMPTING](#giving-priority-to-production-tasks-preempting)
+	- [Natural Termination: FINISHED, FAILED](#natural-termination-finished-failed)
+	- [Forceful Termination: KILLING, RESTARTING](#forceful-termination-killing-restarting)
+- [Service Discovery](#service-discovery)
+- [Configuration](#configuration)
+- [Creating Jobs](#creating-jobs)
+- [Interacting With Jobs](#interacting-with-jobs)
+
+Overview
+--------
+
+This document gives an overview of how Aurora works under the hood.
+It assumes you've already worked through the "hello world" example
+job in the [Aurora Tutorial](/documentation/0.8.0/tutorial/). Specifics of how to use Aurora are **not**
+given here, but pointers to documentation about how to use Aurora are
+provided.
+
+Aurora is a Mesos framework used to schedule *jobs* onto Mesos. Mesos
+cares about individual *tasks*, but typical jobs consist of dozens or
+hundreds of task replicas. Aurora provides a layer on top of Mesos with
+its `Job` abstraction. An Aurora `Job` consists of a task template and
+instructions for creating near-identical replicas of that task (modulo
+things like "instance id" or specific port numbers which may differ from
+machine to machine).
+
+How many tasks make up a Job is complicated. On a basic level, a Job consists of
+one task template and instructions for creating near-identical replicas of that task
+(otherwise referred to as "instances" or "shards").
+
+However, since Jobs can be updated on the fly, a single Job identifier or *job key*
+can have multiple job configurations associated with it.
+
+For example, consider when I have a Job with 4 instances that each
+request 1 core of cpu, 1 GB of RAM, and 1 GB of disk space as specified
+in the configuration file `hello_world.aurora`. I want to
+update it so it requests 2 GB of RAM instead of 1. I create a new
+configuration file to do that called `new_hello_world.aurora` and
+issue an `aurora job update <job_key_value>/0-1 new_hello_world.aurora`
+command.
+
+This results in instances 0 and 1 having 1 cpu, 2 GB of RAM, and 1 GB of disk space,
+while instances 2 and 3 have 1 cpu, 1 GB of RAM, and 1 GB of disk space. If instance 3
+dies and restarts, it restarts with 1 cpu, 1 GB RAM, and 1 GB disk space.
+
+So that means there are two simultaneous task configurations for the same Job
+at the same time, just valid for different ranges of instances.
+
+This isn't a recommended pattern, but it is valid and supported by the
+Aurora scheduler. This most often manifests in the "canary pattern" where
+instance 0 runs with a different configuration than instances 1-N to test
+different code versions alongside the actual production job.
+
+A task can merely be a single *process* corresponding to a single
+command line, such as `python2.6 my_script.py`. However, a task can also
+consist of many separate processes, which all run within a single
+sandbox. For example, running multiple cooperating agents together,
+such as `logrotate`, `installer`, master, or slave processes. This is
+where Thermos comes in. While Aurora provides a `Job` abstraction on
+top of Mesos `Tasks`, Thermos provides a `Process` abstraction
+underneath Mesos `Task`s and serves as part of the Aurora framework's
+executor.
+
+You define `Job`s, `Task`s, and `Process`es in a configuration file.
+Configuration files are written in Python, and make use of the Pystachio
+templating language. They end in a `.aurora` extension.
+
+Pystachio is a type-checked dictionary templating library.
+
+> TL;DR
+>
+> -   Aurora manages jobs made of tasks.
+> -   Mesos manages tasks made of processes.
+> -   Thermos manages processes.
+> -   All are defined in `.aurora` configuration files.
+
+![Aurora hierarchy](images/aurora_hierarchy.png)
+
+Each `Task` has a *sandbox* created when the `Task` starts and garbage
+collected when it finishes. All of a `Task`'s processes run in its
+sandbox, so processes can share state by using a shared current working
+directory.
+
+The sandbox garbage collection policy considers many factors, most
+importantly age and size. It makes a best-effort attempt to keep
+sandboxes around as long as possible post-task in order for service
+owners to inspect data and logs, should the `Task` have completed
+abnormally. But you can't design your applications assuming sandboxes
+will be around forever; plan for this, e.g. by building log saving or other
+checkpointing mechanisms directly into your application or into your
+`Job` description.
+
+Job Lifecycle
+-------------
+
+When Aurora reads a configuration file and finds a `Job` definition, it:
+
+1.  Evaluates the `Job` definition.
+2.  Splits the `Job` into its constituent `Task`s.
+3.  Sends those `Task`s to the scheduler.
+4.  The scheduler puts the `Task`s into `PENDING` state, starting each
+    `Task`'s life cycle.
+
+### Life Of A Task
+
+![Life of a task](images/lifeofatask.png)
+
+### PENDING to RUNNING states
+
+When a `Task` is in the `PENDING` state, the scheduler constantly
+searches for machines satisfying that `Task`'s resource request
+requirements (RAM, disk space, CPU time) while maintaining configuration
+constraints such as "a `Task` must run on machines  dedicated  to a
+particular role" or attribute limit constraints such as "at most 2
+`Task`s from the same `Job` may run on each rack". When the scheduler
+finds a suitable match, it assigns the `Task` to a machine and puts the
+`Task` into the `ASSIGNED` state.
+
+From the `ASSIGNED` state, the scheduler sends an RPC to the slave
+machine containing `Task` configuration, which the slave uses to spawn
+an executor responsible for the `Task`'s lifecycle. When the scheduler
+receives an acknowledgement that the machine has accepted the `Task`,
+the `Task` goes into `STARTING` state.
+
+`STARTING` state initializes a `Task` sandbox. When the sandbox is fully
+initialized, Thermos begins to invoke `Process`es. Also, the slave
+machine sends an update to the scheduler that the `Task` is
+in `RUNNING` state.
+
+If a `Task` stays in `ASSIGNED` or `STARTING` for too long, the
+scheduler forces it into `LOST` state, creating a new `Task` in its
+place that's sent into `PENDING` state. This is technically true of any
+active state: if the Mesos core tells the scheduler that a slave has
+become unhealthy (or outright disappeared), the `Task`s assigned to that
+slave go into `LOST` state and new `Task`s are created in their place.
+From `PENDING` state, there is no guarantee a `Task` will be reassigned
+to the same machine unless job constraints explicitly force it there.
+
+If there is a state mismatch (e.g. a machine returns from a network partition
+and the scheduler has marked all its `Task`s `LOST` and rescheduled
+them), a state reconciliation process kills the errant `RUNNING` tasks,
+which may take up to an hour. But to emphasize this point: there is no
+uniqueness guarantee for a single instance of a job in the presence of
+network partitions. If the Task requires that, it should be baked in at
+the application level using a distributed coordination service such as
+Zookeeper.
+
+### Task Updates
+
+`Job` configurations can be updated at any point in their lifecycle.
+Usually updates are done incrementally using a process called a *rolling
+upgrade*, in which Tasks are upgraded in small groups, one group at a
+time.  Updates are done using various Aurora Client commands.
+
+For a configuration update, the Aurora Client calculates required changes
+by examining the current job config state and the new desired job config.
+It then starts a rolling batched update process by going through every batch
+and performing these operations:
+
+- If an instance is present in the scheduler but isn't in the new config,
+  then that instance is killed.
+- If an instance is not present in the scheduler but is present in
+  the new config, then the instance is created.
+- If an instance is present in both the scheduler and the new config, then
+  the client diffs both task configs. If it detects any changes, it
+  performs an instance update by killing the instance with the old config and
+  creating one with the new config (see the sketch below).
+
+The Aurora client continues through the instance list until all tasks are
+updated, in `RUNNING`, and healthy for a configurable amount of time.
+If the client determines the update is not going well (a percentage of health
+checks have failed), it cancels the update.
+
+Update cancellation runs a procedure similar to the update sequence
+described above, but in reverse order. New instance configs are swapped
+with old instance configs and batch updates proceed backwards
+from the point where the update failed. E.g., updating batches (0,1,2), (3,4,5), (6,7,8-FAIL)
+results in a rollback in the order (8,7,6), (5,4,3), (2,1,0).
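+
+The following is an illustrative sketch of that per-instance planning logic, assuming task configs
+can be compared for equality; it is not the client's actual implementation:
+
+```python
+def plan_update(current, desired):
+  """Illustrative plan: `current` and `desired` map instance id -> task config."""
+  plan = []
+  for instance in sorted(set(current) | set(desired)):
+    if instance not in desired:
+      plan.append(('kill', instance))       # present only in the scheduler
+    elif instance not in current:
+      plan.append(('create', instance))     # present only in the new config
+    elif current[instance] != desired[instance]:
+      plan.append(('update', instance))     # config changed: kill old, add new
+  return plan
+
+# Instances 0-3 running the old config; the new config changes RAM and drops 3.
+print(plan_update({0: '1g', 1: '1g', 2: '1g', 3: '1g'},
+                  {0: '2g', 1: '2g', 2: '1g'}))
+# [('update', 0), ('update', 1), ('kill', 3)]
+```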
+
+### HTTP Health Checking and Graceful Shutdown
+
+The Executor implements a protocol for rudimentary control of a task via HTTP.  Tasks subscribe to
+this protocol by declaring a port named `health`.  Take for example this configuration snippet:
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[http]}}')
+
+When this Process is included in a job, the job will be allocated a port, and the command line
+will be replaced with something like:
+
+    ./run_nginx.sh -port 42816
+
+Where 42816 happens to be the allocated port.  Typically, the Executor monitors Processes within
+a task only by liveness of the forked process.  However, when a `health` port is allocated, it will
+also send periodic HTTP health checks.  A task requesting a `health` port must handle the following
+requests:
+
+| HTTP request            | Description                             |
+| ------------            | -----------                             |
+| `GET /health`           | Inquires whether the task is healthy.   |
+| `POST /quitquitquit`    | Task should initiate graceful shutdown. |
+| `POST /abortabortabort` | Final warning task is being killed.     |
+
+Please see the
+[configuration reference](/documentation/0.8.0/configuration-reference/#healthcheckconfig-objects) for
+configuration options for this feature.
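+
+For illustration, a minimal task-side handler for these requests might look like the sketch below.
+It assumes Python 3's `http.server`; by default the health checker expects an `ok` response body
+(see `HealthCheckConfig` in the configuration reference), and the shutdown behavior here is a
+simplification:
+
+```python
+import os
+import sys
+import threading
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+def graceful_shutdown():
+  os._exit(0)   # placeholder: flush state and close connections before exiting
+
+class HealthHandler(BaseHTTPRequestHandler):
+  def _reply(self, body):
+    self.send_response(200)
+    self.end_headers()
+    self.wfile.write(body.encode())
+
+  def do_GET(self):
+    if self.path == '/health':
+      self._reply('ok')   # report healthy; return anything else when unhealthy
+
+  def do_POST(self):
+    if self.path in ('/quitquitquit', '/abortabortabort'):
+      self._reply('shutting down')
+      threading.Thread(target=graceful_shutdown).start()
+
+if __name__ == '__main__':
+  # The port would be templated into the cmdline via {{thermos.ports[health]}}.
+  HTTPServer(('0.0.0.0', int(sys.argv[1])), HealthHandler).serve_forever()
+```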
+
+#### Snoozing Health Checks
+
+If you need to pause your health check, you can do so by touching a file inside of your sandbox
+named `.healthchecksnooze`.
+
+As long as that file is present, health checks will be disabled, enabling users to gather core dumps
+or other performance measurements without worrying about Aurora's health check killing their
+process.
+
+WARNING: Remember to remove this when you are done, otherwise your instance will have permanently
+disabled health checks.
+
+#### Tearing a task down
+
+The Executor follows an escalation sequence when killing a running task:
+
+  1. If `health` port is not present, skip to (5)
+  2. POST /quitquitquit
+  3. wait 5 seconds
+  4. POST /abortabortabort
+  5. Send SIGTERM (`kill`)
+  6. Send SIGKILL (`kill -9`)
+
+If the Executor notices that all Processes in a Task have aborted during this sequence, it will
+not proceed with subsequent steps.  Note that graceful shutdown is best-effort, and due to the many
+inevitable realities of distributed systems, it may not be performed.
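+
+A rough sketch of that escalation, written from the outside as plain Python (illustrative only; the
+real logic lives in the Executor, which also stops early once the task's processes have exited):
+
+```python
+import os
+import signal
+import time
+import urllib.request
+
+def tear_down(pid, health_port=None, wait_secs=5):
+  # Illustrative escalation only; not the Executor's implementation.
+  if health_port is not None:
+    for endpoint in ('/quitquitquit', '/abortabortabort'):
+      try:
+        urllib.request.urlopen(
+            'http://localhost:%d%s' % (health_port, endpoint), data=b'')
+      except OSError:
+        pass                    # task may already be gone
+      time.sleep(wait_secs)
+  for sig in (signal.SIGTERM, signal.SIGKILL):   # kill, then kill -9
+    try:
+      os.kill(pid, sig)
+    except OSError:
+      return                    # process already exited
+    time.sleep(wait_secs)
+```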
+
+### Giving Priority to Production Tasks: PREEMPTING
+
+Sometimes a Task needs to be interrupted, such as when a non-production
+Task's resources are needed by a higher priority production Task. This
+type of interruption is called a *pre-emption*. When this happens in
+Aurora, the non-production Task is killed and moved into
+the `PREEMPTING` state when both of the following are true:
+
+- The task being killed is a non-production task.
+- The other task is a `PENDING` production task that hasn't been
+  scheduled due to a lack of resources.
+
+Since production tasks are much more important, Aurora kills off the
+non-production task to free up resources for the production task. The
+scheduler UI shows the non-production task was preempted in favor of the
+production task. At some point, tasks in `PREEMPTING` move to `KILLED`.
+
+Note that non-production tasks consuming many resources are likely to be
+preempted in favor of production tasks.
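+
+Expressed as a tiny illustrative check (not the scheduler's actual implementation), the conditions
+above amount to:
+
+```python
+from collections import namedtuple
+
+Task = namedtuple('Task', ['production', 'state'])
+
+def may_preempt(candidate, victim):
+  # A PENDING production task that can't be scheduled may preempt a
+  # non-production task to free up its resources.
+  return candidate.production and candidate.state == 'PENDING' and not victim.production
+
+print(may_preempt(Task(True, 'PENDING'), Task(False, 'RUNNING')))  # True
+```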
+
+### Natural Termination: FINISHED, FAILED
+
+A `RUNNING` `Task` can terminate without direct user interaction. For
+example, it may be a finite computation that finishes, even something as
+simple as `echo hello world`. Or it could be an exceptional condition in
+a long-lived service. If the `Task` is successful (its underlying
+processes have succeeded with exit status `0` or finished without
+reaching failure limits) it moves into `FINISHED` state. If it finished
+after reaching a set of failure limits, it goes into `FAILED` state.
+
+### Forceful Termination: KILLING, RESTARTING
+
+You can terminate a `Task` by issuing an `aurora job kill` command, which
+moves it into `KILLING` state. The scheduler then sends the slave a
+request to terminate the `Task`. If the scheduler receives a successful
+response, it moves the Task into `KILLED` state and never restarts it.
+
+The scheduler has access to a non-public `RESTARTING` state. If a `Task`
+is forced into the `RESTARTING` state, the scheduler kills the
+underlying task but in parallel schedules an identical replacement for
+it.
+
+Configuration
+-------------
+
+You define and configure your Jobs (and their Tasks and Processes) in
+Aurora configuration files. Their filenames end with the `.aurora`
+suffix, and you write them in Python making use of the Pystachio
+templating language, along
+with specific Aurora, Mesos, and Thermos commands and methods. See the
+[Configuration Guide and Reference](/documentation/0.8.0/configuration-reference/) and
+[Configuration Tutorial](/documentation/0.8.0/configuration-tutorial/).
+
+Service Discovery
+-----------------
+
+It is possible for the Aurora executor to announce tasks into ServerSets for
+the purpose of service discovery.  ServerSets use the Zookeeper [group membership pattern](http://zookeeper.apache.org/doc/trunk/recipes.html#sc_outOfTheBox)
+of which there are several reference implementations:
+
+  - [C++](https://github.com/apache/mesos/blob/master/src/zookeeper/group.cpp)
+  - [Java](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/zookeeper/ServerSetImpl.java#L221)
+  - [Python](https://github.com/twitter/commons/blob/master/src/python/twitter/common/zookeeper/serverset/serverset.py#L51)
+
+These can also be used natively in Finagle using the [ZookeeperServerSetCluster](https://github.com/twitter/finagle/blob/master/finagle-serversets/src/main/scala/com/twitter/finagle/zookeeper/ZookeeperServerSetCluster.scala).
+
+For more information about how to configure announcing, see the [Configuration Reference](/documentation/0.8.0/configuration-reference/).
+
+Creating Jobs
+-------------
+
+You create and manipulate Aurora Jobs with the Aurora client, whose
+command-line commands all start with `aurora`.
+See [Aurora Client Commands](/documentation/0.8.0/client-commands/) for details
+about the Aurora Client.
+
+Interacting With Jobs
+---------------------
+
+You interact with Aurora jobs either via:
+
+- Read-only Web UIs
+
+  Part of the output from creating a new Job is a URL for the Job's scheduler UI page.
+
+  For example:
+
+      vagrant@precise64:~$ aurora job create devcluster/www-data/prod/hello \
+      /vagrant/examples/jobs/hello_world.aurora
+      INFO] Creating job hello
+      INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+      INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+  The "Job url" goes to the Job's scheduler UI page. To go to the overall scheduler UI page,
+  stop at the "scheduler" part of the URL, in this case, `http://precise64:8081/scheduler`
+
+  You can also reach the scheduler UI page via the Client command `aurora job open`:
+
+      aurora job open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+  If only the cluster is specified, it goes directly to that cluster's scheduler main page.
+  If the role is specified, it goes to the top-level role page. If the full job key is specified,
+  it goes directly to the job page where you can inspect individual tasks.
+
+  Once you click through to a role page, you see Jobs arranged separately by pending jobs, active
+  jobs, and finished jobs. Jobs are arranged by role, typically a service account for production
+  jobs and user accounts for test or development jobs.
+
+- The Aurora client
+
+  See [client commands](/documentation/0.8.0/client-commands/).
diff --git a/source/documentation/0.8.0/vagrant.md b/source/documentation/0.8.0/vagrant.md
new file mode 100644
index 0000000..0ac9268
--- /dev/null
+++ b/source/documentation/0.8.0/vagrant.md
@@ -0,0 +1,137 @@
+Getting Started
+===============
+
+This document shows you how to configure a complete cluster using a virtual machine. This setup
+replicates a real cluster in your development machine as closely as possible. After you complete
+the steps outlined here, you will be ready to create and run your first Aurora job.
+
+The following sections describe these steps in detail:
+
+1. [Overview](#overview)
+1. [Install VirtualBox and Vagrant](#install-virtualbox-and-vagrant)
+1. [Clone the Aurora repository](#clone-the-aurora-repository)
+1. [Start the local cluster](#start-the-local-cluster)
+1. [Log onto the VM](#log-onto-the-vm)
+1. [Run your first job](#run-your-first-job)
+1. [Rebuild components](#rebuild-components)
+1. [Shut down or delete your local cluster](#shut-down-or-delete-your-local-cluster)
+1. [Troubleshooting](#troubleshooting)
+
+
+Overview
+--------
+
+The Aurora distribution includes a set of scripts that enable you to create a local cluster in
+your development machine. These scripts use [Vagrant](https://www.vagrantup.com/) and
+[VirtualBox](https://www.virtualbox.org/) to run and configure a virtual machine. Once the
+virtual machine is running, the scripts install and initialize Aurora and any required components
+to create the local cluster.
+
+
+Install VirtualBox and Vagrant
+------------------------------
+
+First, download and install [VirtualBox](https://www.virtualbox.org/) on your development machine.
+
+Then download and install [Vagrant](https://www.vagrantup.com/). To verify that the installation
+was successful, open a terminal window and type the `vagrant` command. You should see a list of
+common commands for this tool.
+
+
+Clone the Aurora repository
+---------------------------
+
+To obtain the Aurora source distribution, clone its Git repository using the following command:
+
+     git clone http://git.apache.org/aurora.git
+
+
+Start the local cluster
+-----------------------
+
+Now change into the `aurora/` directory, which contains the Aurora source code and
+other scripts and tools:
+
+     cd aurora/
+
+To start the local cluster, type the following command:
+
+     vagrant up
+
+This command uses the configuration scripts in the Aurora distribution to:
+
+* Download a Linux system image.
+* Start a virtual machine (VM) and configure it.
+* Install the required build tools on the VM.
+* Install Aurora's requirements (like [Mesos](http://mesos.apache.org/) and
+[Zookeeper](http://zookeeper.apache.org/)) on the VM.
+* Build and install Aurora from source on the VM.
+* Start Aurora's services on the VM.
+
+This process takes several minutes to complete.
+
+To verify that Aurora is running on the cluster, visit the following URLs:
+
+* Scheduler - http://192.168.33.7:8081
+* Observer - http://192.168.33.7:1338
+* Mesos Master - http://192.168.33.7:5050
+* Mesos Slave - http://192.168.33.7:5051
+
+
+Log onto the VM
+---------------
+
+To SSH into the VM, run the following command in your development machine:
+
+     vagrant ssh
+
+To verify that Aurora is installed in the VM, type the `aurora` command. You should see a list
+of arguments and possible commands.
+
+The `/vagrant` directory on the VM is mapped to the `aurora/` local directory
+from which you started the cluster. You can edit files inside this directory in your development
+machine and access them from the VM under `/vagrant`.
+
+A pre-installed `clusters.json` file refers to your local cluster as `devcluster`, which you
+will use in client commands.
+
+
+Run your first job
+------------------
+
+Now that your cluster is up and running, you are ready to define and run your first job in Aurora.
+For more information, see the [Aurora Tutorial](/documentation/0.8.0/tutorial/).
+
+
+Rebuild components
+------------------
+
+If you are changing Aurora code and would like to rebuild a component, you can use the `aurorabuild`
+command on the VM to build and restart a component.  This is considerably faster than destroying
+and rebuilding your VM.
+
+`aurorabuild` accepts a list of components to build and update. To get a list of supported
+components, invoke the `aurorabuild` command with no arguments. For example, to rebuild and
+restart the client:
+
+     vagrant ssh -c 'aurorabuild client'
+
+
+Shut down or delete your local cluster
+--------------------------------------
+
+To shut down your local cluster, run the `vagrant halt` command in your development machine. To
+start it again, run the `vagrant up` command.
+
+Once you are finished with your local cluster, or if you would otherwise like to start from scratch,
+you can use the command `vagrant destroy` to turn off and delete the virtual file system.
+
+
+Troubleshooting
+---------------
+
+Most vagrant-related problems can be fixed by the following steps:
+
+* Destroying the vagrant environment with `vagrant destroy`
+* Killing any orphaned VMs (see AURORA-499) with `virtualbox` UI or `VBoxManage` command line tool
+* Cleaning the repository of build artifacts and other intermediate output with `git clean -fdx`
+* Bringing up the vagrant environment with `vagrant up`
diff --git a/source/documentation/latest/client-cluster-configuration.md b/source/documentation/0.9.0/client-cluster-configuration.md
similarity index 100%
copy from source/documentation/latest/client-cluster-configuration.md
copy to source/documentation/0.9.0/client-cluster-configuration.md
diff --git a/source/documentation/0.9.0/client-commands.md b/source/documentation/0.9.0/client-commands.md
new file mode 100644
index 0000000..fb3a392
--- /dev/null
+++ b/source/documentation/0.9.0/client-commands.md
@@ -0,0 +1,410 @@
+Aurora Client Commands
+======================
+
+- [Introduction](#introduction)
+- [Cluster Configuration](#cluster-configuration)
+- [Job Keys](#job-keys)
+- [Modifying Aurora Client Commands](#modifying-aurora-client-commands)
+- [Regular Jobs](#regular-jobs)
+    - [Creating and Running a Job](#creating-and-running-a-job)
+    - [Running a Command On a Running Job](#running-a-command-on-a-running-job)
+    - [Killing a Job](#killing-a-job)
+    - [Updating a Job](#updating-a-job)
+        - [Coordinated job updates](#coordinated-job-updates)
+        - [Client-orchestrated updates (deprecated)](#client-orchestrated-updates-deprecated)
+    - [Renaming a Job](#renaming-a-job)
+    - [Restarting Jobs](#restarting-jobs)
+- [Cron Jobs](#cron-jobs)
+- [Comparing Jobs](#comparing-jobs)
+- [Viewing/Examining Jobs](#viewingexamining-jobs)
+    - [Listing Jobs](#listing-jobs)
+    - [Inspecting a Job](#inspecting-a-job)
+    - [Versions](#versions)
+    - [Checking Your Quota](#checking-your-quota)
+    - [Finding a Job on Web UI](#finding-a-job-on-web-ui)
+    - [Getting Job Status](#getting-job-status)
+    - [Opening the Web UI](#opening-the-web-ui)
+    - [SSHing to a Specific Task Machine](#sshing-to-a-specific-task-machine)
+    - [Templating Command Arguments](#templating-command-arguments)
+
+Introduction
+------------
+
+Once you have written an `.aurora` configuration file that describes
+your Job and its parameters and functionality, you interact with Aurora
+using Aurora Client commands. This document describes all of these commands
+and how and when to use them. All Aurora Client commands start with
+`aurora`, followed by the name of the specific command and its
+arguments.
+
+*Job keys* are a very common argument to Aurora commands, as well as the
+gateway to useful information about a Job. Before using Aurora, you
+should read the next section which describes them in detail. The section
+after that briefly describes how you can modify the behavior of certain
+Aurora Client commands, linking to a detailed document about how to do
+that.
+
+This is followed by the Regular Jobs section, which describes the basic
+Client commands for creating, running, and manipulating Aurora Jobs.
+After that are sections on Comparing Jobs and Viewing/Examining Jobs. In
+other words, various commands for getting information and metadata about
+Aurora Jobs.
+
+Cluster Configuration
+---------------------
+
+The client must be able to find a configuration file that specifies available clusters. This file
+declares shorthand names for clusters, which are in turn referenced by job configuration files
+and client commands.
+
+The client will load at most two configuration files, making both of their defined clusters
+available. The first is intended to be a system-installed cluster, using the path specified in
+the environment variable `AURORA_CONFIG_ROOT`, defaulting to `/etc/aurora/clusters.json` if the
+environment variable is not set. The second is a user-installed file, located at
+`~/.aurora/clusters.json`.
+
+A cluster configuration is formatted as JSON.  The simplest cluster configuration is one that
+communicates with a single (non-leader-elected) scheduler.  For example:
+
+```javascript
+[{
+  "name": "example",
+  "scheduler_uri": "localhost:55555",
+}]
+```
+
+A configuration for a leader-elected scheduler would contain something like:
+
+```javascript
+[{
+  "name": "example",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler"
+}]
+```
+
+For more details on cluster configuration see the
+[Client Cluster Configuration](/documentation/0.9.0/client-cluster-configuration/) documentation.
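+
+Illustratively, the lookup described above could be sketched as follows (this is not the client's
+actual loading code; it reads the environment variable exactly as described, with the per-user file
+shadowing same-named clusters):
+
+```python
+import json
+import os
+
+def load_clusters():
+  # Illustrative only: system-installed file first, then the per-user file.
+  paths = [
+      os.environ.get('AURORA_CONFIG_ROOT', '/etc/aurora/clusters.json'),
+      os.path.expanduser('~/.aurora/clusters.json'),
+  ]
+  clusters = {}
+  for path in paths:
+    if os.path.exists(path):
+      with open(path) as f:
+        for cluster in json.load(f):
+          clusters[cluster['name']] = cluster   # later files shadow earlier ones
+  return clusters
+```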
+
+Job Keys
+--------
+
+A job key is a unique system-wide identifier for an Aurora-managed
+Job, for example `cluster1/web-team/test/experiment204`. It is a 4-tuple
+consisting of, in order, *cluster*, *role*, *environment*, and
+*jobname*, separated by slashes (`/`). Cluster is the name of an Aurora
+cluster. Role is the Unix service account under which the Job
+runs. Environment is a namespace component like `devel`, `test`,
+`prod`, or `stagingN`. Jobname is the Job's name.
+
+The combination of all four values uniquely specifies the Job. If any
+one value is different from that of another job key, the two job keys
+refer to different Jobs. For example, job key
+`cluster1/tyg/prod/workhorse` is different from
+`cluster1/tyg/prod/workcamel` is different from
+`cluster2/tyg/prod/workhorse` is different from
+`cluster2/foo/prod/workhorse` is different from
+`cluster1/tyg/test/workhorse`.
+
+Role names are user accounts existing on the slave machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
+Modifying Aurora Client Commands
+--------------------------------
+
+For certain Aurora Client commands, you can define hook methods that run
+either before or after an action that takes place during the command's
+execution, as well as based on whether the action finished successfully or failed
+during execution. Basically, a hook is code that lets you extend the
+command's actions. The hook executes on the client side, specifically on
+the machine executing Aurora commands.
+
+Hooks can be associated with these Aurora Client commands:
+
+  - `job cancel-update`
+  - `job create`
+  - `job kill`
+  - `job restart`
+  - `job update`
+
+The process for writing and activating them is complex enough
+that we explain it in a devoted document, [Hooks for Aurora Client API](/documentation/0.9.0/hooks/).
+
+Regular Jobs
+------------
+
+This section covers Aurora commands related to running, killing,
+renaming, updating, and restarting a basic Aurora Job.
+
+### Creating and Running a Job
+
+    aurora job create <job key> <configuration file>
+
+Creates and then runs a Job with the specified job key based on a `.aurora` configuration file.
+The configuration file may also contain and activate hook definitions.
+
+### Running a Command On a Running Job
+
+    aurora task run CLUSTER/ROLE/ENV/NAME[/INSTANCES] <cmd>
+
+Runs a shell command on all machines currently hosting shards of a
+single Job.
+
+`run` supports the same command line wildcards used to populate a Job's
+commands; i.e. anything in the `{{mesos.*}}` and `{{thermos.*}}`
+namespaces.
+
+### Killing a Job
+
+    aurora job killall CLUSTER/ROLE/ENV/NAME
+
+Kills all Tasks associated with the specified Job, blocking until all
+are terminated. Defaults to killing all instances in the Job.
+
+The `<configuration file>` argument for `kill` is optional. Use it only
+if it contains hook definitions and activations that affect the
+kill command.
+
+### Updating a Job
+
+There are several sub-commands to manage job updates:
+
+    aurora update start <job key> <configuration file>
+    aurora update info <job key>
+    aurora update pause <job key>
+    aurora update resume <job key>
+    aurora update abort <job key>
+    aurora update list <cluster>
+
+When you `start` a job update, the command will return once it has sent the
+instructions to the scheduler.  At that point, you may view detailed
+progress for the update with the `info` subcommand, in addition to viewing
+graphical progress in the web browser.  You may also get a full listing of
+in-progress updates in a cluster with `list`.
+
+Once an update has been started, you can `pause` to keep the update but halt
+progress.  This can be useful for things like debugging a partially-updated
+job to determine whether you would like to proceed.  You can `resume` to
+proceed.
+
+You may `abort` a job update regardless of the state it is in. This will
+instruct the scheduler to completely abandon the job update and leave the job
+in the current (possibly partially-updated) state.
+
+#### Coordinated job updates
+
+Some Aurora services may benefit from having more control over updates by explicitly
+acknowledging ("heartbeating") job update progress. This may be helpful for mission-critical
+service updates where explicit job health monitoring is vital during the entire job update
+lifecycle. Such job updates would rely on an external service (or a custom client) periodically
+pulsing an active coordinated job update via a
+[pulseJobUpdate RPC](https://github.com/apache/aurora/blob/#{git_tag}/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+A coordinated update is defined by setting a positive
+[pulse_interval_secs](/documentation/0.9.0/configuration-reference/#updateconfig-objects) value in the job configuration
+file. If no pulses are received within the specified interval, the update will be blocked. A blocked
+update is unable to continue rolling forward (or rolling back) but retains its active status.
+It may only be unblocked by a fresh `pulseJobUpdate` call.
+
+NOTE: A coordinated update starts in `ROLL_FORWARD_AWAITING_PULSE` state and will not make any
+progress until the first pulse arrives. However, a paused update (`ROLL_FORWARD_PAUSED` or
+`ROLL_BACK_PAUSED`) is still considered active and upon resuming will immediately make progress
+provided the pulse interval has not expired.
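+
+A heavily simplified sketch of such an external coordinator follows; the `pulse` function is a
+placeholder standing in for a `pulseJobUpdate` RPC made through whatever Aurora API client you use,
+and `service_is_healthy` stands in for your own monitoring:
+
+```python
+import time
+
+def service_is_healthy():
+  return True   # placeholder for external health monitoring
+
+def pulse(update_key):
+  # Placeholder for a pulseJobUpdate RPC call; see api.thrift for the real API.
+  print('pulsing %s' % (update_key,))
+
+def coordinate(update_key, pulse_interval_secs):
+  # Pulse well inside the configured interval so a slow health check
+  # does not accidentally block the update.
+  while service_is_healthy():
+    pulse(update_key)
+    time.sleep(pulse_interval_secs / 2.0)
+```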
+
+#### Client-orchestrated updates (deprecated)
+
+*Note: This feature is deprecated and will be removed in 0.9.0.
+Please use `aurora update` instead.*
+
+    aurora job update CLUSTER/ROLE/ENV/NAME[/INSTANCES] <configuration file>
+    aurora job cancel-update CLUSTER/ROLE/ENV/NAME
+
+Given a running job, does a rolling update to reflect a new
+configuration version. Only updates Tasks in the Job with a changed
+configuration. You can further restrict the Tasks operated on by specifying
+the specific instances that should be updated.
+
+You may want to run `aurora job diff` beforehand to validate which Tasks
+have different configurations.
+
+Updating jobs are locked to be sure the update finishes without
+disruption. If the update abnormally terminates, the lock may stay
+around and cause failure of subsequent update attempts.
+`aurora job cancel-update` unlocks the Job specified by
+its `job_key` argument. Be sure you don't issue `job cancel-update` when
+another user is working with the specified Job.
+
+The `<configuration file>` argument for `job cancel-update` is optional. Use
+it only if it contains hook definitions and activations that affect the
+`cancel_update` command. The `<configuration file>` argument for
+`update` is required, but in addition to a new configuration it can be
+used to define and activate hooks for `job update`.
+
+### Renaming a Job
+
+Renaming is a tricky operation as downstream clients must be informed of
+the new name. A conservative approach
+to renaming suitable for production services is:
+
+1.  Modify the Aurora configuration file to change the role,
+    environment, and/or name as appropriate to the standardized naming
+    scheme.
+2.  Check that only these naming components have changed
+    with `aurora job diff`.
+
+        aurora job diff CLUSTER/ROLE/ENV/NAME <job_configuration>
+
+3.  Create the (identical) job at the new key. You may need to request a
+    temporary quota increase.
+
+        aurora job create CLUSTER/ROLE/ENV/NEW_NAME <job_configuration>
+
+4.  Migrate all clients over to the new job key. Update all links and
+    dashboards. Ensure that both job keys run identical versions of the
+    code while in this state.
+5.  After verifying that all clients have successfully moved over, kill
+    the old job.
+
+        aurora job killall CLUSTER/ROLE/ENV/NAME
+
+6.  If you received a temporary quota increase, be sure to let the
+    powers that be know you no longer need the additional capacity.
+
+### Restarting Jobs
+
+`restart` restarts all shards of the Job identified by the given job key:
+
+    aurora job restart CLUSTER/ROLE/ENV/NAME[/INSTANCES]
+
+Restarts are controlled on the client side, so aborting
+the `job restart` command halts the restart operation.
+
+**Note**: `job restart` only applies its command line arguments and does not
+use, and is not affected by, `update.config`. Restarting
+does ***not*** involve a configuration change. To update the
+configuration, use `update.config`.
+
+The `--config` argument for restart is optional. Use it only
+if it contains hook definitions and activations that affect the
+`job restart` command.
+
+Cron Jobs
+---------
+
+You can manage cron jobs using the `aurora cron` command.  Please see
+[cron-jobs.md](/documentation/0.9.0/cron-jobs/) for more details.
+
+You will see various commands and options relating to cron jobs in
+`aurora -h` and similar. Ignore them, as they're not yet implemented.
+
+Comparing Jobs
+--------------
+
+    aurora job diff CLUSTER/ROLE/ENV/NAME <job configuration>
+
+Compares a job configuration against a running job. By default the diff
+is determined using `diff`, though you may choose an alternate
+diff program by specifying the `DIFF_VIEWER` environment variable.
+
+Viewing/Examining Jobs
+----------------------
+
+Above we discussed creating, killing, and updating Jobs. Here we discuss
+how to view and examine Jobs.
+
+### Listing Jobs
+
+    aurora config list <job configuration>
+
+Lists all Jobs registered with the Aurora scheduler in the named cluster for the named role.
+
+### Inspecting a Job
+
+    aurora job inspect CLUSTER/ROLE/ENV/NAME <job configuration>
+
+`inspect` verifies that its specified job can be parsed from a
+configuration file, and displays the parsed configuration.
+
+### Checking Your Quota
+
+    aurora quota get CLUSTER/ROLE
+
+Prints the production quota allocated to the role at the given
+cluster.
+
+### Finding a Job on Web UI
+
+When you create a job, part of the output response contains a URL that goes
+to the job's scheduler UI page. For example:
+
+    vagrant@precise64:~$ aurora job create devcluster/www-data/prod/hello /vagrant/examples/jobs/hello_world.aurora
+    INFO] Creating job hello
+    INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+    INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+You can go to the scheduler UI page for this job via `http://precise64:8081/scheduler/www-data/prod/hello`.
+You can go to the overall scheduler UI page by going to the part of that URL that ends at `scheduler`: `http://precise64:8081/scheduler`.
+
+Once you click through to a role page, you see Jobs arranged
+separately by pending jobs, active jobs and finished jobs.
+Jobs are arranged by role, typically a service account for
+production jobs and user accounts for test or development jobs.
+
+### Getting Job Status
+
+    aurora job status <job_key>
+
+Returns the status of recent tasks associated with the
+`job_key` specified Job in its supplied cluster. Typically this includes
+a mix of active tasks (running or assigned) and inactive tasks
+(successful, failed, and lost).
+
+### Opening the Web UI
+
+Use the Job's web UI scheduler URL or the `aurora status` command to find out on which
+machines individual tasks are scheduled. You can open the web UI via the
+`aurora job open` command if invoked from your machine:
+
+    aurora job open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+If only the cluster is specified, it goes directly to that cluster's
+scheduler main page. If the role is specified, it goes to the top-level
+role page. If the full job key is specified, it goes directly to the job
+page where you can inspect individual tasks.
+
+### SSHing to a Specific Task Machine
+
+    aurora task ssh <job_key> <shard number>
+
+You can have the Aurora client ssh directly to the machine that has been
+assigned a particular Job/shard number. This may be useful for quickly
+diagnosing issues such as performance issues or abnormal behavior on a
+particular machine.
+
+### Templating Command Arguments
+
+    aurora task run [-e] [-t THREADS] <job_key> -- <<command-line>>
+
+Given a job specification, run the supplied command on all hosts and
+return the output. You may use the standard Mustache templating rules:
+
+- `{{thermos.ports[name]}}` substitutes the specific named port of the
+  task assigned to this machine
+- `{{mesos.instance}}` substitutes the shard id of the job's task
+  assigned to this machine
+- `{{thermos.task_id}}` substitutes the task id of the job's task
+  assigned to this machine
+
+For example, the following type of pattern can be a powerful diagnostic
+tool:
+
+    aurora task run -t5 cluster1/tyg/devel/seizure -- \
+      'curl -s -m1 localhost:{{thermos.ports[http]}}/vars | grep uptime'
+
+By default, the command runs in the Task's sandbox. The `-e` option can
+run the command in the executor's sandbox. This is mostly useful for
+Aurora administrators.
+
+You can parallelize the runs by using the `-t` option.
diff --git a/source/documentation/0.9.0/committers.md b/source/documentation/0.9.0/committers.md
new file mode 100644
index 0000000..e6b1646
--- /dev/null
+++ b/source/documentation/0.9.0/committers.md
@@ -0,0 +1,79 @@
+Setting up your email account
+-----------------------------
+Once your Apache ID has been set up, you can configure your account, add ssh keys, and set up an
+email forwarding address at
+
+  http://id.apache.org
+
+Additional instructions for setting up your new committer email can be found at
+
+  http://www.apache.org/dev/user-email.html
+
+The recommended setup is to configure all services (mailing lists, JIRA, ReviewBoard) to send
+emails to your @apache.org email address.
+
+
+Creating a gpg key for releases
+-------------------------------
+In order to create a release candidate you will need a gpg key published to an external key server
+and that key will need to be added to our KEYS file as well.
+
+1. Create a key:
+
+               gpg --gen-key
+
+2. Add your gpg key to the Apache Aurora KEYS file:
+
+               git clone https://gitbox.apache.org/repos/asf/aurora.git
+               (gpg --list-sigs <KEY ID> && gpg --armor --export <KEY ID>) >> KEYS
+               git add KEYS && git commit -m "Adding gpg key for <APACHE ID>"
+               ./rbt post -o -g
+
+3. Publish the key to an external key server:
+
+               gpg --keyserver pgp.mit.edu --send-keys <KEY ID>
+
+4. Upload the updated KEYS file to the Apache Aurora svn dist locations listed below:
+
+               https://dist.apache.org/repos/dist/dev/aurora/KEYS
+               https://dist.apache.org/repos/dist/release/aurora/KEYS
+
+5. Add your key to git config for use with the release scripts:
+
+               git config --global user.signingkey <KEY ID>
+
+
+Creating a release
+------------------
+The following will guide you through the steps to create a release candidate, vote, and finally an
+official Apache Aurora release. Before starting, your gpg key should be in the KEYS file and you
+must have access to commit to the dist.a.o repositories.
+
+1. Ensure that all issues resolved for this release candidate are tagged with the correct Fix
+Version in Jira; the changelog script will use this to generate the CHANGELOG in step #2.
+
+2. Create a release candidate. This will automatically update the CHANGELOG and commit it, create a
+branch and update the current version within the trunk. To create a minor version update and publish
+it run
+
+               ./build-support/release/release-candidate -l m -p
+
+3. Update, if necessary, the draft email created from the `release-candidate` script in step #2 and
+send the [VOTE] email to the dev@ and private@ mailing lists. You can verify the release signature
+and checksums by running
+
+               ./build-support/release/verify-release-candidate
+
+4. Wait for the vote to complete. If the vote fails, address any issues, go back to step #1, and
+run again, this time passing the -r flag to increment the release candidate version. This will
+automatically clean up the release candidate rc0 branch and source distribution.
+
+               ./build-support/release/release-candidate -l m -r 1 -p
+
+5. Once the vote has successfully passed, create the release:
+
+               ./build-support/release/release
+
+6. Update the draft email created from the `release` script in step #5 to include the Apache IDs for
+all binding votes and send the [RESULT][VOTE] email to the dev@ and private@ mailing lists.
+
diff --git a/source/documentation/0.9.0/configuration-reference.md b/source/documentation/0.9.0/configuration-reference.md
new file mode 100644
index 0000000..cb7e5fa
--- /dev/null
+++ b/source/documentation/0.9.0/configuration-reference.md
@@ -0,0 +1,599 @@
+Aurora + Thermos Configuration Reference
+========================================
+
+- [Aurora + Thermos Configuration Reference](#aurora--thermos-configuration-reference)
+- [Introduction](#introduction)
+- [Process Schema](#process-schema)
+    - [Process Objects](#process-objects)
+      - [name](#name)
+      - [cmdline](#cmdline)
+      - [max_failures](#max_failures)
+      - [daemon](#daemon)
+      - [ephemeral](#ephemeral)
+      - [min_duration](#min_duration)
+      - [final](#final)
+- [Task Schema](#task-schema)
+    - [Task Object](#task-object)
+      - [name](#name-1)
+      - [processes](#processes)
+        - [constraints](#constraints)
+      - [resources](#resources)
+      - [max_failures](#max_failures-1)
+      - [max_concurrency](#max_concurrency)
+      - [finalization_wait](#finalization_wait)
+    - [Constraint Object](#constraint-object)
+    - [Resource Object](#resource-object)
+- [Job Schema](#job-schema)
+    - [Job Objects](#job-objects)
+    - [Services](#services)
+    - [UpdateConfig Objects](#updateconfig-objects)
+    - [HealthCheckConfig Objects](#healthcheckconfig-objects)
+    - [Announcer Objects](#announcer-objects)
+    - [Container Objects](#container-object)
+    - [LifecycleConfig Objects](#lifecycleconfig-objects)
+- [Specifying Scheduling Constraints](#specifying-scheduling-constraints)
+- [Template Namespaces](#template-namespaces)
+    - [mesos Namespace](#mesos-namespace)
+    - [thermos Namespace](#thermos-namespace)
+- [Basic Examples](#basic-examples)
+    - [hello_world.aurora](#hello_worldaurora)
+    - [Environment Tailoring](#environment-tailoring)
+      - [hello_world_productionized.aurora](#hello_world_productionizedaurora)
+
+Introduction
+============
+
+Don't know where to start? The Aurora configuration schema is very
+powerful, and configurations can become quite complex for advanced use
+cases.
+
+For examples of simple configurations to get something up and running
+quickly, check out the [Tutorial](/documentation/0.9.0/tutorial/). When you feel comfortable with the basics, move
+on to the [Configuration Tutorial](/documentation/0.9.0/configuration-tutorial/) for more in-depth coverage of
+configuration design.
+
+For additional basic configuration examples, see [the end of this document](#basic-examples).
+
+Process Schema
+==============
+
+Process objects consist of required `name` and `cmdline` attributes. You can customize Process
+behavior with its optional attributes. Remember, Processes are handled by Thermos.
+
+### Process Objects
+
+  **Attribute Name**  | **Type**    | **Description**
+  ------------------- | :---------: | ---------------------------------
+   **name**           | String      | Process name (Required)
+   **cmdline**        | String      | Command line (Required)
+   **max_failures**   | Integer     | Maximum process failures (Default: 1)
+   **daemon**         | Boolean     | When True, this is a daemon process. (Default: False)
+   **ephemeral**      | Boolean     | When True, this is an ephemeral process. (Default: False)
+   **min_duration**   | Integer     | Minimum duration between process restarts in seconds. (Default: 15)
+   **final**          | Boolean     | When True, this process is a finalizing one that should run last. (Default: False)
+
+#### name
+
+The name is any valid UNIX filename string (specifically no
+slashes, NULLs or leading periods). Within a Task object, each Process name
+must be unique.
+
+#### cmdline
+
+The command line run by the process. The command line is invoked in a bash
+subshell, so it can involve full-blown bash scripts. However, nothing is
+supplied for command-line arguments, so `$*` is unspecified.
+
+#### max_failures
+
+The maximum number of failures (non-zero exit statuses) this process can
+have before being marked permanently failed and not retried. If a
+process permanently fails, Thermos looks at the failure limit of the task
+containing the process (usually 1) to determine if the task has
+failed as well.
+
+Setting `max_failures` to 0 makes the process retry
+indefinitely until it achieves a successful (zero) exit status.
+It retries at most once every `min_duration` seconds to prevent
+an effective denial of service attack on the coordinating Thermos scheduler.
+
+#### daemon
+
+By default, Thermos processes are non-daemon. If `daemon` is set to True, a
+successful (zero) exit status does not prevent future process runs.
+Instead, the process reinvokes after `min_duration` seconds.
+However, the maximum failure limit still applies. A combination of
+`daemon=True` and `max_failures=0` causes a process to retry
+indefinitely regardless of exit status. This should be avoided
+for very short-lived processes because of the accumulation of
+checkpointed state for each process run. When running in Mesos
+specifically, `max_failures` is capped at 100.
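+
+As an illustrative sketch (the process name and command line are hypothetical),
+a daemon process that reinvokes no more often than every 30 seconds could be
+declared as:
+
+    heartbeat = Process(
+      name = 'heartbeat',
+      cmdline = 'curl -s localhost:{{thermos.ports[http]}}/health',
+      daemon = True,
+      min_duration = 30)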
+
+#### ephemeral
+
+By default, Thermos processes are non-ephemeral. If `ephemeral` is set to
+True, the process' status is not used to determine if its containing task
+has completed. For example, consider a task with a non-ephemeral
+webserver process and an ephemeral logsaver process
+that periodically checkpoints its log files to a centralized data store.
+The task is considered finished once the webserver process has
+completed, regardless of the logsaver's current status.
+
+#### min_duration
+
+Processes may succeed or fail multiple times during a single task's
+duration. Each of these is called a *process run*. `min_duration` is
+the minimum number of seconds the scheduler waits before running the
+same process.
+
+#### final
+
+Processes can be grouped into two classes: ordinary processes and
+finalizing processes. By default, Thermos processes are ordinary. They
+run as long as the task is considered healthy (i.e., no failure
+limits have been reached.) But once all regular Thermos processes
+finish or the task reaches a certain failure threshold, it
+moves into a "finalization" stage and runs all finalizing
+processes. These are typically processes necessary for cleaning up the
+task, such as log checkpointers, or perhaps e-mail notifications that
+the task completed.
+
+Finalizing processes may not depend upon ordinary processes or
+vice-versa; however, finalizing processes may depend upon other
+finalizing processes and otherwise run as part of a typical process
+schedule.
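+
+As an illustrative sketch (the process name and command line are hypothetical),
+a finalizing process that compresses logs during task teardown could be
+declared as:
+
+    archive_logs = Process(
+      name = 'archive_logs',
+      cmdline = 'gzip -f *.log',
+      final = True)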
+
+Task Schema
+===========
+
+Tasks fundamentally consist of a `name` and a list of Process objects stored as the
+value of the `processes` attribute. Processes can be further constrained with
+`constraints`. By default, `name`'s value inherits from the first Process in the
+`processes` list, so for simple `Task` objects with one Process, `name`
+can be omitted. In Mesos, `resources` is also required.
+
+### Task Object
+
+   **param**               | **type**                         | **description**
+   ---------               | :---------:                      | ---------------
+   ```name```              | String                           | Task name (Default: ```processes[0].name```)
+   ```processes```         | List of ```Process``` objects    | List of ```Process``` objects bound to this task. (Required)
+   ```constraints```       | List of ```Constraint``` objects | List of ```Constraint``` objects constraining processes.
+   ```resources```         | ```Resource``` object            | Resource footprint. (Required)
+   ```max_failures```      | Integer                          | Maximum process failures before being considered failed (Default: 1)
+   ```max_concurrency```   | Integer                          | Maximum number of concurrent processes (Default: 0, unlimited concurrency.)
+   ```finalization_wait``` | Integer                          | Amount of time allocated for finalizing processes, in seconds. (Default: 30)
+
+#### name
+`name` is a string denoting the name of this task. It defaults to the name of the first Process in
+the list of Processes associated with the `processes` attribute.
+
+#### processes
+
+`processes` is an unordered list of `Process` objects. To constrain the order
+in which they run, use `constraints`.
+
+##### constraints
+
+A list of `Constraint` objects. Currently it supports only one type,
+the `order` constraint. `order` is a list of process names
+that should run in the order given. For example,
+
+        process = Process(cmdline = "echo hello {{name}}")
+        task = Task(name = "echoes",
+                    processes = [process(name = "jim"), process(name = "bob")],
+                    constraints = [Constraint(order = ["jim", "bob"])])
+
+Constraints can be supplied ad-hoc and in duplicate. Not all
+Processes need be constrained, however Tasks with cycles are
+rejected by the Thermos scheduler.
+
+Use the `order` function as shorthand to generate `Constraint` lists.
+The following:
+
+        order(process1, process2)
+
+is shorthand for
+
+        [Constraint(order = [process1.name(), process2.name()])]
+
+#### resources
+
+Takes a `Resource` object, which specifies the amounts of CPU, memory, and disk space resources
+to allocate to the Task.
+
+#### max_failures
+
+`max_failures` is the number of times processes that are part of this
+Task can fail before the entire Task is marked for failure.
+
+For example:
+
+        template = Process(max_failures=10)
+        task = Task(
+          name = "fail",
+          processes = [
+             template(name = "failing", cmdline = "exit 1"),
+             template(name = "succeeding", cmdline = "exit 0")
+          ],
+          max_failures=2)
+
+The `failing` Process could fail 10 times before being marked as
+permanently failed, and the `succeeding` Process would succeed on the
+first run. The task would succeed despite only allowing for two failed
+processes. To be more specific, there would be 10 failed process runs
+yet 1 failed process.
+
+#### max_concurrency
+
+For Tasks with a number of expensive but otherwise independent
+processes, you may want to limit the amount of concurrency
+the Thermos scheduler provides rather than artificially constraining
+it via `order` constraints. For example, a test framework may
+generate a task with 100 test run processes, but wants to run it on
+a machine with only 4 cores. You can limit the amount of parallelism to
+4 by setting `max_concurrency=4` in your task configuration.
+
+For example, the following task spawns 180 Processes ("mappers")
+to compute individual elements of a 180 degree sine table, all dependent
+upon one final Process ("reducer") to tabulate the results:
+
+    def make_mapper(id):
+      return Process(
+        name = "mapper%03d" % id,
+        cmdline = "echo 'scale=50;s(%d*4*a(1)/180)' | bc -l > "
+                  "temp.sine_table.%03d" % (id, id))
+
+    def make_reducer():
+      return Process(name = "reducer",
+                     cmdline = "cat temp.* | nl > sine_table.txt && rm -f temp.*")
+
+    processes = map(make_mapper, range(180))
+
+    task = Task(
+      name = "mapreduce",
+      processes = processes + [make_reducer()],
+      constraints = [Constraint(order = [mapper.name(), 'reducer']) for mapper
+                     in processes],
+      max_concurrency = 8)
+
+#### finalization_wait
+
+Tasks have three active stages: `ACTIVE`, `CLEANING`, and `FINALIZING`. The
+`ACTIVE` stage is when ordinary processes run. This stage lasts as
+long as Processes are running and the Task is healthy. The moment either
+all Processes have finished successfully or the Task has reached a
+maximum Process failure limit, it goes into the `CLEANING` stage and sends
+SIGTERMs to all currently running Processes and their process trees.
+Once all Processes have terminated, the Task goes into `FINALIZING` stage
+and invokes the schedule of all Processes with the "final" attribute set to True.
+
+This whole process from the end of `ACTIVE` stage to the end of `FINALIZING`
+must happen within `finalization_wait` seconds. If it does not
+finish during that time, all remaining Processes are sent SIGKILLs
+(or if they depend upon uncompleted Processes, are
+never invoked.)
+
+Client applications with higher priority may force a shorter
+finalization wait (e.g. through parameters to `thermos kill`), so this
+is mostly a best-effort signal.
+
+
+### Constraint Object
+
+Current constraint objects only support a single ordering constraint, `order`,
+which specifies that its processes run sequentially in the order given. By
+default, all processes run in parallel when bound to a `Task` without
+ordering constraints.
+
+   param | type           | description
+   ----- | :----:         | -----------
+   order | List of String | List of processes by name (String) that should be run serially.
+
+### Resource Object
+
+Specifies the amount of CPU, RAM, and disk resources the task needs. See the
+[Resource Isolation document](/documentation/0.9.0/resource-isolation/) for suggested values and to understand how
+resources are allocated.
+
+  param      | type    | description
+  -----      | :----:  | -----------
+  ```cpu```  | Float   | Fractional number of cores required by the task.
+  ```ram```  | Integer | Bytes of RAM required by the task.
+  ```disk``` | Integer | Bytes of disk required by the task.
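+
+For example, a modest task footprint could be declared as follows (the values
+are illustrative only):
+
+    resources = Resources(cpu = 0.5, ram = 128 * MB, disk = 256 * MB)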
+
+
+Job Schema
+==========
+
+### Job Objects
+
+   name | type | description
+   ------ | :-------: | -------
+  ```task``` | Task | The Task object to bind to this job. Required.
+  ```name``` | String | Job name. (Default: inherited from the task attribute's name)
+  ```role``` | String | Job role account. Required.
+  ```cluster``` | String | Cluster in which this job is scheduled. Required.
+   ```environment``` | String | Job environment, default ```devel```. Must be one of ```prod```, ```devel```, ```test``` or ```staging<number>```.
+  ```contact``` | String | Best email address to reach the owner of the job. For production jobs, this is usually a team mailing list.
+  ```instances```| Integer | Number of instances (sometimes referred to as replicas or shards) of the task to create. (Default: 1)
+   ```cron_schedule``` | String | Cron schedule in cron format. May only be used with non-service jobs. See [Cron Jobs](/documentation/0.9.0/cron-jobs/) for more information. Default: None (not a cron job.)
+  ```cron_collision_policy``` | String | Policy to use when a cron job is triggered while a previous run is still active. KILL_EXISTING: kill the previous run and schedule the new run. CANCEL_NEW: let the previous run continue and cancel the new run. (Default: KILL_EXISTING)
+  ```update_config``` | ```update_config``` object | Parameters for controlling the rate and policy of rolling updates.
+  ```constraints``` | dict | Scheduling constraints for the tasks. See the section on the [constraint specification language](#Specifying-Scheduling-Constraints)
+  ```service``` | Boolean | If True, restart tasks regardless of success or failure. (Default: False)
+  ```max_task_failures``` | Integer | Maximum number of failures after which the task is considered to have failed (Default: 1). Set to -1 to allow for infinite failures.
+  ```priority``` | Integer | Preemption priority to give the task (Default 0). Tasks with higher priorities may preempt tasks at lower priorities.
+  ```production``` | Boolean |  Whether or not this is a production task backed by quota (Default: False). Production jobs may preempt any non-production job, and may only be preempted by production jobs in the same role and of higher priority. To run jobs at this level, the job role must have the appropriate quota. To grant quota to a particular role in production, operators use the ``aurora_admin set_quota`` command.
+  ```health_check_config``` | ```health_check_config``` object | Parameters for controlling a task's health checks via HTTP. Only used if a health port was assigned with a command line wildcard.
+  ```container``` | ```Container``` object | An optional container to run all processes inside of.
+  ```lifecycle``` | ```LifecycleConfig``` object | An optional task lifecycle configuration that dictates commands to be executed on startup/teardown.  HTTP lifecycle is enabled by default if the "health" port is requested.  See [LifecycleConfig Objects](#lifecycleconfig-objects) for more information.
+
+### Services
+
+Jobs with the `service` flag set to True are called Services. The `Service`
+alias can be used as shorthand for `Job` with `service=True`.
+Services are differentiated from non-service Jobs in that tasks
+always restart on completion, whether successful or unsuccessful.
+Jobs without the service bit set only restart up to
+`max_task_failures` times and only if they terminated unsuccessfully
+either due to human error or machine failure.
+
+### UpdateConfig Objects
+
+Parameters for controlling the rate and policy of rolling updates.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+| ```restart_threshold```      | Integer  | Maximum number of seconds a shard is given to move into the ```RUNNING``` state before it is considered a failure (Default: 60)
+| ```watch_secs```             | Integer  | Minimum number of seconds a shard must remain in the ```RUNNING``` state before it is considered a success (Default: 45)
+| ```max_per_shard_failures``` | Integer  | Maximum number of restarts per shard during update. Increments total failure count when this limit is exceeded. (Default: 0)
+| ```max_total_failures```     | Integer  | Maximum number of shard failures to be tolerated in total during an update. Cannot be greater than or equal to the total number of tasks in a job. (Default: 0)
+| ```rollback_on_failure```    | boolean  | When False, prevents auto rollback of a failed update (Default: True)
+| ```wait_for_batch_completion```| boolean | When True, all threads from a given batch will be blocked from picking up new instances until the entire batch is updated. This essentially simulates the legacy sequential updater algorithm. (Default: False)
+| ```pulse_interval_secs```    | Integer  |  Indicates a [coordinated update](/documentation/0.9.0/client-commands/#coordinated-job-updates). If no pulses are received within the provided interval the update will be blocked. Beta-updater only. Will fail on submission when used with client updater. (Default: None)
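+
+A sketch of an `UpdateConfig` bound to a Job via its `update_config` attribute
+(the values are illustrative only):
+
+    update_config = UpdateConfig(
+      batch_size = 3,
+      watch_secs = 60,
+      max_per_shard_failures = 2)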
+
+### HealthCheckConfig Objects
+
+Parameters for controlling a task's health checks via HTTP.
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```initial_interval_secs```    | Integer   | Initial delay for performing an HTTP health check. (Default: 15)
+| ```interval_secs```            | Integer   | Interval on which to check the task's health via HTTP. (Default: 10)
+| ```max_consecutive_failures``` | Integer   | Maximum number of consecutive failures tolerated before considering a task unhealthy (Default: 0)
+| ```timeout_secs```             | Integer   | HTTP request timeout. (Default: 1)
+| ```endpoint```                 | String    | HTTP endpoint to check (Default: /health)
+| ```expected_response```        | String    | If not empty, fail the health check if the response differs. Case insensitive. (Default: ok)
+| ```expected_response_code```   | Integer   | If not zero, fail the health check if the response code differs. (Default: 0)
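+
+A sketch of a `HealthCheckConfig` bound to a Job via its `health_check_config`
+attribute (the values are illustrative only):
+
+    health_check_config = HealthCheckConfig(
+      initial_interval_secs = 30,
+      interval_secs = 15,
+      max_consecutive_failures = 3)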
+
+### Announcer Objects
+
+If the `announce` field in the Job configuration is set, each task will be
+registered in the ServerSet `/aurora/role/environment/jobname` in the
+zookeeper ensemble configured by the executor.  If no Announcer object is specified,
+no announcement will take place.  For more information about ServerSets, see the [User Guide](/documentation/0.9.0/user-guide/).
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```primary_port```             | String    | Which named port to register as the primary endpoint in the ServerSet (Default: `http`)
+| ```portmap```                  | dict      | A mapping of additional endpoints to be announced in the ServerSet (Default: `{ 'aurora': '{{primary_port}}' }`)
+
+### Port aliasing with the Announcer `portmap`
+
+The primary endpoint registered in the ServerSet is the one allocated to the port
+specified by the `primary_port` in the `Announcer` object, by default
+the `http` port.  This port can be referenced from anywhere within a configuration
+as `{{thermos.ports[http]}}`.
+
+Without the port map, each named port would be allocated a unique port number.
+The `portmap` allows two different named ports to be aliased together.  The default
+`portmap` aliases the `aurora` port (i.e. `{{thermos.ports[aurora]}}`) to
+the `http` port.  Even though the two ports can be referenced independently,
+only one port is allocated by Mesos.  Any port referenced in a `Process` object
+but which is not in the portmap will be allocated dynamically by Mesos and announced as well.
+
+It is possible to use the portmap to alias names to static port numbers, e.g.
+`{'http': 80, 'https': 443, 'aurora': 'http'}`.  In this case, referencing
+`{{thermos.ports[aurora]}}` would look up `{{thermos.ports[http]}}` then
+find a static port 80.  No port would be requested of or allocated by Mesos.
+
+Static ports should be used cautiously as Aurora does nothing to prevent two
+tasks with the same static port allocations from being co-scheduled.
+External constraints such as slave attributes should be used to enforce such
+guarantees should they be needed.
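+
+A sketch of an `Announcer` bound to a Job via its `announce` attribute, using
+the static port alias described above (the portmap values are illustrative):
+
+    announce = Announcer(
+      primary_port = 'http',
+      portmap = {'http': 80, 'aurora': 'http'})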
+
+### Container Object
+
+*Note: The only container type currently supported is "docker".  Docker support is currently EXPERIMENTAL.*
+*Note: In order to correctly execute processes inside a job, the Docker container must have python 2.7 installed.*
+
+Describes the container the job's processes will run inside.
+
+  param          | type           | description
+  -----          | :----:         | -----------
+  ```docker```   | Docker         | A docker container to use.
+
+### Docker Object
+
+  param          | type           | description
+  -----          | :----:         | -----------
+  ```image```    | String         | The name of the docker image to execute.  If the image does not exist locally it will be pulled with ```docker pull```.
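+
+A sketch of a containerized Job's `container` attribute (the image name is
+illustrative; per the note above, the image must have python 2.7 installed):
+
+    container = Container(docker = Docker(image = 'python:2.7'))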
+
+### LifecycleConfig Objects
+
+*Note: The only lifecycle configuration supported is the HTTP lifecycle via the HTTPLifecycleConfig.*
+
+  param          | type                | description
+  -----          | :----:              | -----------
+  ```http```     | HTTPLifecycleConfig | Configure the lifecycle manager to send lifecycle commands to the task via HTTP.
+
+### HTTPLifecycleConfig Objects
+
+  param          | type            | description
+  -----          | :----:          | -----------
+  ```port```     | String          | The named port to send POST commands (Default: health)
+  ```graceful_shutdown_endpoint``` | String | Endpoint to hit to indicate that a task should gracefully shutdown. (Default: /quitquitquit)
+  ```shutdown_endpoint``` | String | Endpoint to hit to give a task its final warning before being killed. (Default: /abortabortabort)
+
+#### graceful_shutdown_endpoint
+
+If the Job is listening on the port as specified by the HTTPLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint to request that the task gracefully shut itself down.  This is a
+courtesy call before the `shutdown_endpoint` is invoked a fixed amount of
+time later.
+
+#### shutdown_endpoint
+
+If the Job is listening on the port as specified by the HTTPLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint as a final warning before the task is shut down.  If the task
+does not shut down on its own after this, it will be forcefully killed.
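+
+A sketch of a Job's `lifecycle` attribute wiring these together (the endpoint
+values shown are simply the documented defaults):
+
+    lifecycle = LifecycleConfig(
+      http = HTTPLifecycleConfig(
+        graceful_shutdown_endpoint = '/quitquitquit',
+        shutdown_endpoint = '/abortabortabort'))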
+
+
+Specifying Scheduling Constraints
+=================================
+
+Most users will not need to specify constraints explicitly, as the
+scheduler automatically inserts reasonable defaults that attempt to
+ensure reliability without impacting schedulability. For example, the
+scheduler inserts a `host: limit:1` constraint, ensuring
+that your shards run on different physical machines. Please do not
+set this field unless you are sure of what you are doing.
+
+In the `Job` object there is a map `constraints` from String to String
+allowing the user to tailor the schedulability of tasks within the job.
+
+Each slave in the cluster is assigned a set of string-valued
+key/value pairs called attributes. For example, consider the host
+`cluster1-aaa-03-sr2` and its following attributes (given in key:value
+format): `host:cluster1-aaa-03-sr2` and `rack:aaa`.
+
+The constraint map's key value is the attribute name in which we
+constrain Tasks within our Job. The value is how we constrain them.
+There are two types of constraints: *limit constraints* and *value
+constraints*.
+
+| constraint    | description
+| ------------- | --------------
+| Limit         | A string that specifies a limit for a constraint. Starts with <code>'limit:</code> followed by an Integer and closing single quote, such as ```'limit:1'```.
+| Value         | A string that specifies a value for a constraint. To include a list of values, separate the values using commas. To negate the values of a constraint, start with a ```!```.
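+
+For example, a value constraint can pin a job's tasks to hosts carrying a
+particular attribute value, such as the `rack:aaa` attribute mentioned above:
+
+    constraints = {
+      'rack': 'aaa',
+    }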
+
+You can also control machine diversity using constraints. The below
+constraint ensures that no more than two instances of your job may run
+on a single host. Think of this as a "group by" limit.
+
+    constraints = {
+      'host': 'limit:2',
+    }
+
+Likewise, you can use constraints to control rack diversity, e.g. at
+most one task per rack:
+
+    constraints = {
+      'rack': 'limit:1',
+    }
+
+Use these constraints sparingly as they can dramatically reduce Tasks' schedulability.
+
+Template Namespaces
+===================
+
+Currently, a few Pystachio namespaces have special semantics. Using them
+in your configuration allow you to tailor application behavior
+through environment introspection or interact in special ways with the
+Aurora client or Aurora-provided services.
+
+### mesos Namespace
+
+The `mesos` namespace contains the `instance` variable that can be used
+to distinguish between Task replicas.
+
+| variable name     | type       | description
+| --------------- | :--------: | -------------
+| ```instance```    | Integer    | The instance number of the created task. A job with 5 replicas has instance numbers 0, 1, 2, 3, and 4.
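+
+For example, a process can use the instance number to vary its behavior per
+replica (the command line is illustrative only):
+
+    shard_aware = Process(
+      name = 'shard_aware',
+      cmdline = 'echo "I am instance {{mesos.instance}}"')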
+
+### thermos Namespace
+
+The `thermos` namespace contains variables that work directly on the
+Thermos platform in addition to Aurora. This namespace is fully
+compatible with Tasks invoked via the `thermos` CLI.
+
+| variable      | type                     | description                        |
+| :----------:  | ---------                | ------------                       |
+| ```ports```   | map of string to Integer | A map of names to port numbers     |
+| ```task_id``` | string                   | The task ID assigned to this task. |
+
+The `thermos.ports` namespace is automatically populated by Aurora when
+invoking tasks on Mesos. When running the `thermos` command directly,
+these ports must be explicitly mapped with the `-P` option.
+
+For example, if `{{thermos.ports[http]}}` is specified in a `Process`
+configuration, it is automatically extracted and auto-populated by
+Aurora, but must be specified with, for example, `thermos -P http:12345`
+to map `http` to port 12345 when running via the CLI.
+
+Basic Examples
+==============
+
+These are provided to give a basic understanding of simple Aurora jobs.
+
+### hello_world.aurora
+
+Put the following in a file named `hello_world.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    import os
+    hello_world_process = Process(name = 'hello_world', cmdline = 'echo hello world')
+
+    hello_world_task = Task(
+      resources = Resources(cpu = 0.1, ram = 16 * MB, disk = 16 * MB),
+      processes = [hello_world_process])
+
+    hello_world_job = Job(
+      cluster = 'cluster1',
+      role = os.getenv('USER'),
+      task = hello_world_task)
+
+    jobs = [hello_world_job]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world hello_world.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world
+
+### Environment Tailoring
+
+#### hello_world_productionized.aurora
+
+Put the following in a file named `hello_world_productionized.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    include('hello_world.aurora')
+
+    production_resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 2 * GB)
+    staging_resources = Resources(cpu = 0.1, ram = 32 * MB, disk = 512 * MB)
+    hello_world_template = hello_world(
+        name = "hello_world-{{cluster}}",
+        task = hello_world(resources=production_resources))
+
+    jobs = [
+      # production jobs
+      hello_world_template(cluster = 'cluster1', instances = 25),
+      hello_world_template(cluster = 'cluster2', instances = 15),
+
+      # staging jobs
+      hello_world_template(
+        cluster = 'local',
+        instances = 1,
+        task = hello_world(resources=staging_resources)),
+    ]
+
+Then issue the following commands to create and kill the job, using your own values for the job key
+
+    aurora job create cluster1/$USER/test/hello_world-cluster1 hello_world_productionized.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world-cluster1
diff --git a/source/documentation/0.9.0/configuration-tutorial.md b/source/documentation/0.9.0/configuration-tutorial.md
new file mode 100644
index 0000000..3690ebe
--- /dev/null
+++ b/source/documentation/0.9.0/configuration-tutorial.md
@@ -0,0 +1,1125 @@
+Aurora Configuration Tutorial
+=============================
+
+How to write Aurora configuration files, including feature descriptions
+and best practices. When writing a configuration file, make use of
+`aurora job inspect`. It takes the same job key and configuration file
+arguments as `aurora job create` or `aurora job update`. It first ensures the
+configuration parses, then outputs it in human-readable form.
+
+You should read this after going through the general [Aurora Tutorial](/documentation/0.9.0/tutorial/).
+
+- [Aurora Configuration Tutorial](#aurora-configuration-tutorial)
+	- [The Basics](#the-basics)
+		- [Use Bottom-To-Top Object Ordering](#use-bottom-to-top-object-ordering)
+	- [An Example Configuration File](#an-example-configuration-file)
+	- [Defining Process Objects](#defining-process-objects)
+	- [Getting Your Code Into The Sandbox](#getting-your-code-into-the-sandbox)
+	- [Defining Task Objects](#defining-task-objects)
+		- [SequentialTask: Running Processes in Parallel or Sequentially](#sequentialtask-running-processes-in-parallel-or-sequentially)
+		- [SimpleTask](#simpletask)
+		- [Combining tasks](#combining-tasks)
+	- [Defining Job Objects](#defining-job-objects)
+	- [The jobs List](#the-jobs-list)
+	- [Templating](#templating)
+		- [Templating 1: Binding in Pystachio](#templating-1-binding-in-pystachio)
+		- [Structurals in Pystachio / Aurora](#structurals-in-pystachio--aurora)
+			- [Mustaches Within Structurals](#mustaches-within-structurals)
+		- [Templating 2: Structurals Are Factories](#templating-2-structurals-are-factories)
+			- [A Second Way of Templating](#a-second-way-of-templating)
+		- [Advanced Binding](#advanced-binding)
+			- [Bind Syntax](#bind-syntax)
+			- [Binding Complex Objects](#binding-complex-objects)
+				- [Lists](#lists)
+				- [Maps](#maps)
+				- [Structurals](#structurals)
+		- [Structural Binding](#structural-binding)
+	- [Configuration File Writing Tips And Best Practices](#configuration-file-writing-tips-and-best-practices)
+		- [Use As Few .aurora Files As Possible](#use-as-few-aurora-files-as-possible)
+		- [Avoid Boilerplate](#avoid-boilerplate)
+		- [Thermos Uses bash, But Thermos Is Not bash](#thermos-uses-bash-but-thermos-is-not-bash)
+			- [Bad](#bad)
+			- [Good](#good)
+		- [Rarely Use Functions In Your Configurations](#rarely-use-functions-in-your-configurations)
+			- [Bad](#bad-1)
+			- [Good](#good-1)
+
+The Basics
+----------
+
+To run a job on Aurora, you must specify a configuration file that tells
+Aurora what it needs to know to schedule the job, what Mesos needs to
+run the tasks the job is made up of, and what Thermos needs to run the
+processes that make up the tasks. This file must have
+a `.aurora` suffix.
+
+A configuration file defines a collection of objects, along with parameter
+values for their attributes. An Aurora configuration file contains the
+following three types of objects:
+
+- Job
+- Task
+- Process
+
+A configuration also specifies a list of `Job` objects assigned
+to the variable `jobs`.
+
+- jobs (list of defined Jobs to run)
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a type-checked dictionary
+templating library called *Pystachio*, a powerful tool for
+configuration specification and reuse. Pystachio objects are tailored
+via `{{}}`-surrounded templates.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the [*Aurora+Thermos Configuration
+Reference*](/documentation/0.9.0/configuration-reference/), without `import` statements - the
+Aurora config loader injects them automatically. Other than that, an `.aurora`
+file works like any other Python script.
+
+[*Aurora+Thermos Configuration Reference*](/documentation/0.9.0/configuration-reference/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+### Use Bottom-To-Top Object Ordering
+
+A well-structured configuration starts with structural templates (if
+any). Structural templates encapsulate in their attributes all the
+differences between Jobs in the configuration that are not directly
+manipulated at the `Job` level, but typically at the `Process` or `Task`
+level. For example, if certain processes are invoked with slightly
+different settings or input.
+
+After structural templates, define, in order, `Process`es, `Task`s, and
+`Job`s.
+
+Structural template names should be *UpperCamelCased* and their
+instantiations are typically *UPPER\_SNAKE\_CASED*. `Process`, `Task`,
+and `Job` names are typically *lower\_snake\_cased*. Indentation is typically 2
+spaces.
+
+An Example Configuration File
+-----------------------------
+
+The following is a typical configuration file. Don't worry if there are
+parts you don't understand yet, but you may want to refer back to this
+as you read about its individual parts. Note that names surrounded by
+curly braces {{}} are template variables, which the system replaces with
+bound values for the variables.
+
+    # --- templates here ---
+	class Profile(Struct):
+	  package_version = Default(String, 'live')
+	  java_binary = Default(String, '/usr/lib/jvm/java-1.7.0-openjdk/bin/java')
+	  extra_jvm_options = Default(String, '')
+	  parent_environment = Default(String, 'prod')
+	  parent_serverset = Default(String,
+                                 '/foocorp/service/bird/{{parent_environment}}/bird')
+
+	# --- processes here ---
+	main = Process(
+	  name = 'application',
+	  cmdline = '{{profile.java_binary}} -server -Xmx1792m '
+	            '{{profile.extra_jvm_options}} '
+	            '-jar application.jar '
+	            '-upstreamService {{profile.parent_serverset}}'
+	)
+
+	# --- tasks ---
+	base_task = SequentialTask(
+	  name = 'application',
+	  processes = [
+	    Process(
+	      name = 'fetch',
+	      cmdline = 'curl -O
+                  https://packages.foocorp.com/{{profile.package_version}}/application.jar'),
+	  ]
+	)
+
+        # not always necessary but often useful to have separate task
+        # resource classes
+        staging_task = base_task(resources =
+                         Resources(cpu = 1.0,
+                                   ram = 2048*MB,
+                                   disk = 1*GB))
+	production_task = base_task(resources =
+                            Resources(cpu = 4.0,
+                                      ram = 2560*MB,
+                                      disk = 10*GB))
+
+	# --- job template ---
+	job_template = Job(
+	  name = 'application',
+	  role = 'myteam',
+	  contact = 'myteam-team@foocorp.com',
+	  instances = 20,
+	  service = True,
+	  task = production_task
+	)
+
+	# -- profile instantiations (if any) ---
+	PRODUCTION = Profile()
+	STAGING = Profile(
+	  extra_jvm_options = '-Xloggc:gc.log',
+	  parent_environment = 'staging'
+	)
+
+	# -- job instantiations --
+	jobs = [
+          job_template(cluster = 'cluster1', environment = 'prod')
+	               .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster2', environment = 'prod')
+	                .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster1',
+                        environment = 'staging',
+			service = False,
+			task = staging_task,
+			instances = 2)
+			.bind(profile = STAGING),
+	]
+
+## Defining Process Objects
+
+Processes are handled by the Thermos system. A process is a single
+executable step run as a part of an Aurora task, which consists of a
+bash-executable statement.
+
+The key (and required) `Process` attributes are:
+
+-   `name`: Any string which is a valid Unix filename (no slashes,
+    NULLs, or leading periods). The `name` value must be unique relative
+    to other Processes in a `Task`.
+-   `cmdline`: A command line run in a bash subshell, so you can use
+    bash scripts. Nothing is supplied for command-line arguments,
+    so `$*` is unspecified.
+
+Many tiny processes make managing configurations more difficult. For
+example, the following is a bad way to define processes.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip'
+    )
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+Since `cmdline` runs in a bash subshell, you can chain commands
+with `&&` or `||`.
+
+When defining a `Task` that is just a list of Processes run in a
+particular order, use `SequentialTask`, as described in the [*Defining*
+`Task` *Objects*](#defining-task-objects) section. The following simplifies and combines the
+above multiple `Process` definitions into just two.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip && '
+                'unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+`Process` also has five optional attributes, each with a default value
+if one isn't specified in the configuration:
+
+-   `max_failures`: Defaulting to `1`, the maximum number of failures
+    (non-zero exit statuses) before this `Process` is marked permanently
+    failed and not retried. If a `Process` permanently fails, Thermos
+    checks the `Process` object's containing `Task` for the task's
+    failure limit (usually 1) to determine whether or not the `Task`
+    should be failed. Setting `max_failures` to `0` means that this
+    process will keep retrying until a successful (zero) exit status is
+    achieved. Retries happen at most once every `min_duration` seconds
+    to prevent effectively mounting a denial of service attack against
+    the coordinating scheduler.
+
+-   `daemon`: Defaulting to `False`, if `daemon` is set to `True`, a
+    successful (zero) exit status does not prevent future process runs.
+    Instead, the `Process` reinvokes after `min_duration` seconds.
+    However, the maximum failure limit (`max_failures`) still
+    applies. A combination of `daemon=True` and `max_failures=0` retries
+    a `Process` indefinitely regardless of exit status. This should
+    generally be avoided for very short-lived processes because of the
+    accumulation of checkpointed state for each process run. When
+    running in Aurora, `max_failures` is capped at
+    100.
+
+-   `ephemeral`: Defaulting to `False`, if `ephemeral` is `True`, the
+    `Process`' status is not used to determine if its bound `Task` has
+    completed. For example, consider a `Task` with a
+    non-ephemeral webserver process and an ephemeral logsaver process
+    that periodically checkpoints its log files to a centralized data
+    store. The `Task` is considered finished once the webserver process
+    finishes, regardless of the logsaver's current status.
+
+-   `min_duration`: Defaults to `15`. Processes may succeed or fail
+    multiple times during a single Task. Each result is called a
+    *process run* and this value is the minimum number of seconds the
+    scheduler waits before re-running the same process.
+
+-   `final`: Defaulting to `False`, this is a finalizing `Process` that
+    should run last. Processes can be grouped into two classes:
+    *ordinary* and *finalizing*. By default, Thermos Processes are
+    ordinary. They run as long as the `Task` is considered
+    healthy (i.e. hasn't reached a failure limit). But once all regular
+    Thermos Processes have either finished or the `Task` has reached a
+    certain failure threshold, Thermos moves into a *finalization* stage
+    and runs all finalizing Processes. These are typically necessary for
+    cleaning up after the `Task`, such as log checkpointers, or perhaps
+    e-mail notifications of a completed Task. Finalizing processes may
+    not depend upon ordinary processes or vice-versa, however finalizing
+    processes may depend upon other finalizing processes and will
+    otherwise run as a typical process schedule.
+
+## Getting Your Code Into The Sandbox
+
+When using Aurora, you need to get your executable code into its "sandbox", specifically
+the Task sandbox where the code executes for the Processes that make up that Task.
+
+Each Task has a sandbox created when the Task starts and garbage
+collected when it finishes. All of a Task's processes run in its
+sandbox, so processes can share state by using a shared current
+working directory.
+
+Typically, you save this code somewhere. You then need to define a Process
+in your `.aurora` configuration file that fetches the code from that somewhere
+to where the slave can see it. For a public cloud, that can be anywhere public on
+the Internet, such as S3. For a private cloud's internal storage, you need to put it
+on an accessible HDFS cluster or similar storage.
+
+The template for this Process is:
+
+    <name> = Process(
+      name = '<name>',
+      cmdline = '<command to copy and extract code archive into current working directory>'
+    )
+
+Note: Be sure the extracted code archive has an executable.
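+
+For example, a concrete fetch process in this pattern, reusing the fictional
+package URL from the examples earlier in this document:
+
+    fetch_app = Process(
+      name = 'fetch_app',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip && unzip app.zip')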
+
+## Defining Task Objects
+
+Tasks are handled by Mesos. A task is a collection of processes that
+runs in a shared sandbox. It's the fundamental unit Aurora uses to
+schedule the datacenter; essentially what Aurora does is find places
+in the cluster to run tasks.
+
+The key (and required) parts of a Task are:
+
+-   `name`: A string giving the Task's name. By default, if a Task is
+    not given a name, it inherits the first name in its Process list.
+
+-   `processes`: An unordered list of Process objects bound to the Task.
+    The value of the optional `constraints` attribute affects the
+    contents as a whole. Currently, the only constraint, `order`, determines if
+    the processes run in parallel or sequentially.
+
+-   `resources`: A `Resource` object defining the Task's resource
+    footprint. A `Resource` object has three attributes:
+    -   `cpu`: A Float, the fractional number of cores the Task requires.
+    -   `ram`: An Integer, RAM bytes the Task requires.
+    -   `disk`: An Integer, disk bytes the Task requires.
+
+A basic Task definition looks like:
+
+    Task(
+        name="hello_world",
+        processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+        resources=Resources(cpu = 1.0,
+                            ram = 1*GB,
+                            disk = 1*GB))
+
+There are four optional Task attributes:
+
+-   `constraints`: A list of `Constraint` objects that constrain the
+    Task's processes. Currently there is only one type, the `order`
+    constraint. For example the following requires that the processes
+    run in the order `foo`, then `bar`.
+
+        constraints = [Constraint(order=['foo', 'bar'])]
+
+    There is an `order()` function that takes `order('foo', 'bar', 'baz')`
+    and converts it into `[Constraint(order=['foo', 'bar', 'baz'])]`.
+    `order()` accepts Process name strings `('foo', 'bar')` or the processes
+    themselves, e.g. `foo=Process(name='foo', ...)`, `bar=Process(name='bar', ...)`,
+    `constraints=order(foo, bar)`
+
+    Note that Thermos rejects tasks with process cycles.
+
+-   `max_failures`: Defaulting to `1`, the number of failed processes
+    needed for the `Task` to be marked as failed. Note how this
+    interacts with individual Processes' `max_failures` values. Assume a
+    Task has two Processes and a `max_failures` value of `2`. So both
+    Processes must fail for the Task to fail. Now, assume each of the
+    Task's Processes has its own `max_failures` value of `10`. If
+    Process "A" fails 5 times before succeeding, and Process "B" fails
+    10 times and is then marked as failing, their parent Task succeeds.
+    Even though there were 15 individual failures by its Processes, only
+    1 of its Processes was finally marked as failing. Since 1 is less
+    than the 2 that is the Task's `max_failures` value, the Task does
+    not fail.
+
+-   `max_concurrency`: Defaulting to `0`, the maximum number of
+    concurrent processes in the Task. `0` specifies unlimited
+    concurrency. For Tasks with many expensive but otherwise independent
+    processes, you can limit the amount of concurrency Thermos schedules
+    instead of artificially constraining them through `order`
+    constraints. For example, a test framework may generate a Task with
+    100 test run processes, but runs it in a Task with
+    `resources.cpus=4`. Limit the amount of parallelism to 4 by setting
+    `max_concurrency=4`.
+
+-   `finalization_wait`: Defaulting to `30`, the number of seconds
+    allocated for finalizing the Task's processes. A Task starts in
+    `ACTIVE` state when Processes run and stays there as long as the Task
+    is healthy and Processes run. When all Processes finish successfully
+    or the Task reaches its maximum process failure limit, it goes into
+    `CLEANING` state. In `CLEANING`, it sends `SIGTERM`s to any still running
+    Processes. When all Processes terminate, the Task goes into
+    `FINALIZING` state and invokes the schedule of all processes whose
+    final attribute has a True value. Everything from the end of `ACTIVE`
+    to the end of `FINALIZING` must happen within `finalization_wait`
+    number of seconds. If not, all still running Processes are sent
+    `SIGKILL`s (or if dependent on yet to be completed Processes, are
+    never invoked).
+
+### SequentialTask: Running Processes in Parallel or Sequentially
+
+By default, a Task with several Processes runs them in parallel. There
+are two ways to run Processes sequentially:
+
+-   Include an `order` constraint in the Task definition's `constraints`
+    attribute whose arguments specify the processes' run order:
+
+        Task( ... processes=[process1, process2, process3],
+	          constraints = order(process1, process2, process3), ...)
+
+-   Use `SequentialTask` instead of `Task`; it automatically runs
+    processes in the order specified in the `processes` attribute. No
+    `constraint` parameter is needed:
+
+        SequentialTask( ... processes=[process1, process2, process3] ...)
+
+### SimpleTask
+
+For quickly creating simple tasks, use the `SimpleTask` helper. It
+creates a basic task from a provided name and command line using a
+default set of resources. For example, in a .`aurora` configuration
+file:
+
+    SimpleTask(name="hello_world", command="echo hello world")
+
+is equivalent to
+
+    Task(name="hello_world",
+         processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+         resources=Resources(cpu = 1.0,
+                             ram = 1*GB,
+                             disk = 1*GB))
+
+The simplest idiomatic Job configuration thus becomes:
+
+    import os
+    hello_world_job = Job(
+      task=SimpleTask(name="hello_world", command="echo hello world"),
+      role=os.getenv('USER'),
+      cluster="cluster1")
+
+When written to `hello_world.aurora`, you invoke it with a simple
+`aurora job create cluster1/$USER/test/hello_world hello_world.aurora`.
+
+### Combining tasks
+
+`Tasks.concat` (synonym: `concat_tasks`) and
+`Tasks.combine` (synonym: `combine_tasks`) merge multiple Task definitions
+into a single Task. It may be easier to define complex Jobs
+as smaller constituent Tasks. But since a Job only includes a single
+Task, the subtasks must be combined before using them in a Job.
+Smaller Tasks can also be reused between Jobs, instead of having to
+repeat their definition for multiple Jobs.
+
+With both methods, the merged Task takes the first Task's name. The
+difference between the two is the result Task's process ordering.
+
+-   `Tasks.combine` runs its subtasks' processes in no particular order.
+    The new Task's resource consumption is the sum of all its subtasks'
+    consumption.
+
+-   `Tasks.concat` runs its subtasks in the order supplied, with each
+    subtask's processes run serially between tasks. It is analogous to
+    the `order` constraint helper, except at the Task level instead of
+    the Process level. The new Task's resource consumption is the
+    maximum value specified by any subtask for each Resource attribute
+    (cpu, ram and disk).
+
+For example, given the following:
+
+    setup_task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper],
+      # It is important to note that {{Tasks.concat}} has
+      # no effect on the ordering of the processes within a task;
+      # hence the necessity of the {{order}} statement below
+      # (otherwise, the order in which {{download_interpreter}}
+      # and {{update_zookeeper}} run will be non-deterministic)
+      constraints=order(download_interpreter, update_zookeeper),
+      ...
+    )
+
+    run_task = SequentialTask(
+      ...
+      processes=[download_application, start_application],
+      ...
+    )
+
+    combined_task = Tasks.concat(setup_task, run_task)
+
+The `Tasks.concat` command merges the two Tasks into a single Task and
+ensures all processes in `setup_task` run before the processes
+in `run_task`. Conceptually, the task is reduced to:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper,
+                        download_application, start_application),
+      ...
+    )
+
+In the case of `Tasks.combine`, the two schedules run in parallel:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper) +
+                        order(download_application, start_application),
+      ...
+    )
+
+In the latter case, each of the two sequences may operate in parallel.
+Of course, this may not be the intended behavior (for example, if
+the `start_application` Process implicitly relies
+upon `download_interpreter`). Make sure you understand the difference
+between using one or the other.
+
+## Defining Job Objects
+
+A job is a group of identical tasks that Aurora can run in a Mesos cluster.
+
+A `Job` object is defined by the values of several attributes, some
+required and some optional. The required attributes are:
+
+-   `task`: Task object to bind to this job. Note that a Job can
+    only take a single Task.
+
+-   `role`: Job's role account; in other words, the user account to run
+    the job as on a Mesos cluster machine. A common value is
+    `os.getenv('USER')`; using a Python command to get the user who
+    submits the job request. The other common value is the service
+    account that runs the job, e.g. `www-data`.
+
+-   `environment`: Job's environment, typical values
+    are `devel`, `test`, or `prod`.
+
+-   `cluster`: Aurora cluster to schedule the job in, defined in
+    `/etc/aurora/clusters.json` or `~/.clusters.json`. You can specify
+    jobs where the only difference is the `cluster`, then at run time
+    only run the Job whose job key includes your desired cluster's name.
+
+You usually see a `name` parameter. By default, `name` inherits its
+value from the Job's associated Task object, but you can override this
+default. Using these attributes, a Job definition might look like:
+
+    foo_job = Job( name = 'foo', cluster = 'cluster1',
+              role = os.getenv('USER'), environment = 'prod',
+              task = foo_task)
+
+In addition to the required attributes, there are several optional
+attributes. The first (strongly recommended) optional attribute is:
+
+-   `contact`: An email address for the Job's owner. For production
+    jobs, it is usually a team mailing list.
+
+Two more attributes deal with how to handle failure of the Job's Task:
+
+-   `max_task_failures`: An integer, defaulting to `1`, of the maximum
+    number of Task failures after which the Job is considered failed.
+    `-1` allows for infinite failures.
+
+-   `service`: A boolean, defaulting to `False`, which if `True`
+    restarts tasks regardless of whether they succeeded or failed. In
+    other words, if `True`, after the Job's Task completes, it
+    automatically starts again. This is for Jobs you want to run
+    continuously, rather than doing a single run.
+
+Three attributes deal with configuring the Job's Task:
+
+-   `instances`: Defaulting to `1`, the number of
+    instances/replicas/shards of the Job's Task to create.
+
+-   `priority`: Defaulting to `0`, the Job's Task's preemption priority,
+    for which higher values may preempt Tasks from Jobs with lower
+    values.
+
+-   `production`: a Boolean, defaulting to `False`, specifying that this
+    is a production job backed by quota. Tasks from production Jobs may
+    preempt tasks from any non-production job, and may only be preempted
+    by tasks from production jobs in the same role with higher
+    priority. **WARNING**: To run Jobs at this level, the Job role must
+    have the appropriate quota. To grant quota to a particular role in
+    production, operators use the ``aurora_admin set_quota`` command.
+
+The final three Job attributes each take an object as their value.
+
+-   `update_config`: An `UpdateConfig`
+    object provides parameters for controlling the rate and policy of
+    rolling updates. The `UpdateConfig` parameters are:
+    -   `batch_size`: An integer, defaulting to `1`, specifying the
+        maximum number of shards to update in one iteration.
+    -   `restart_threshold`: An integer, defaulting to `60`, specifying
+        the maximum number of seconds a shard may take to move into the
+        `RUNNING` state before it is considered a failure.
+    -   `watch_secs`: An integer, defaulting to `45`, specifying the
+        minimum number of seconds a shard must remain in the `RUNNING`
+        state before it is considered a success.
+    -   `max_per_shard_failures`: An integer, defaulting to `0`,
+        specifying the maximum number of restarts per shard during an
+        update. When the limit is exceeded, it increments the total
+        failure count.
+    -   `max_total_failures`: An integer, defaulting to `0`, specifying
+        the maximum number of shard failures tolerated during an update.
+        Cannot be equal to or greater than the job's total number of
+        tasks.
+-   `health_check_config`: A `HealthCheckConfig` object that provides
+    parameters for controlling a Task's health checks via HTTP. Only
+    used if a health port was assigned with a command line wildcard. The
+    `HealthCheckConfig` parameters are:
+    -   `initial_interval_secs`: An integer, defaulting to `15`,
+        specifying the initial delay for doing an HTTP health check.
+    -   `interval_secs`: An integer, defaulting to `10`, specifying the
+        number of seconds in the interval between checking the Task's
+        health.
+    -   `timeout_secs`: An integer, defaulting to `1`, specifying the
+        number of seconds the application has to respond to an HTTP health
+        check with `OK` before the check is considered a failure.
+    -   `max_consecutive_failures`: An integer, defaulting to `0`,
+        specifying the maximum number of consecutive failures before a
+        task is unhealthy.
+-   `constraints`: A `dict` Python object, specifying Task scheduling
+    constraints. Most users will not need to specify constraints, as the
+    scheduler automatically inserts reasonable defaults. Please do not
+    set this field unless you are sure of what you are doing. See the
+    section in the Aurora + Thermos Reference manual on [Specifying
+    Scheduling Constraints](/documentation/0.9.0/configuration-reference/) for more information.
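+
+Putting a few of the optional attributes above together, a hedged sketch of a
+service Job might look like the following, reusing the `foo_task` Task from the
+earlier example (the specific values are illustrative, not recommendations):
+
+    foo_service = Job(
+      name = 'foo',
+      cluster = 'cluster1',
+      role = os.getenv('USER'),
+      environment = 'prod',
+      task = foo_task,
+      instances = 3,
+      service = True,
+      contact = 'foo-team@example.com',
+      # Update one instance at a time; a shard must stay RUNNING for 45
+      # seconds before the updater counts it as healthy.
+      update_config = UpdateConfig(batch_size = 1, watch_secs = 45),
+      # Wait 15 seconds before issuing the first HTTP health check.
+      health_check_config = HealthCheckConfig(initial_interval_secs = 15))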
+
+## The jobs List
+
+At the end of your `.aurora` file, you need to specify a list of the
+file's defined Jobs to run in the order listed. For example, the
+following runs first `job1`, then `job2`, then `job3`.
+
+    jobs = [job1, job2, job3]
+
+Templating
+----------
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a templating library called
+*Pystachio*, a powerful tool for configuration specification and reuse.
+
+[Aurora+Thermos Configuration Reference](/documentation/0.9.0/configuration-reference/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the *Aurora+Thermos Configuration
+Reference* without `import` statements - the Aurora config loader
+injects them automatically. Other than that the `.aurora` format
+works like any other Python script.
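+
+Since the configuration is evaluated as Python, ordinary Python constructs are
+available. As a small, purely illustrative sketch (the process names and
+command line are made up), a list comprehension can stamp out several similar
+Processes:
+
+    # Plain Python inside a .aurora file.
+    worker_processes = [
+      Process(name = 'worker_%d' % i, cmdline = './worker --shard=%d' % i)
+      for i in range(3)
+    ]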
+
+### Templating 1: Binding in Pystachio
+
+Pystachio uses the visually distinctive `{{}}` to indicate template
+variables. These are often called "mustache variables" after the
+similarly appearing variables in the Mustache templating system and
+because the curly braces resemble mustaches.
+
+If you are familiar with the Mustache system, note that templates in
+Pystachio differ significantly: they have no nesting, joining, or
+inheritance semantics. On the other hand, templates are evaluated
+iteratively, which affords some level of indirection.
+
+Let's start with the simplest template: text with one
+variable, in this case `name`:
+
+    Hello {{name}}
+
+If we evaluate this as is, we'd get back:
+
+    Hello
+
+If a template variable doesn't have a value, when evaluated it's
+replaced with nothing. If we add a binding to give it a value:
+
+    { "name" : "Tom" }
+
+We'd get back:
+
+    Hello Tom
+
+Every Pystachio object has an associated `.bind` method that can bind
+values to {{}} variables. Bindings are not immediately evaluated.
+Instead, they are evaluated only when the interpolated value of the
+object is necessary, e.g. for performing equality or serializing a
+message over the wire.
+
+Objects with and without mustache templated variables behave
+differently:
+
+    >>> Float(1.5)
+    Float(1.5)
+
+    >>> Float('{{x}}.5')
+    Float({{x}}.5)
+
+    >>> Float('{{x}}.5').bind(x = 1)
+    Float(1.5)
+
+    >>> Float('{{x}}.5').bind(x = 1) == Float(1.5)
+    True
+
+    >>> contextual_object = String('{{metavar{{number}}}}').bind(
+    ... metavar1 = "first", metavar2 = "second")
+
+    >>> contextual_object
+    String({{metavar{{number}}}})
+
+    >>> contextual_object.bind(number = 1)
+    String(first)
+
+    >>> contextual_object.bind(number = 2)
+    String(second)
+
+You usually bind simple key-to-value pairs, but you can also bind three
+other objects: lists, dictionaries, and structurals. These will be
+described in detail later.
+
+### Structurals in Pystachio / Aurora
+
+Most Aurora/Thermos users don't ever (knowingly) interact with `String`,
+`Float`, or `Integer` Pystachio objects directly. Instead they interact
+with derived structural (`Struct`) objects that are collections of
+fundamental and structural objects. The structural object components are
+called *attributes*. Aurora's most used structural objects are `Job`,
+`Task`, and `Process`:
+
+    class Process(Struct):
+      cmdline = Required(String)
+      name = Required(String)
+      max_failures = Default(Integer, 1)
+      daemon = Default(Boolean, False)
+      ephemeral = Default(Boolean, False)
+      min_duration = Default(Integer, 5)
+      final = Default(Boolean, False)
+
+Construct default objects by following the object's type with `()`. If you
+want an attribute to have a value different from its default, include
+the attribute name and value inside the parentheses.
+
+    >>> Process()
+    Process(daemon=False, max_failures=1, ephemeral=False,
+      min_duration=5, final=False)
+
+Attribute values can be template variables, which then receive specific
+values when creating the object.
+
+    >>> Process(cmdline = 'echo {{message}}')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo {{message}}, final=False)
+
+    >>> Process(cmdline = 'echo {{message}}').bind(message = 'hello world')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+A powerful binding property is that all of an object's children inherit its
+bindings:
+
+    >>> List(Process)([
+    ... Process(name = '{{prefix}}_one'),
+    ... Process(name = '{{prefix}}_two')
+    ... ]).bind(prefix = 'hello')
+    ProcessList(
+      Process(daemon=False, name=hello_one, max_failures=1, ephemeral=False, min_duration=5, final=False),
+      Process(daemon=False, name=hello_two, max_failures=1, ephemeral=False, min_duration=5, final=False)
+      )
+
+Remember that an Aurora Job contains Tasks which contain Processes. A
+Job level binding is inherited by its Tasks and all their Processes.
+Similarly a Task level binding is available to that Task and its
+Processes but is *not* visible at the Job level (inheritance is a
+one-way street.)
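+
+As a minimal sketch (the file names and commands are illustrative), a single
+Task-level `.bind()` can supply a value used by several of its Processes:
+
+    stage = Process(name = 'stage', cmdline = 'cp {{package_path}} .')
+    run = Process(name = 'run', cmdline = './{{package_name}}')
+
+    hello_task = Task(
+      name = 'hello',
+      processes = [stage, run],
+      constraints = order(stage, run),
+      resources = Resources(cpu = 1.0, ram = 128*MB, disk = 128*MB)
+    ).bind(package_path = '/vagrant/hello.pex', package_name = 'hello.pex')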
+
+#### Mustaches Within Structurals
+
+When you define a `Struct` schema, one powerful, but confusing, feature
+is that all of that structure's attributes are Mustache variables within
+the enclosing scope *once they have been populated*.
+
+For example, when `Process` is defined above, all of its attributes, such as
+{{`name`}}, {{`cmdline`}}, and {{`max_failures`}}, are immediately
+defined as Mustache variables, implicitly bound into the `Process`, and
+are inherited by all child objects once they are defined.
+
+Thus, you can do the following:
+
+    >>> Process(name = "installer", cmdline = "echo {{name}} is running")
+    Process(daemon=False, name=installer, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo installer is running, final=False)
+
+WARNING: This binding only takes place in one direction. For example,
+the following does NOT work and does not set the `Process` `name`
+attribute's value.
+
+    >>> Process().bind(name = "installer")
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5, final=False)
+
+The following is also not possible and results in an infinite loop that
+attempts to resolve `Process.name`.
+
+    >>> Process(name = '{{name}}').bind(name = 'installer')
+
+Do not confuse Structural attributes with bound Mustache variables.
+Attributes are implicitly converted to Mustache variables but not vice
+versa.
+
+### Templating 2: Structurals Are Factories
+
+#### A Second Way of Templating
+
+A second templating method is as powerful as the one just described and is
+often confused with it. It relies on the automatic conversion of
+Struct attributes to Mustache variables described above.
+
+Suppose you create a Process object:
+
+    >>> p = Process(name = "process_one", cmdline = "echo hello world")
+
+    >>> p
+    Process(daemon=False, name=process_one, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+This `Process` object, "`p`", can be used wherever a `Process` object is
+needed. It can also be reused by changing the value(s) of its
+attribute(s). Here we change its `name` attribute from `process_one` to
+`process_two`.
+
+    >>> p(name = "process_two")
+    Process(daemon=False, name=process_two, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+Template creation is a common use for this technique:
+
+    >>> Daemon = Process(daemon = True)
+    >>> logrotate = Daemon(name = 'logrotate', cmdline = './logrotate conf/logrotate.conf')
+    >>> mysql = Daemon(name = 'mysql', cmdline = 'bin/mysqld --safe-mode')
+
+### Advanced Binding
+
+As described above, `.bind()` binds simple strings or numbers to
+Mustache variables. In addition to Structural types formed by combining
+atomic types, Pystachio has two container types; `List` and `Map` which
+can also be bound via `.bind()`.
+
+#### Bind Syntax
+
+The `bind()` function can take Python dictionaries or `kwargs`
+interchangeably (when `**kwargs` appears in a function definition, `kwargs`
+receives a Python dictionary containing all keyword arguments after the
+formal parameter list).
+
+    >>> String('{{foo}}').bind(foo = 'bar') == String('{{foo}}').bind({'foo': 'bar'})
+    True
+
+Bindings done "closer" to the object in question take precedence:
+
+    >>> p = Process(name = '{{context}}_process')
+    >>> t = Task().bind(context = 'global')
+    >>> t(processes = [p, p.bind(context = 'local')])
+    Task(processes=ProcessList(
+      Process(daemon=False, name=global_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5),
+      Process(daemon=False, name=local_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5)
+    ))
+
+#### Binding Complex Objects
+
+##### Lists
+
+    >>> fibonacci = List(Integer)([1, 1, 2, 3, 5, 8, 13])
+    >>> String('{{fib[4]}}').bind(fib = fibonacci)
+    String(5)
+
+##### Maps
+
+    >>> first_names = Map(String, String)({'Kent': 'Clark', 'Wayne': 'Bruce', 'Prince': 'Diana'})
+    >>> String('{{first[Kent]}}').bind(first = first_names)
+    String(Clark)
+
+##### Structurals
+
+    >>> String('{{p.cmdline}}').bind(p = Process(cmdline = "echo hello world"))
+    String(echo hello world)
+
+### Structural Binding
+
+Use structural templates when binding more than two or three individual
+values at the Job or Task level. For fewer values, standard
+key-to-string binding is sufficient.
+
+Structural binding is a very powerful pattern and is most useful in
+Aurora/Thermos for doing Structural configuration. For example, you can
+define a job profile. The following profile uses `HDFS`, the Hadoop
+Distributed File System, to designate a file's location. `HDFS` does
+not come with Aurora, so you'll need to either install it separately
+or change the way the dataset is designated.
+
+    class Profile(Struct):
+      version = Required(String)
+      environment = Required(String)
+      dataset = Default(String, 'hdfs://home/aurora/data/{{environment}}')
+
+    PRODUCTION = Profile(version = 'live', environment = 'prod')
+    DEVEL = Profile(version = 'latest',
+                    environment = 'devel',
+                    dataset = 'hdfs://home/aurora/data/test')
+    TEST = Profile(version = 'latest', environment = 'test')
+
+    JOB_TEMPLATE = Job(
+      name = 'application',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = '{{profile.environment}}',
+      task = SequentialTask(
+        name = 'task',
+        resources = Resources(cpu = 2, ram = 4*GB, disk = 8*GB),
+        processes = [
+          Process(name = 'main',
+                  cmdline = 'java -jar application.jar -hdfsPath {{profile.dataset}}')
+        ]
+       )
+     )
+
+    jobs = [
+      JOB_TEMPLATE(instances = 100).bind(profile = PRODUCTION),
+      JOB_TEMPLATE.bind(profile = DEVEL),
+      JOB_TEMPLATE.bind(profile = TEST),
+     ]
+
+In this case, a custom structural "Profile" is created to self-document
+the configuration to some degree. This also allows some schema
+"type-checking", and for default self-substitution, e.g. in
+`Profile.dataset` above.
+
+So rather than a `.bind()` with a half-dozen substituted variables, you
+can bind a single object that has sensible defaults stored in a single
+place.
+
+Configuration File Writing Tips And Best Practices
+--------------------------------------------------
+
+### Use As Few .aurora Files As Possible
+
+When creating your `.aurora` configuration, try to keep all versions of
+a particular job within the same `.aurora` file. For example, if you
+have separate jobs for `cluster1`, `cluster1` staging, `cluster1`
+testing, and `cluster2`, keep them as close together as possible.
+
+Constructs shared across multiple jobs owned by your team (e.g.
+team-level defaults or structural templates) can be split into separate
+`.aurora` files and included via the `include` directive, as in the sketch below.
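+
+As a hedged sketch (the file and template names are hypothetical), a shared
+defaults file might be pulled in like this:
+
+    # team_defaults.aurora -- shared by several job configurations
+    TeamProcess = Process(max_failures = 5, min_duration = 1)
+
+    # my_service.aurora
+    include('team_defaults.aurora')
+    main = TeamProcess(name = 'main', cmdline = './run_service.sh')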
+
+### Avoid Boilerplate
+
+If you see repetition or find yourself copy and pasting any parts of
+your configuration, it's likely an opportunity for templating. Take the
+example below:
+
+`redundant.aurora` contains:
+
+    download = Process(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'rm -rf Python-2.7.3 && tar xjf Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    build = Process(
+      name = 'build',
+      cmdline = 'pushd Python-2.7.3 && ./configure && make && popd',
+      max_failures = 1)
+
+    email = Process(
+      name = 'email',
+      cmdline = 'echo Success | mail feynman@tmc.com',
+      max_failures = 5,
+      min_duration = 1)
+
+    build_python = Task(
+      name = 'build_python',
+      processes = [download, unpack, build, email],
+      constraints = [Constraint(order = ['download', 'unpack', 'build', 'email'])])
+
+As you'll notice, there's a lot of repetition in the `Process`
+definitions. For example, almost every process sets a `max_failures`
+limit to 5 and a `min_duration` to 1. This is an opportunity for factoring
+into a common process template.
+
+Furthermore, the Python version is repeated everywhere. This can be
+bound via structural templating as described in the [Advanced Binding](#AdvancedBinding)
+section.
+
+`less_redundant.aurora` contains:
+
+    class Python(Struct):
+      version = Required(String)
+      base = Default(String, 'Python-{{version}}')
+      package = Default(String, '{{base}}.tar.bz2')
+
+    ReliableProcess = Process(
+      max_failures = 5,
+      min_duration = 1)
+
+    download = ReliableProcess(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/{{python.version}}/{{python.package}}')
+
+    unpack = ReliableProcess(
+      name = 'unpack',
+      cmdline = 'rm -rf {{python.base}} && tar xjf {{python.package}}')
+
+    build = ReliableProcess(
+      name = 'build',
+      cmdline = 'pushd {{python.base}} && ./configure && make && popd',
+      max_failures = 1)
+
+    email = ReliableProcess(
+      name = 'email',
+      cmdline = 'echo Success | mail {{role}}@foocorp.com')
+
+    build_python = SequentialTask(
+      name = 'build_python',
+      processes = [download, unpack, build, email]).bind(python = Python(version = "2.7.3"))
+
+### Thermos Uses bash, But Thermos Is Not bash
+
+#### Bad
+
+Many tiny Processes make for configurations that are harder to manage.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'rcp user@my_machine:my_application .'
+    )
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+#### Good
+
+Each `cmdline` runs in a bash subshell, so you have the full power of
+bash. Chaining commands with `&&` or `||` is almost always the right
+thing to do.
+
+Also, for Tasks that are simply a list of processes that run one after
+another, consider using the `SequentialTask` helper which applies a
+linear ordering constraint for you.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'rcp user@my_machine:my_application . && unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+### Rarely Use Functions In Your Configurations
+
+90% of the time you define a function in a `.aurora` file, you're
+probably Doing It Wrong(TM).
+
+#### Bad
+
+    def get_my_task(name, user, cpu, ram, disk):
+      return Task(
+        name = name,
+        user = user,
+        processes = [STAGE_PROCESS, RUN_PROCESS],
+        constraints = order(STAGE_PROCESS, RUN_PROCESS),
+        resources = Resources(cpu = cpu, ram = ram, disk = disk)
+      )
+
+    task_one = get_my_task('task_one', 'feynman', 1.0, 32*MB, 1*GB)
+    task_two = get_my_task('task_two', 'feynman', 2.0, 64*MB, 1*GB)
+
+#### Good
+
+This one is more idiomatic. Forced keyword arguments prevent accidents,
+e.g. constructing a task with "32*MB" when you mean 32MB of ram and not
+disk. Less proliferation of task-construction techniques means
+easier-to-read, quicker-to-understand, and a more composable
+configuration.
+
+    TASK_TEMPLATE = SequentialTask(
+      user = 'wickman',
+      processes = [STAGE_PROCESS, RUN_PROCESS],
+    )
+
+    task_one = TASK_TEMPLATE(
+      name = 'task_one',
+      resources = Resources(cpu = 1.0, ram = 32*MB, disk = 1*GB) )
+
+    task_two = TASK_TEMPLATE(
+      name = 'task_two',
+      resources = Resources(cpu = 2.0, ram = 64*MB, disk = 1*GB)
+    )
diff --git a/source/documentation/0.9.0/contributing.md b/source/documentation/0.9.0/contributing.md
new file mode 100644
index 0000000..14d93a2
--- /dev/null
+++ b/source/documentation/0.9.0/contributing.md
@@ -0,0 +1,76 @@
+Get the Source Code
+-------------------
+First things first, you'll need the source! The Aurora source is available from Apache git:
+
+    git clone https://gitbox.apache.org/repos/asf/aurora
+
+Read the Style Guides
+---------------------
+Aurora's codebase is primarily Java and Python and conforms to the Twitter Commons styleguides for
+both languages.
+
+- [Java Style Guide](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/styleguide.md)
+- [Python Style Guide](https://github.com/twitter/commons/blob/master/src/python/twitter/common/styleguide.md)
+
+Find Something to Do
+--------------------
+There are issues in [Jira](https://issues.apache.org/jira/browse/AURORA) with the
+["newbie" label](https://issues.apache.org/jira/issues/?jql=project%20%3D%20AURORA%20AND%20labels%20%3D%20newbie%20and%20resolution%3Dunresolved)
+that are good starting places for new Aurora contributors; pick one of these and dive in! Once
+you've got a patch, the next step is to post a review.
+
+Getting your ReviewBoard Account
+--------------------------------
+Go to https://reviews.apache.org and create an account.
+
+Setting up your ReviewBoard Environment
+---------------------------------------
+Run `./rbt status`. The first time this runs it will bootstrap and you will be asked to login.
+Subsequent runs will cache your login credentials.
+
+Submitting a Patch for Review
+-----------------------------
+Post a review with `rbt`, fill out the fields in your browser and hit Publish.
+
+    ./rbt post -o
+
+Once you've done this, you probably want to mark the associated Jira issue as Reviewable.
+
+Updating an Existing Review
+---------------------------
+Incorporate review feedback, make some more commits, update your existing review, fill out the
+fields in your browser and hit Publish.
+
+    ./rbt post -o -r <RB_ID>
+
+Merging Your Own Review (Committers)
+------------------------------------
+Once you have shipits from the right committers, merge your changes in a single commit and mark
+the review as submitted. The typical workflow is:
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>  # Verify the automatically-generated commit message looks sane,
+                            # editing if necessary.
+    git show master         # Verify everything looks sane
+    git push origin master
+    ./rbt close <RB_ID>
+
+Note that even if you're developing using feature branches you will not use `git merge` - each
+commit will be an atomic change accompanied by a ReviewBoard entry.
+
+Merging Someone Else's Review
+-----------------------------
+Sometimes you'll need to merge someone else's RB. The typical workflow for this is
+
+    git checkout master
+    git pull origin master
+    ./rbt patch -c <RB_ID>
+    git show master  # Verify everything looks sane, author is correct
+    git push origin master
+
+Cleaning Up
+-----------
+Your patch has landed, congratulations! The last thing you'll want to do before moving on to your
+next fix is to clean up your Jira issue and ReviewBoard review: mark the former as
+"Resolved" and the latter as "Submitted".
diff --git a/source/documentation/0.9.0/cron-jobs.md b/source/documentation/0.9.0/cron-jobs.md
new file mode 100644
index 0000000..a1f99fa
--- /dev/null
+++ b/source/documentation/0.9.0/cron-jobs.md
@@ -0,0 +1,131 @@
+# Cron Jobs
+
+Aurora supports execution of scheduled jobs on a Mesos cluster using cron-style syntax.
+
+- [Overview](#overview)
+- [Collision Policies](#collision-policies)
+	- [KILL_EXISTING](#kill_existing)
+	- [CANCEL_NEW](#cancel_new)
+- [Failure recovery](#failure-recovery)
+- [Interacting with cron jobs via the Aurora CLI](#interacting-with-cron-jobs-via-the-aurora-cli)
+	- [cron schedule](#cron-schedule)
+	- [cron deschedule](#cron-deschedule)
+	- [cron start](#cron-start)
+	- [job killall, job restart, job kill](#job-killall-job-restart-job-kill)
+- [Technical Note About Syntax](#technical-note-about-syntax)
+- [Caveats](#caveats)
+	- [Failovers](#failovers)
+	- [Collision policy is best-effort](#collision-policy-is-best-effort)
+	- [Timezone Configuration](#timezone-configuration)
+
+## Overview
+
+A job is identified as a cron job by the presence of a
+`cron_schedule` attribute containing a cron-style schedule in the
+[`Job`](/documentation/0.9.0/configuration-reference/#job-objects) object. Examples of cron schedules
+include "every 5 minutes" (`*/5 * * * *`), "Fridays at 17:00" (`* 17 * * FRI`), and
+"the 1st and 15th day of the month at 03:00" (`0 3 1,15 *`).
+
+Example (available in the [Vagrant environment](/documentation/0.9.0/vagrant/)):
+
+    $ cat /vagrant/examples/job/cron_hello_world.aurora
+    # cron_hello_world.aurora
+    # A cron job that runs every 5 minutes.
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]
+
+## Collision Policies
+
+The `cron_collision_policy` field specifies the scheduler's behavior when a new cron job is
+triggered while an older run hasn't finished. The scheduler has two policies available,
+[KILL_EXISTING](#kill_existing) and [CANCEL_NEW](#cancel_new).
+
+### KILL_EXISTING
+
+The default policy - on a collision the old instances are killed and instances with the current
+configuration are started.
+
+### CANCEL_NEW
+
+On a collision the new run is cancelled.
+
+Note that the use of this flag is likely a code smell - interrupted cron jobs should be able
+to recover their progress on a subsequent invocation, otherwise they risk having their work queue
+grow faster than they can process it.
+
+## Failure recovery
+
+Unlike services, which Aurora will always re-execute regardless of exit status, instances of
+cron jobs retry according to the `max_task_failures` attribute of the
+[Task](/documentation/0.9.0/configuration-reference/#task-objects) object. To get "run-until-success" semantics,
+set `max_task_failures` to `-1`.
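+
+As a hedged sketch (the job name, schedule, and command are illustrative), a
+cron job configured for run-until-success semantics might look like:
+
+    nightly_batch = Job(
+      cluster = 'devcluster',
+      role = 'www-data',
+      environment = 'test',
+      name = 'nightly_batch',
+      cron_schedule = '0 3 * * *',   # every day at 03:00
+      max_task_failures = -1,        # retry the task until it succeeds
+      task = SimpleTask('nightly_batch', './run_batch.sh'))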
+
+## Interacting with cron jobs via the Aurora CLI
+
+Most interaction with cron jobs takes place using the `cron` subcommand. See `aurora cron -h`
+for up-to-date usage instructions.
+
+### cron schedule
+Schedules a new cron job on the Aurora cluster for later runs or replaces the existing cron template
+with a new one. Only future runs will be affected; any existing active tasks are left intact.
+
+    $ aurora cron schedule devcluster/www-data/test/cron_hello_world /vagrant/examples/jobs/cron_hello_world.aurora
+
+### cron deschedule
+Deschedules a cron job, preventing future runs but allowing current runs to complete.
+
+    $ aurora cron deschedule devcluster/www-data/test/cron_hello_world
+
+### cron start
+Starts a cron job immediately, outside of its normal cron schedule.
+
+    $ aurora cron start devcluster/www-data/test/cron_hello_world
+
+### job killall, job restart, job kill
+Cron jobs create instances running on the cluster that you can interact with like normal Aurora
+tasks with `job kill` and `job restart`.
+
+## Technical Note About Syntax
+
+`cron_schedule` uses a restricted subset of BSD crontab syntax. While the
+execution engine currently uses Quartz, the schedule parsing is custom, a subset of FreeBSD
+[crontab(5)](http://www.freebsd.org/cgi/man.cgi?crontab(5)) syntax. See
+[the source](https://github.com/apache/aurora/blob/master/src/main/java/org/apache/aurora/scheduler/cron/CrontabEntry.java#L106-L124)
+for details.
+
+## Caveats
+
+### Failovers
+No failover recovery. Aurora does not record the latest minute it fired
+triggers for across failovers. Therefore it's possible to miss triggers
+on failover. Note that this behavior may change in the future.
+
+It's necessary to sync time between schedulers with something like `ntpd`.
+Clock skew could cause double or missed triggers in the case of a failover.
+
+### Collision policy is best-effort
+Aurora aims to always have *at least one copy* of a given instance running at a time - it's
+an AP system, meaning it chooses Availability and Partition Tolerance at the expense of
+Consistency.
+
+If your collision policy was `CANCEL_NEW` and a task has terminated but
+Aurora has not yet noticed this, Aurora will go ahead and create your new
+task.
+
+If your collision policy was `KILL_EXISTING` and a task was marked `LOST`
+but not yet GCed, Aurora will go ahead and create your new task without
+attempting to kill the old one (outside the GC interval).
+
+### Timezone Configuration
+Cron timezone is configured independently of JVM timezone with the `-cron_timezone` flag and
+defaults to UTC.
diff --git a/source/documentation/0.9.0/deploying-aurora-scheduler.md b/source/documentation/0.9.0/deploying-aurora-scheduler.md
new file mode 100644
index 0000000..0c6eace
--- /dev/null
+++ b/source/documentation/0.9.0/deploying-aurora-scheduler.md
@@ -0,0 +1,289 @@
+# Deploying the Aurora Scheduler
+
+When setting up your cluster, you will install the scheduler on a small number (usually 3 or 5) of
+machines.  This guide helps you get the scheduler set up and troubleshoot some common hurdles.
+
+- [Installing Aurora](#installing-aurora)
+  - [Creating the Distribution .zip File (Optional)](#creating-the-distribution-zip-file-optional)
+  - [Installing Aurora](#installing-aurora-1)
+- [Configuring Aurora](#configuring-aurora)
+  - [A Note on Configuration](#a-note-on-configuration)
+  - [Replicated Log Configuration](#replicated-log-configuration)
+  - [Initializing the Replicated Log](#initializing-the-replicated-log)
+  - [Storage Performance Considerations](#storage-performance-considerations)
+  - [Network considerations](#network-considerations)
+  - [Considerations for running jobs in docker](#considerations-for-running-jobs-in-docker)
+  - [Security Considerations](#security-considerations)
+- [Running Aurora](#running-aurora)
+  - [Maintaining an Aurora Installation](#maintaining-an-aurora-installation)
+  - [Monitoring](#monitoring)
+  - [Running stateful services](#running-stateful-services)
+    - [Dedicated attribute](#dedicated-attribute)
+      - [Syntax](#syntax)
+      - [Example](#example)
+- [Common problems](#common-problems)
+  - [Replicated log not initialized](#replicated-log-not-initialized)
+    - [Symptoms](#symptoms)
+    - [Solution](#solution)
+  - [Scheduler not registered](#scheduler-not-registered)
+    - [Symptoms](#symptoms-1)
+    - [Solution](#solution-1)
+  - [Tasks are stuck in PENDING forever](#tasks-are-stuck-in-pending-forever)
+    - [Symptoms](#symptoms-2)
+    - [Solution](#solution-2)
+
+## Installing Aurora
+The Aurora scheduler is a standalone Java server. As part of the build process it creates a bundle
+of all its dependencies, with the notable exceptions of the JVM and libmesos. Each target server
+should have a JVM (Java 7 or higher) and libmesos (0.22.0) installed.
+
+### Creating the Distribution .zip File (Optional)
+To create a distribution for installation you will need build tools installed. On Ubuntu this can be
+done with `sudo apt-get install build-essential default-jdk`.
+
+    git clone http://gitbox.apache.org/repos/asf/aurora.git
+    cd aurora
+    ./gradlew distZip
+
+Copy the generated `dist/distributions/aurora-scheduler-*.zip` to each node that will run a scheduler.
+
+### Installing Aurora
+Extract the aurora-scheduler zip file. The example configurations assume it is extracted to
+`/usr/local/aurora-scheduler`.
+
+    sudo unzip dist/distributions/aurora-scheduler-*.zip -d /usr/local
+    sudo ln -nfs "$(ls -dt /usr/local/aurora-scheduler-* | head -1)" /usr/local/aurora-scheduler
+
+## Configuring Aurora
+
+### A Note on Configuration
+Like Mesos, Aurora uses command-line flags for runtime configuration. As such the Aurora
+"configuration file" is typically a `scheduler.sh` shell script of the form.
+
+    #!/bin/bash
+    AURORA_HOME=/usr/local/aurora-scheduler
+
+    # Flags controlling the JVM.
+    JAVA_OPTS=(
+      -Xmx2g
+      -Xms2g
+      # GC tuning, etc.
+    )
+
+    # Flags controlling the scheduler.
+    AURORA_FLAGS=(
+      -http_port=8081
+      # Log configuration, etc.
+    )
+
+    # Environment variables controlling libmesos
+    export JAVA_HOME=...
+    export GLOG_v=1
+    export LIBPROCESS_PORT=8083
+
+    JAVA_OPTS="${JAVA_OPTS[*]}" exec "$AURORA_HOME/bin/aurora-scheduler" "${AURORA_FLAGS[@]}"
+
+That way Aurora's current flags are visible in `ps` and in the `/vars` admin endpoint.
+
+Examples are available under `examples/scheduler/`. For a list of available Aurora flags and their
+documentation run
+
+    /usr/local/aurora-scheduler/bin/aurora-scheduler -help
+
+### Replicated Log Configuration
+All Aurora state is persisted to a replicated log. This includes all jobs Aurora is running
+including where in the cluster they are being run and the configuration for running them, as
+well as other information such as metadata needed to reconnect to the Mesos master, resource
+quotas, and any other locks in place.
+
+Aurora schedulers use ZooKeeper to discover log replicas and elect a leader. Only one scheduler is
+leader at a given time - the other schedulers follow log writes and prepare to take over as leader
+but do not communicate with the Mesos master. Either 3 or 5 schedulers are recommended in a
+production deployment, depending on failure tolerance, and they must have persistent storage.
+
+In a cluster with `N` schedulers, the flag `-native_log_quorum_size` should be set to
+`floor(N/2) + 1`. So in a cluster with 1 scheduler it should be set to `1`, in a cluster with 3 it
+should be set to `2`, and in a cluster of 5 it should be set to `3`.
+
+  Number of schedulers (N) | ```-native_log_quorum_size``` setting (```floor(N/2) + 1```)
+  ------------------------ | -------------------------------------------------------------
+  1                        | 1
+  3                        | 2
+  5                        | 3
+  7                        | 4
+
+*Incorrectly setting this flag will cause data corruption to occur!*
+
+See [this document](/documentation/0.9.0/storage-config/#scheduler-storage-configuration-flags) for more replicated
+log and storage configuration options.
+
+### Initializing the Replicated Log
+Before you start Aurora you will also need to initialize the log on a majority of the masters.
+
+    mesos-log initialize --path="/path/to/native/log"
+
+The `--path` flag should match the `-native_log_file_path` flag to the scheduler.
+Failing to do this will result in the following message when you try to start the scheduler.
+
+    Replica in EMPTY status received a broadcasted recover request
+
+### Storage Performance Considerations
+
+See [this document](/documentation/0.9.0/scheduler-storage/) for scheduler storage performance considerations.
+
+### Network considerations
+The Aurora scheduler listens on 2 ports - an HTTP port used for client RPCs and a web UI,
+and a libprocess (HTTP+Protobuf) port used to communicate with the Mesos master and for the log
+replication protocol. These can be left unconfigured (the scheduler publishes all selected ports
+to ZooKeeper) or explicitly set in the startup script as follows:
+
+    # ...
+    AURORA_FLAGS=(
+      # ...
+      -http_port=8081
+      # ...
+    )
+    # ...
+    export LIBPROCESS_PORT=8083
+    # ...
+
+### Considerations for running jobs in docker containers
+*Note: Docker support is currently EXPERIMENTAL.*
+
+In order for Aurora to launch jobs using docker containers, a few extra configuration options
+must be set.  The [docker containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/)
+must be enabled on the mesos slaves by launching them with the `--containerizers=docker,mesos` option.
+
+By default, Aurora will configure Mesos to copy the file specified in `-thermos_executor_path`
+into the container's sandbox.  If using a wrapper script to launch the thermos executor,
+specify the path to the wrapper in that argument. In addition, the path to the executor pex itself
+must be included in the `-thermos_executor_resources` option. Doing so will ensure that both the
+wrapper script and executor are correctly copied into the sandbox. Finally, ensure the wrapper
+script does not access resources outside of the sandbox, as when the script is run from within a
+docker container those resources will not exist.
+
+A scheduler flag, `-global_container_mounts` allows mounting paths from the host (i.e., the slave)
+into all containers on that host. The format is a comma-separated list of `host_path:container_path[:mode]`
+tuples. For example `-global_container_mounts=/opt/secret_keys_dir:/mnt/secret_keys_dir:ro` mounts
+`/opt/secret_keys_dir` from the slaves into all launched containers. Valid modes are `ro` and `rw`.
+
+In order to correctly execute processes inside a job, the docker container must have python 2.7
+installed.
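+
+As a hedged sketch, assuming the `Container` and `Docker` objects described in
+the configuration reference (the image name and command are illustrative), a
+job that runs inside a docker container might be configured as:
+
+    hello_docker = Job(
+      cluster = 'devcluster',
+      role = 'www-data',
+      environment = 'devel',
+      name = 'hello_docker',
+      container = Container(docker = Docker(image = 'python:2.7')),
+      task = SimpleTask('hello_docker', 'echo "hello from a container"'))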
+
+## Running Aurora
+Configure a supervisor like [Monit](http://mmonit.com/monit/) or
+[supervisord](http://supervisord.org/) to run the created `scheduler.sh` file and restart it
+whenever it fails. Aurora expects to be restarted by an external process when it fails. Aurora
+supports an active health checking protocol on its admin HTTP interface - if a `GET /health` times
+out or returns anything other than `200 OK` the scheduler process is unhealthy and should be
+restarted.
+
+For example, monit can be configured with
+
+    if failed port 8081 send "GET /health HTTP/1.0\r\n" expect "OK\n" with timeout 2 seconds for 10 cycles then restart
+
+assuming you set `-http_port=8081`.
+
+### Security Considerations
+
+See [security.md](/documentation/0.9.0/security/).
+
+### Maintaining an Aurora Installation
+
+### Monitoring
+Please see our dedicated [monitoring guide](/documentation/0.9.0/monitoring/) for in-depth discussion on monitoring.
+
+### Running stateful services
+Aurora is best suited to run stateless applications, but it also accommodates stateful services
+like databases, or services that otherwise need to always run on the same machines.
+
+#### Dedicated attribute
+The Mesos slave has the `--attributes` command line argument which can be used to mark a slave with
+static attributes (not to be confused with `--resources`, which are dynamic and accounted).
+
+Aurora makes these attributes available for matching with scheduling
+[constraints](/documentation/0.9.0/configuration-reference/#specifying-scheduling-constraints).  Most of these
+constraints are arbitrary and available for custom use.  There is one exception, though: the
+`dedicated` attribute.  Aurora treats this attribute specially: only jobs whose constraints match it
+may run on these machines, and Aurora will schedule those jobs only on these machines.
+
+##### Syntax
+The dedicated attribute has semantic meaning. The format is `$role(/.*)?`. When a job is created,
+the scheduler requires that the `$role` component matches the `role` field in the job
+configuration, and will reject the job creation otherwise.  The remainder of the attribute is
+free-form. We've developed the idiom of formatting this attribute as `$role/$job`, but do not
+enforce this.
+
+##### Example
+Consider the following slave command line:
+
+    mesos-slave --attributes="host:$HOST;rack:$RACK;dedicated:db_team/redis" ...
+
+And this job configuration:
+
+    Service(
+      name = 'redis',
+      role = 'db_team',
+      constraints = {
+        'dedicated': 'db_team/redis'
+      },
+      ...
+    )
+
+The job configuration is indicating that it should only be scheduled on slaves with the attribute
+`dedicated:db_team/redis`.  Additionally, Aurora will prevent any tasks that do _not_ have that
+constraint from running on those slaves.
+
+
+## Common problems
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions here to help get you moving.
+
+### Replicated log not initialized
+
+#### Symptoms
+- Scheduler RPCs and web interface claim `Storage is not READY`
+- Scheduler log repeatedly prints messages like
+
+  ```
+  I1016 16:12:27.234133 26081 replica.cpp:638] Replica in EMPTY status
+  received a broadcasted recover request
+  I1016 16:12:27.234256 26084 recover.cpp:188] Received a recover response
+  from a replica in EMPTY status
+  ```
+
+#### Solution
+When you create a new cluster, you need to inform a quorum of schedulers that they are safe to
+consider their database to be empty by [initializing](#initializing-the-replicated-log) the
+replicated log. This is done to prevent the scheduler from modifying the cluster state in the event
+of multiple simultaneous disk failures or, more likely, misconfiguration of the replicated log path.
+
+### Scheduler not registered
+
+#### Symptoms
+Scheduler log contains
+
+    Framework has not been registered within the tolerated delay.
+
+#### Solution
+Double-check that the scheduler is configured correctly to reach the master. If you are registering
+the master in ZooKeeper, make sure the command line argument to the master:
+
+    --zk=zk://$ZK_HOST:2181/mesos/master
+
+is the same as the one on the scheduler:
+
+    -mesos_master_address=zk://$ZK_HOST:2181/mesos/master
+
+### Tasks are stuck in `PENDING` forever
+
+#### Symptoms
+The scheduler is registered, and [receiving offers](/documentation/0.9.0/monitoring/#scheduler_resource_offers),
+but tasks are perpetually shown as `PENDING - Constraint not satisfied: host`.
+
+#### Solution
+Check that your slaves are configured with `host` and `rack` attributes.  Aurora requires that
+slaves are tagged with these two common failure domains to ensure that it can safely place tasks
+such that jobs are resilient to failure.
+
+See our [vagrant example](examples/vagrant/upstart/mesos-slave.conf) for details.
diff --git a/source/documentation/0.9.0/design/command-hooks.md b/source/documentation/0.9.0/design/command-hooks.md
new file mode 100644
index 0000000..3f3f70f
--- /dev/null
+++ b/source/documentation/0.9.0/design/command-hooks.md
@@ -0,0 +1,102 @@
+# Command Hooks for the Aurora Client
+
+## Introduction/Motivation
+
+We've got hooks in the client that surround API calls. These are
+pretty awkward, because they don't correlate with user actions. For
+example, suppose we wanted a policy that said users weren't allowed to
+kill all instances of a production job at once.
+
+Right now, all that we could hook would be the "killJob" api call. But
+kill (at least in newer versions of the client) normally runs in
+batches. If a user called killall, what we would see on the API level
+is a series of "killJob" calls, each of which specified a batch of
+instances. We woudn't be able to distinguish between really killing
+all instances of a job (which is forbidden under this policy), and
+carefully killing in batches (which is permitted.) In each case, the
+hook would just see a series of API calls, and couldn't find out what
+the actual command being executed was!
+
+For most policy enforcement, what we really want to be able to do is
+look at and vet the commands that a user is performing, not the API
+calls that the client uses to implement those commands.
+
+So I propose that we add a new kind of hook, which surrounds noun/verb
+commands. A hook will register itself to handle a collection of (noun,
+verb) pairs. Whenever any of those noun/verb commands are invoked, the
+hook's methods will be called around the execution of the verb. A
+pre-hook will have the ability to reject a command, preventing the
+verb from being executed.
+
+## Registering Hooks
+
+These hooks will be registered via configuration plugins. A configuration plugin
+can register hooks using an API. Hooks registered this way are, effectively,
+hardwired into the client executable.
+
+The order of execution of hooks is unspecified: they may be called in
+any order. There is no way to guarantee that one hook will execute
+before some other hook.
+
+
+### Global Hooks
+
+Hooks registered by the Python call are called _global_ hooks,
+because they will run for all configurations, whether or not they
+specify any hooks in the configuration file.
+
+In the implementation, hooks are registered in the module
+`apache.aurora.client.cli.command_hooks`, using the class
+`GlobalCommandHookRegistry`. A global hook can be registered by calling
+`GlobalCommandHookRegistry.register_command_hook` in a configuration plugin.
+
+### The API
+
+    class CommandHook(object):
+      @property
+      def name(self):
+        """Returns a name for the hook."
+
+      def get_nouns(self):
+        """Return the nouns that have verbs that should invoke this hook."""
+
+      def get_verbs(self, noun):
+        """Return the verbs for a particular noun that should invoke his hook."""
+
+      @abstractmethod
+      def pre_command(self, noun, verb, context, commandline):
+        """Execute a hook before invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        Returns: True if the command should be allowed to proceed; False if the command
+        should be rejected.
+        """
+
+      def post_command(self, noun, verb, context, commandline, result):
+        """Execute a hook after invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        * result: the result code returned by the verb.
+        Returns: nothing
+        """
+
+    class GlobalCommandHookRegistry(object):
+      @classmethod
+      def register_command_hook(cls, hook):
+        pass
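+
+As an illustrative sketch (the hook name and the policy check are made up), a
+concrete hook registered by a configuration plugin might look like:
+
+    class KillallPolicyHook(CommandHook):
+      @property
+      def name(self):
+        return "killall_policy"
+
+      def get_nouns(self):
+        return ["job"]
+
+      def get_verbs(self, noun):
+        return ["killall"] if noun == "job" else []
+
+      def pre_command(self, noun, verb, context, commandline):
+        # Returning False rejects the command before the verb executes.
+        return "/prod/" not in " ".join(commandline)
+
+      def post_command(self, noun, verb, context, commandline, result):
+        pass
+
+    # Registered from a configuration plugin:
+    GlobalCommandHookRegistry.register_command_hook(KillallPolicyHook())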
+
+### Skipping Hooks
+
+To skip a hook, a user uses a command-line option, `--skip-hooks`. The option can either
+specify specific hooks to skip, or "all":
+
+* `aurora --skip-hooks=all job create east/bozo/devel/myjob` will create a job
+  without running any hooks.
+* `aurora --skip-hooks=test,iq job create east/bozo/devel/myjob` will create a job,
+  and will skip only the hooks named "test" and "iq".
diff --git a/source/documentation/0.9.0/developing-aurora-client.md b/source/documentation/0.9.0/developing-aurora-client.md
new file mode 100644
index 0000000..3d9323c
--- /dev/null
+++ b/source/documentation/0.9.0/developing-aurora-client.md
@@ -0,0 +1,91 @@
+Getting Started
+===============
+
+The client is written in Python, and uses the
+[Pants](http://pantsbuild.github.io/python-readme.html) build tool.
+
+Client Configuration
+====================
+
+The client uses a configuration file that specifies available clusters. More information about the
+contents of this file can be found in the
+[Client Cluster Configuration](/documentation/0.9.0/client-cluster-configuration/) documentation. Information about
+how the client locates this file can be found in the
+[Client Commands](/documentation/0.9.0/client-commands/#cluster-configuration) documentation.
+
+Building and Testing the Client
+===============================
+
+Building and testing the client code are both done using Pants. The relevant targets to know about
+are:
+
+   * Build a client executable: `./pants binary src/main/python/apache/aurora/client/cli:aurora`
+   * Test client code: `./pants test src/test/python/apache/aurora/client/cli:all`
+
+Running/Debugging the Client
+============================
+
+For manually testing client changes against a cluster, we use [Vagrant](https://www.vagrantup.com/).
+To start a virtual cluster, you need to install Vagrant, and then run `vagrant up` from the root of
+the aurora workspace. This will create a vagrant host named "devcluster", with a mesos master, a set
+of mesos slaves, and an aurora scheduler.
+
+If you have a change you would like to test in your local cluster, you'll rebuild the client:
+
+    vagrant ssh -c 'aurorabuild client'
+
+Once this completes, the `aurora` command will reflect your changes.
+
+Running/Debugging the Client in PyCharm
+=======================================
+
+It's possible to use PyCharm to run and debug both the client and client tests in an IDE. In order
+to do this, first run:
+
+    build-support/python/make-pycharm-virtualenv
+
+This script will configure a virtualenv with all of our Python requirements. Once the script
+completes it will emit instructions for configuring PyCharm:
+
+    Your PyCharm environment is now set up.  You can open the project root
+    directory with PyCharm.
+
+    Once the project is loaded:
+      - open project settings
+      - click 'Project Interpreter'
+      - click the cog in the upper-right corner
+      - click 'Add Local'
+      - select 'build-support/python/pycharm.venv/bin/python'
+      - click 'OK'
+
+### Running/Debugging Tests
+
+After following these instructions, you should now be able to run/debug tests directly from the IDE
+by right-clicking on a test (or test class) and choosing to run or debug:
+
+[![Debug Client Test](images/debug-client-test.png)](images/debug-client-test.png)
+
+If you've set a breakpoint, you can see the run will now stop and let you debug:
+
+[![Debugging Client Test](images/debugging-client-test.png)](images/debugging-client-test.png)
+
+### Running/Debugging the Client
+
+Actually running and debugging the client is unfortunately a bit more complex. You'll need to create
+a Run configuration:
+
+* Go to Run → Edit Configurations
+* Click the + icon to add a new configuration.
+* Choose python and name the configuration 'client'.
+* Set the script path to `/your/path/to/aurora/src/main/python/apache/aurora/client/cli/client.py`
+* Set the script parameters to the command you want to run (e.g. `job status <job key>`)
+* Expand the Environment section and click the ellipsis to add a new environment variable
+* Click the + at the bottom to add a new variable named AURORA_CONFIG_ROOT whose value is the
+  path where your cluster configuration can be found. For example, to talk to the scheduler
+  running in the vagrant image, it would be set to `/your/path/to/aurora/examples/vagrant` (this
+  is the directory where our example clusters.json is found).
+* You should now be able to run and debug this configuration!
+
+Making thrift schema changes
+============================
+See [this document](/documentation/0.9.0/thrift-deprecation/) for any thrift related changes.
diff --git a/source/documentation/0.9.0/developing-aurora-scheduler.md b/source/documentation/0.9.0/developing-aurora-scheduler.md
new file mode 100644
index 0000000..62910f3
--- /dev/null
+++ b/source/documentation/0.9.0/developing-aurora-scheduler.md
@@ -0,0 +1,112 @@
+Java code in the aurora repo is built with [Gradle](http://gradle.org).
+
+Getting Started
+===============
+You will need Java 7 installed and on your `PATH` or unzipped somewhere with `JAVA_HOME` set. Then
+
+    ./gradlew tasks
+
+will bootstrap the build system and show available tasks. This can take a while the first time you
+run it but subsequent runs will be much faster due to cached artifacts.
+
+Running the Tests
+-----------------
+Aurora has a comprehensive unit test suite. To run the tests use
+
+    ./gradlew build
+
+Gradle will only re-run tests when their dependencies have changed. To force a re-run of all
+tests use
+
+    ./gradlew clean build
+
+Running the build with code quality checks
+------------------------------------------
+To speed up development iteration, the plain gradle commands will not run static analysis tools.
+However, you should run them before posting a review diff, and **always** run them before pushing a
+commit to origin/master.
+
+    ./gradlew build -Pq
+
+Creating a bundle for deployment
+--------------------------------
+Gradle can create a zip file containing Aurora, all of its dependencies, and a launch script with
+
+    ./gradlew distZip
+
+or a tar file containing the same files with
+
+    ./gradlew distTar
+
+The output file will be written to `dist/distributions/aurora-scheduler.zip` or
+`dist/distributions/aurora-scheduler.tar`.
+
+Developing Aurora Java code
+===========================
+
+Setting up an IDE
+-----------------
+Gradle can generate project files for your IDE. To generate an IntelliJ IDEA project run
+
+    ./gradlew idea
+
+and import the generated `aurora.ipr` file.
+
+Adding or Upgrading a Dependency
+--------------------------------
+New dependencies can be added from Maven central by adding a `compile` dependency to `build.gradle`.
+For example, to add a dependency on `com.example`'s `example-lib` 1.0 add this block:
+
+    compile 'com.example:example-lib:1.0'
+
+NOTE: Anyone thinking about adding a new dependency should first familiarize themselves with the
+Apache Foundation's third-party licensing
+[policy](http://www.apache.org/legal/resolved.html#category-x).
+
+Developing Aurora UI
+======================
+
+Installing bower (optional)
+----------------------------
+Third party JS libraries used in Aurora (located at 3rdparty/javascript/bower_components) are
+managed by bower, a JS dependency manager. Bower is only required if you plan to add, remove or
+update JS libraries. Bower can be installed using the following command:
+
+    npm install -g bower
+
+Bower depends on node.js and npm. The easiest way to install node on a Mac is via Homebrew:
+
+    brew install node
+
+For more node.js installation options refer to https://github.com/joyent/node/wiki/Installation.
+
+More info on installing and using bower can be found at: http://bower.io/. Once installed, you can
+use the following commands to view and modify the bower repo at
+3rdparty/javascript/bower_components:
+
+    bower list
+    bower install <library name>
+    bower remove <library name>
+    bower update <library name>
+    bower help
+
+Developing the Aurora Build System
+==================================
+
+Bootstrapping Gradle
+--------------------
+The following files were autogenerated by `gradle wrapper` using gradle 1.8's
+[Wrapper](http://www.gradle.org/docs/1.8/dsl/org.gradle.api.tasks.wrapper.Wrapper.html) plugin and
+should not be modified directly:
+
+    ./gradlew
+    ./gradlew.bat
+    ./gradle/wrapper/gradle-wrapper.jar
+    ./gradle/wrapper/gradle-wrapper.properties
+
+To upgrade Gradle unpack the new version somewhere, run `/path/to/new/gradle wrapper` in the
+repository root and commit the changed files.
+
+Making thrift schema changes
+============================
+See [this document](/documentation/0.9.0/thrift-deprecation/) for any thrift related changes.
diff --git a/source/documentation/0.9.0/hooks.md b/source/documentation/0.9.0/hooks.md
new file mode 100644
index 0000000..63dbdb9
--- /dev/null
+++ b/source/documentation/0.9.0/hooks.md
@@ -0,0 +1,246 @@
+# Hooks for Aurora Client API
+
+- [Introduction](#introduction)
+- [Hook Types](#hook-types)
+- [Execution Order](#execution-order)
+- [Hookable Methods](#hookable-methods)
+- [Activating and Using Hooks](#activating-and-using-hooks)
+- [.aurora Config File Settings](#aurora-config-file-settings)
+- [Command Line](#command-line)
+- [Hooks Protocol](#hooks-protocol)
+  - [pre_ Methods](#pre_-methods)
+  - [err_ Methods](#err_-methods)
+  - [post_ Methods](#post_-methods)
+- [Generic Hooks](#generic-hooks)
+- [Hooks Process Checklist](#hooks-process-checklist)
+
+## Introduction
+
+You can execute hook methods around Aurora API Client methods when they are called by the Aurora Command Line commands.
+
+Explaining how hooks work is a bit tricky because of some indirection about what they apply to. Basically, a hook is code that executes when a particular Aurora Client API method runs, letting you extend the method's actions. The hook executes on the client side, specifically on the machine executing Aurora commands.
+
+The catch is that hooks are associated with Aurora Client API methods, which users don't directly call. Instead, users call Aurora Command Line commands, which call Client API methods during their execution. Since which hooks run depends on which Client API methods get called, you will need to know which Command Line commands call which API methods. Later on, there is a table showing the various associations.
+
+**Terminology Note**: From now on, "method(s)" refer to Client API methods, and "command(s)" refer to Command Line commands.
+
+## Hook Types
+
+Hooks have three basic types, differing by when they run with respect to their associated method.
+
+`pre_<method_name>`: When its associated method is called, the `pre_` hook executes first, then the called method. If the `pre_` hook fails, the method never runs. Later code that expects the method to have succeeded may then fail, which can terminate the Aurora client.
+
+Note that a `pre_` hook can error-trap internally so it does not
+return `False`. Designers/contributors of new `pre_` hooks should
+consider whether or not to error-trap them. You can error trap at the
+highest level very generally and always pass the `pre_` hook by
+returning `True`. For example:
+
+```python
+def pre_create(...):
+  do_something()  # if do_something fails with an exception, the create_job is not attempted!
+  return True
+
+# However...
+def pre_create(...):
+  try:
+    do_something()  # may cause exception
+  except Exception:  # generic error trap will catch it
+    pass  # and ignore the exception
+  return True  # create_job will run in any case!
+```
+
+`post_<method_name>`: A `post_` hook executes after its associated method successfully finishes running. If it fails, the already executed method is unaffected. A `post_` hook's error is trapped, and any later operations are unaffected.
+
+`err_<method_name>`: Executes only when its associated method returns a status other than OK or throws an exception. If an `err_` hook fails, the already executed method is unaffected. An `err_` hook's error is trapped, and any later operations are unaffected.
+
+## Execution Order
+
+A command with `pre_`, `post_`, and `err_` hooks defined and activated for its called method executes in the following order when the method successfully executes:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and successfully finishes
+6. `post_` method hook runs
+7. Command code executes
+8. Command execution ends
+
+The following is what happens when, for the same command and hooks, the method associated with the command suffers an error and does not successfully finish executing:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and fails
+6. `err_` method hook runs
+7. Command Code executes (if `err_` method does not end the command execution)
+8. Command execution ends
+
+Note that the `post_` and `err_` hooks for the same method can never both run for a single execution of that method.
+
+## Hookable Methods
+
+You can associate `pre_`, `post_`, and `err_` hooks with the following methods. Since you do not directly interact with the methods, but rather the Aurora Command Line commands that call them, for each method we also list the command(s) that can call the method. Note that a different method or methods may be called by a command depending on how the command's other code executes. Similarly, multiple commands can call the same method. We also list the methods' argument signatures, which are used by their associated hooks. <a name="Chart"></a>
+
+  Aurora Client API Method | Client API Method Argument Signature | Aurora Command Line Command
+  -------------------------| ------------------------------------- | ---------------------------
+  ```cancel_update``` | ```self```, ```job_key``` | ```job cancel-update```
+  ```create_job``` | ```self```, ```config``` | ```job create```, ```runtask```
+  ```restart``` | ```self```, ```job_key```, ```shards```, ```update_config```, ```health_check_interval_seconds``` | ```job restart```
+  ```update_job``` | ```self```, ```config```, ```health_check_interval_seconds=3```, ```shards=None``` | ```job update```
+  ```kill_job``` | ```self```, ```job_key```, ```shards=None``` |  ```job kill```
+  ```start_cronjob``` | ```self```, ```job_key``` | ```cron start```
+  ```start_job_update``` | ```self```, ```config```, ```instances=None``` | ```update start```
+
+Some specific examples:
+
+* `pre_create_job` executes when a `create_job` method is called, and before the `create_job` method itself executes.
+
+* `post_cancel_update` executes after a `cancel_update` method has successfully finished running.
+
+* `err_kill_job` executes when the `kill_job` method is called, but doesn't successfully finish running.
+
+## Activating and Using Hooks
+
+By default, hooks are inactive. If you do not want to use hooks, you do not need to make any changes to your code. If you do want to use hooks, you will need to alter your `.aurora` config file to activate them both for the configuration as a whole and for individual `Job`s. And, of course, you will need to define in your config file what happens when a particular hook executes.
+
+## .aurora Config File Settings
+
+You can define a top-level `hooks` variable in any `.aurora` config file. `hooks` is a list of all objects that define hooks used by `Job`s defined in that config file. If you do not want to define any hooks for a configuration, `hooks` is optional.
+
+    hooks = [Object_with_defined_hooks1, Object_with_defined_hooks2]
+
+Be careful when assembling a config file using `include` on multiple smaller config files. If there are multiple files that assign a value to `hooks`, only the last assignment made will stick. For example, if `x.aurora` has `hooks = [a, b, c]` and `y.aurora` has `hooks = [d, e, f]` and `z.aurora` has, in this order, `include x.aurora` and `include y.aurora`, the `hooks` value will be `[d, e, f]`.
+
+Also, for any `Job` that you want to use hooks with, its `Job` definition in the `.aurora` config file must set an `enable_hooks` flag to `True` (it defaults to `False`). By default, hooks are disabled and you must enable them for `Job`s of your choice.
+
+To summarize, to use hooks for a particular job, you must both activate hooks for your config file as a whole, and for that job. Activating hooks only for individual jobs won't work, nor will only activating hooks for your config file as a whole. You must also specify the hooks' defining object in the `hooks` variable.
+
+Recall that `.aurora` config files are written in Pystachio. So the following turns on hooks for production jobs at cluster1 and cluster2, but leaves them off for similar jobs with a defined user role. Of course, you also need to list the objects that define the hooks in your config file's `hooks` variable.
+
+```python
+jobs = [
+        Job(enable_hooks = True, cluster = c, env = 'prod') for c in ('cluster1', 'cluster2')
+       ]
+jobs.extend(
+   Job(cluster = c, env = 'prod', role = getpass.getuser()) for c in ('cluster1', 'cluster2'))
+   # Hooks disabled for these jobs
+```
+
+## Command Line
+
+All Aurora Command Line commands now accept an `.aurora` config file as an optional parameter (some, of course, accept it as a required parameter). Whenever a command has a `.aurora` file parameter, any hooks specified and activated in the `.aurora` file can be used. For example:
+
+    aurora job restart cluster1/role/env/app myapp.aurora
+
+The command activates any hooks specified and activated in `myapp.aurora`. For the `restart` command, that is the only thing the `myapp.aurora` parameter does. So, if the command were the following, no hooks could run on the `restart` command, since there is no `.aurora` config file to specify any.
+
+    aurora job restart cluster1/role/env/app
+
+## Hooks Protocol
+
+Any object defined in the `.aurora` config file can define hook methods. You should define your hook methods within a class, and then use the class name as a value in the `hooks` list in your config file.
+
+Note that you can define other methods in the class that its hook methods can call; all the logic of a hook does not have to be in its definition.
+
+The following example defines a class containing a `pre_kill_job` hook definition that calls another method defined in the class.
+
+```python
+# Defines a method pre_kill_job
+class KillConfirmer(object):
+  def confirm(self, msg):
+    return raw_input(msg).lower() == 'yes'
+
+  def pre_kill_job(self, job_key, shards=None):
+    shards = ('shards %s' % shards) if shards is not None else 'all shards'
+    return self.confirm('Are you sure you want to kill %s (%s)? (yes/no): '
+                        % (job_key, shards))
+```
+
+### pre_ Methods
+
+`pre_` methods have the signature:
+
+    pre_<API method name>(self, <associated method's signature>)
+
+`pre_` methods have the same signature as their associated method, with the addition of `self` as the first parameter. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `pre_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+If this method returns `False`, the API method call aborts.
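+
+As an illustration only (the class and behavior are hypothetical, not part of Aurora), a `pre_` hook can use the catch-all syntax so one implementation works regardless of the method's exact argument list:
+
+```python
+class BusinessHoursGuard(object):
+  '''Hypothetical pre_ hook: refuse job creation outside business hours.'''
+  def pre_create_job(self, *args, **kw):
+    import datetime
+    hour = datetime.datetime.now().hour
+    if 9 <= hour < 17:
+      return True
+    print('Refusing to create a job outside business hours.')
+    return False  # returning False aborts the create_job call
+```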
+
+### err_ Methods
+
+`err_` methods have the signature:
+
+    err_<API method name>(self, exc, <associated method's signature>)
+
+`err_` methods have the same signature as their associated method, with the addition of a first parameter `self` and a second parameter `exc`. `exc` is either a result with responseCode other than `ResponseCode.OK` or an `Exception`. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `err`_ methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+`err_` method return codes are ignored.
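+
+A minimal sketch of an `err_` hook (hypothetical class and behavior), following the signature above for `kill_job`:
+
+```python
+class KillAuditor(object):
+  '''Hypothetical err_ hook: record when kill_job does not finish successfully.'''
+  def err_kill_job(self, exc, job_key, shards=None):
+    # exc is either a non-OK result or an Exception; this hook's return value is ignored.
+    print('kill_job failed for %s (shards=%s): %s' % (job_key, shards, exc))
+```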
+
+### post_ Methods
+
+`post_` methods have the signature:
+
+    post_<API method name>(self, result, <associated method signature>)
+
+`post_` method parameters are `self`, then `result`, followed by the same parameter signature as their associated method. `result` is the result of the associated method call. See the [chart](#chart) above for the mapping of parameters to methods. When writing `post_` methods, you can use the `*` and `**` syntax to designate that all unspecified arguments are passed in a list to the `*`ed parameter and all unspecified named arguments with values are passed as name/value pairs to the `**`ed parameter.
+
+`post_` method return codes are ignored.
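+
+A minimal sketch of a `post_` hook (hypothetical class and behavior), following the signature above for `create_job`:
+
+```python
+class CreateNotifier(object):
+  '''Hypothetical post_ hook: report the result of a successful create_job.'''
+  def post_create_job(self, result, config):
+    # result is whatever create_job returned; this hook's return value is ignored.
+    print('create_job completed with result: %s' % (result,))
+```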
+
+## Generic Hooks
+
+There are seven Aurora API methods to which any of the three hook types can attach. Thus, there are 21 possible hook/method combinations for a single `.aurora` config file. Say that you define `pre_` and `post_` hooks for the `restart` method. That leaves 19 undefined hook/method combinations: `err_restart`, plus the `pre_`, `post_`, and `err_` hooks for each of the other six hookable methods. You can define what happens when any of these otherwise undefined 19 hooks execute via a generic hook, whose signature is:
+
+```python
+generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw)
+```
+
+where:
+
+* `hook_config` is a named tuple of `config` (the Pystachio `config` object) and `job_key`.
+
+* `event` is one of `pre`, `err`, or `post`, indicating which type of hook the generic hook is standing in for. For example, assume no specific hooks were defined for the `restart` API method. If `generic_hook` is defined and activated, and `restart` is called, `generic_hook` will effectively run as `pre_restart`, `post_restart`, and `err_restart`. You can use a selection statement on this value so that `generic_hook` will act differently based on whether it is standing in for a `pre_`, `post_`, or `err_` hook.
+
+* `method_name` is the Client API method name whose execution is causing this execution of the `generic_hook`.
+
+* `*args`, `**kw` are the API method's positional arguments and keyword arguments, respectively.
+* `result_or_err` is a tri-state parameter taking one of these three values:
+  1. `None` for `pre_` hooks
+  2. `result` for `post_` hooks
+  3. `exc` for `err_` hooks
+
+Example:
+
+```python
+# Overrides the standard do-nothing generic_hook by adding a log writing operation.
+from twitter.common import log
+
+class Logger(object):
+  '''Adds to the log every time a hookable API method is called.'''
+  def generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw):
+    log.info('%s: %s_%s of %s'
+             % (self.__class__.__name__, event, method_name, hook_config.job_key))
+```
+
+## Hooks Process Checklist
+
+1. In your `.aurora` config file, add a `hooks` variable. Note that you may want to define a `.aurora` file only for hook definitions and then include this file in multiple other config files that you want to use the same hooks.
+
+```python
+hooks = []
+```
+
+2. In the `hooks` variable, list all objects that define hooks used by `Job`s defined in this config:
+
+```python
+hooks = [Object_hook_definer1, Object_hook_definer2]
+```
+
+3. For each job that uses hooks in this config file, add `enable_hooks = True` to the `Job` definition. Note that this is necessary even if you only want to use the generic hook.
+
+4. Write your `pre_`, `post_`, and `err_` hook definitions as part of an object definition in your `.aurora` config file.
+
+5. If desired, write your `generic_hook` definition as part of an object definition in your `.aurora` config file. Remember, the object must be listed as a member of `hooks`.
+
+6. If your Aurora command line command does not otherwise take an `.aurora` config file argument, add the appropriate `.aurora` file as an argument in order to define and activate the configuration's hooks.
diff --git a/publish/documentation/latest/images/CPUavailability.png b/source/documentation/0.9.0/images/CPUavailability.png
similarity index 100%
copy from publish/documentation/latest/images/CPUavailability.png
copy to source/documentation/0.9.0/images/CPUavailability.png
Binary files differ
diff --git a/publish/documentation/latest/images/HelloWorldJob.png b/source/documentation/0.9.0/images/HelloWorldJob.png
similarity index 100%
copy from publish/documentation/latest/images/HelloWorldJob.png
copy to source/documentation/0.9.0/images/HelloWorldJob.png
Binary files differ
diff --git a/publish/documentation/latest/images/RoleJobs.png b/source/documentation/0.9.0/images/RoleJobs.png
similarity index 100%
copy from publish/documentation/latest/images/RoleJobs.png
copy to source/documentation/0.9.0/images/RoleJobs.png
Binary files differ
diff --git a/publish/documentation/latest/images/ScheduledJobs.png b/source/documentation/0.9.0/images/ScheduledJobs.png
similarity index 100%
copy from publish/documentation/latest/images/ScheduledJobs.png
copy to source/documentation/0.9.0/images/ScheduledJobs.png
Binary files differ
diff --git a/publish/documentation/latest/images/TaskBreakdown.png b/source/documentation/0.9.0/images/TaskBreakdown.png
similarity index 100%
copy from publish/documentation/latest/images/TaskBreakdown.png
copy to source/documentation/0.9.0/images/TaskBreakdown.png
Binary files differ
diff --git a/publish/documentation/latest/images/aurora_hierarchy.png b/source/documentation/0.9.0/images/aurora_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/aurora_hierarchy.png
copy to source/documentation/0.9.0/images/aurora_hierarchy.png
Binary files differ
diff --git a/source/documentation/0.9.0/images/debug-client-test.png b/source/documentation/0.9.0/images/debug-client-test.png
new file mode 100644
index 0000000..8b67a10
--- /dev/null
+++ b/source/documentation/0.9.0/images/debug-client-test.png
Binary files differ
diff --git a/source/documentation/0.9.0/images/debugging-client-test.png b/source/documentation/0.9.0/images/debugging-client-test.png
new file mode 100644
index 0000000..df7b7e3
--- /dev/null
+++ b/source/documentation/0.9.0/images/debugging-client-test.png
Binary files differ
diff --git a/publish/documentation/latest/images/killedtask.png b/source/documentation/0.9.0/images/killedtask.png
similarity index 100%
copy from publish/documentation/latest/images/killedtask.png
copy to source/documentation/0.9.0/images/killedtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/lifeofatask.png b/source/documentation/0.9.0/images/lifeofatask.png
similarity index 100%
copy from publish/documentation/latest/images/lifeofatask.png
copy to source/documentation/0.9.0/images/lifeofatask.png
Binary files differ
diff --git a/source/documentation/0.9.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png b/source/documentation/0.9.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
new file mode 100644
index 0000000..ea9866e
--- /dev/null
+++ b/source/documentation/0.9.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
Binary files differ
diff --git a/source/documentation/0.9.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png b/source/documentation/0.9.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
new file mode 100644
index 0000000..c9539df
--- /dev/null
+++ b/source/documentation/0.9.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
Binary files differ
diff --git a/source/documentation/0.9.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png b/source/documentation/0.9.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
new file mode 100644
index 0000000..99674c7
--- /dev/null
+++ b/source/documentation/0.9.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/0.9.0/images/presentations/02_28_2015_apache_aurora_thumb.png b/source/documentation/0.9.0/images/presentations/02_28_2015_apache_aurora_thumb.png
new file mode 100644
index 0000000..6e0e1fb
--- /dev/null
+++ b/source/documentation/0.9.0/images/presentations/02_28_2015_apache_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.9.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png b/source/documentation/0.9.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
new file mode 100644
index 0000000..6ebce80
--- /dev/null
+++ b/source/documentation/0.9.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/0.9.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png b/source/documentation/0.9.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
new file mode 100644
index 0000000..f8a3db1
--- /dev/null
+++ b/source/documentation/0.9.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
Binary files differ
diff --git a/source/documentation/0.9.0/images/presentations/08_21_2014_past_present_future_thumb.png b/source/documentation/0.9.0/images/presentations/08_21_2014_past_present_future_thumb.png
new file mode 100644
index 0000000..deaebe7
--- /dev/null
+++ b/source/documentation/0.9.0/images/presentations/08_21_2014_past_present_future_thumb.png
Binary files differ
diff --git a/publish/documentation/latest/images/runningtask.png b/source/documentation/0.9.0/images/runningtask.png
similarity index 100%
copy from publish/documentation/latest/images/runningtask.png
copy to source/documentation/0.9.0/images/runningtask.png
Binary files differ
diff --git a/publish/documentation/latest/images/stderr.png b/source/documentation/0.9.0/images/stderr.png
similarity index 100%
copy from publish/documentation/latest/images/stderr.png
copy to source/documentation/0.9.0/images/stderr.png
Binary files differ
diff --git a/publish/documentation/latest/images/stdout.png b/source/documentation/0.9.0/images/stdout.png
similarity index 100%
copy from publish/documentation/latest/images/stdout.png
copy to source/documentation/0.9.0/images/stdout.png
Binary files differ
diff --git a/publish/documentation/latest/images/storage_hierarchy.png b/source/documentation/0.9.0/images/storage_hierarchy.png
similarity index 100%
copy from publish/documentation/latest/images/storage_hierarchy.png
copy to source/documentation/0.9.0/images/storage_hierarchy.png
Binary files differ
diff --git a/source/documentation/0.9.0/index.html.md b/source/documentation/0.9.0/index.html.md
new file mode 100644
index 0000000..0209d9e
--- /dev/null
+++ b/source/documentation/0.9.0/index.html.md
@@ -0,0 +1,36 @@
+## Introduction
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run long-running services that take advantage of Apache Mesos' scalability, fault-tolerance, and resource isolation. This documentation has been organized into sections with three audiences in mind:
+ 
+ * Users: General information about the project and how to run an Aurora job.
+ * Operators: For those who wish to manage and fine-tune an Aurora cluster.
+ * Developers: All the information you need to start modifying Aurora and contributing back to the project.
+
+We encourage you to ask questions on the [Aurora developer list](http://aurora.apache.org/community/) or the `#aurora` IRC channel on `irc.freenode.net`.
+
+## Users
+ * [Install Aurora on virtual machines on your private machine](/documentation/0.9.0/vagrant/)
+ * [Hello World Tutorial](/documentation/0.9.0/tutorial/)
+ * [User Guide](/documentation/0.9.0/user-guide/)
+ * [Configuration Tutorial](/documentation/0.9.0/configuration-tutorial/)
+ * [Aurora + Thermos Reference](/documentation/0.9.0/configuration-reference/)
+ * [Command Line Client](/documentation/0.9.0/client-commands/)
+ * [Cron Jobs](/documentation/0.9.0/cron-jobs/)
+
+## Operators
+ * [Deploy Aurora](/documentation/0.9.0/deploying-aurora-scheduler/)
+ * [Monitoring](/documentation/0.9.0/monitoring/)
+ * [Hooks for Aurora Client API](/documentation/0.9.0/hooks/)
+ * [Scheduler Storage](/documentation/0.9.0/storage/)
+ * [Scheduler Storage and Maintenance](/documentation/0.9.0/storage-config/)
+ * [SLA Measurement](/documentation/0.9.0/sla/)
+ * [Resource Isolation and Sizing](/documentation/0.9.0/resource-isolation/)
+ * [Generating test resources](/documentation/0.9.0/test-resource-generation/)
+
+## Developers
+ * [Contributing to the project](contributing/)
+ * [Developing the Aurora Scheduler](/documentation/0.9.0/developing-aurora-scheduler/)
+ * [Developing the Aurora Client](/documentation/0.9.0/developing-aurora-client/)
+ * [Committers Guide](/documentation/0.9.0/committers/)
+ 
+## Additional Resources
+ * [Presentation videos and slides](/documentation/0.9.0/presentations/)
\ No newline at end of file
diff --git a/source/documentation/0.9.0/monitoring.md b/source/documentation/0.9.0/monitoring.md
new file mode 100644
index 0000000..3cb2a79
--- /dev/null
+++ b/source/documentation/0.9.0/monitoring.md
@@ -0,0 +1,181 @@
+# Monitoring your Aurora cluster
+
+Before you start running important services in your Aurora cluster, it's important to set up
+monitoring and alerting of Aurora itself.  Most of your monitoring can be against the scheduler,
+since it will give you a global view of what's going on.
+
+## Reading stats
+The scheduler exposes a *lot* of instrumentation data via its HTTP interface. You can get a quick
+peek at the first few of these in our vagrant image:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars | head'
+    async_tasks_completed 1004
+    attribute_store_fetch_all_events 15
+    attribute_store_fetch_all_events_per_sec 0.0
+    attribute_store_fetch_all_nanos_per_event 0.0
+    attribute_store_fetch_all_nanos_total 3048285
+    attribute_store_fetch_all_nanos_total_per_sec 0.0
+    attribute_store_fetch_one_events 3391
+    attribute_store_fetch_one_events_per_sec 0.0
+    attribute_store_fetch_one_nanos_per_event 0.0
+    attribute_store_fetch_one_nanos_total 454690753
+
+These values are served as `Content-Type: text/plain`, with each line containing a space-separated metric
+name and value. Values may be integers, doubles, or strings (note: strings are static, others
+may be dynamic).
+
+If your monitoring infrastructure prefers JSON, the scheduler exports that as well:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars.json | python -mjson.tool | head'
+    {
+        "async_tasks_completed": 1009,
+        "attribute_store_fetch_all_events": 15,
+        "attribute_store_fetch_all_events_per_sec": 0.0,
+        "attribute_store_fetch_all_nanos_per_event": 0.0,
+        "attribute_store_fetch_all_nanos_total": 3048285,
+        "attribute_store_fetch_all_nanos_total_per_sec": 0.0,
+        "attribute_store_fetch_one_events": 3409,
+        "attribute_store_fetch_one_events_per_sec": 0.0,
+        "attribute_store_fetch_one_nanos_per_event": 0.0,
+
+This will be the same data as above, served with `Content-Type: application/json`.
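+
+As a sketch (not part of Aurora itself), the JSON endpoint is easy to consume from a short script; this assumes the scheduler's stats endpoint is reachable at `localhost:8081`, as in the vagrant examples above:
+
+```python
+import json
+import urllib2
+
+# Fetch the scheduler's exported variables and print a few of the important ones.
+stats = json.load(urllib2.urlopen('http://localhost:8081/vars.json'))
+for name in ('jvm_uptime_secs', 'framework_registered', 'task_store_LOST'):
+    print('%s = %s' % (name, stats.get(name)))
+```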
+
+## Viewing live stat samples on the scheduler
+The scheduler uses the Twitter commons stats library, which keeps an internal time-series database
+of exported variables - nearly everything in `/vars` is available for instant graphing.  This is
+useful for debugging, but is not a replacement for an external monitoring system.
+
+You can view these graphs on a scheduler at `/graphview`.  It supports some composition and
+aggregation of values, which can be invaluable when triaging a problem.  For example, if you have
+the scheduler running in vagrant, check out these links:
+[simple graph](http://192.168.33.7:8081/graphview?query=jvm_uptime_secs)
+[complex composition](http://192.168.33.7:8081/graphview?query=rate\(scheduler_log_native_append_nanos_total\)%2Frate\(scheduler_log_native_append_events\)%2F1e6)
+
+### Counters and gauges
+Among numeric stats, there are two fundamental types of stats exported: _counters_ and _gauges_.
+Counters are guaranteed to be monotonically-increasing for the lifetime of a process, while gauges
+may decrease in value.  Aurora uses counters to represent things like the number of times an event
+has occurred, and gauges to capture things like the current length of a queue.  Counters are a
+natural fit for accurate composition into [rate ratios](http://en.wikipedia.org/wiki/Rate_ratio)
+(useful for sample-resistant latency calculation), while gauges are not.
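+
+As a sketch of how such a rate ratio is computed: given two snapshots of `/vars` taken some interval apart, the windowed average latency per event is the change in the nanos counter divided by the change in the events counter. The stat names below match the replicated log metric described later on this page.
+
+```python
+def append_latency_ms(prev, curr):
+    """Windowed replicated-log append latency (ms) from two /vars snapshots (dicts)."""
+    d_nanos = (curr['scheduler_log_native_append_nanos_total']
+               - prev['scheduler_log_native_append_nanos_total'])
+    d_events = (curr['scheduler_log_native_append_events']
+                - prev['scheduler_log_native_append_events'])
+    if d_events == 0:
+        return 0.0
+    return d_nanos / float(d_events) / 1e6  # nanoseconds per event -> milliseconds
+```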
+
+# Alerting
+
+## Quickstart
+If you are looking for just bare-minimum alerting to get something in place quickly, set up alerting
+on `framework_registered` and `task_store_LOST`. These will give you a decent picture of overall
+health.
+
+## A note on thresholds
+One of the most difficult things in monitoring is choosing alert thresholds. With many of these
+stats, there is no value we can offer as a threshold that will be guaranteed to work for you. It
+will depend on the size of your cluster, number of jobs, churn of tasks in the cluster, etc. We
+recommend you start with a strict value after viewing a small amount of collected data, and then
+adjust thresholds as you see fit. Feel free to ask us if you would like to validate that your alerts
+and thresholds make sense.
+
+## Important stats
+
+### `jvm_uptime_secs`
+Type: integer counter
+
+The number of seconds the JVM process has been running. Comes from
+[RuntimeMXBean#getUptime()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/RuntimeMXBean.html#getUptime\(\))
+
+Detecting resets (decreasing values) on this stat will tell you that the scheduler is failing to
+stay alive.
+
+Look at the scheduler logs to identify the reason the scheduler is exiting.
+
+### `system_load_avg`
+Type: double gauge
+
+The current load average of the system for the last minute. Comes from
+[OperatingSystemMXBean#getSystemLoadAverage()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/OperatingSystemMXBean.html?is-external=true#getSystemLoadAverage\(\)).
+
+A high sustained value suggests that the scheduler machine may be over-utilized.
+
+Use standard unix tools like `top` and `ps` to track down the offending process(es).
+
+### `process_cpu_cores_utilized`
+Type: double gauge
+
+The current number of CPU cores in use by the JVM process. This should not exceed the number of
+logical CPU cores on the machine. Derived from
+[OperatingSystemMXBean#getProcessCpuTime()](http://docs.oracle.com/javase/7/docs/jre/api/management/extension/com/sun/management/OperatingSystemMXBean.html)
+
+A high sustained value indicates that the scheduler is overworked. Due to current internal design
+limitations, if this value is sustained at `1`, there is a good chance the scheduler is under water.
+
+There are two main inputs that tend to drive this figure: task scheduling attempts and status
+updates from Mesos.  You may see activity in the scheduler logs to give an indication of where
+time is being spent.  Beyond that, it really takes good familiarity with the code to effectively
+triage this.  We suggest engaging with an Aurora developer.
+
+### `task_store_LOST`
+Type: integer gauge
+
+The number of tasks stored in the scheduler that are in the `LOST` state, and have been rescheduled.
+
+If this value is increasing at a high rate, it is a sign of trouble.
+
+There are many sources of `LOST` tasks in Mesos: the scheduler, master, slave, and executor can all
+trigger this.  The first step is to look in the scheduler logs for `LOST` to identify where the
+state changes are originating.
+
+### `scheduler_resource_offers`
+Type: integer counter
+
+The number of resource offers that the scheduler has received.
+
+For a healthy scheduler, this value must be increasing over time.
+
+Assuming the scheduler is up and otherwise healthy, you will want to check if the master thinks it
+is sending offers. You should also look at the master's web interface to see if it has a large
+number of outstanding offers that it is waiting to be returned.
+
+### `framework_registered`
+Type: binary integer counter
+
+Will be `1` for the leading scheduler that is registered with the Mesos master, and `0` for passive
+schedulers.
+
+A sustained period without a `1` (or where `sum() != 1`) warrants investigation.
+
+If there is no leading scheduler, look in the scheduler and master logs for why.  If there are
+multiple schedulers claiming leadership, this suggests a split brain and warrants filing a critical
+bug.
+
+### `rate(scheduler_log_native_append_nanos_total)/rate(scheduler_log_native_append_events)`
+Type: rate ratio of integer counters
+
+This composes two counters to compute a windowed figure for the latency of replicated log writes.
+
+A hike in this value suggests disk bandwidth contention.
+
+Look in scheduler logs for any reported oddness with saving to the replicated log. Also use
+standard tools like `vmstat` and `iotop` to identify whether the disk has become slow or
+over-utilized. We suggest using a dedicated disk for the replicated log to mitigate this.
+
+### `timed_out_tasks`
+Type: integer counter
+
+Tracks the number of times the scheduler has given up while waiting
+(for `-transient_task_state_timeout`) to hear back about a task that is in a transient state
+(e.g. `ASSIGNED`, `KILLING`), and has moved it to `LOST` before rescheduling.
+
+This value is currently known to increase occasionally when the scheduler fails over
+([AURORA-740](https://issues.apache.org/jira/browse/AURORA-740)). However, any large spike in this
+value warrants investigation.
+
+The scheduler will log when it times out a task. You should trace the task ID of the timed out
+task into the master, slave, and/or executors to determine where the message was dropped.
+
+### `http_500_responses_events`
+Type: integer counter
+
+The total number of HTTP 500 status responses sent by the scheduler. Includes API and asset serving.
+
+An increase warrants investigation.
+
+Look in the scheduler logs to identify why the scheduler returned a 500; there should be a stack trace.
diff --git a/source/documentation/0.9.0/presentations.md b/source/documentation/0.9.0/presentations.md
new file mode 100644
index 0000000..18bf490
--- /dev/null
+++ b/source/documentation/0.9.0/presentations.md
@@ -0,0 +1,50 @@
+# Apache Aurora Presentations
+Video and slides from presentations and panel discussions about Apache Aurora.
+
+_(Listed in date descending order)_
+
+<table>
+	<tr>
+		<td><img src="/documentation/0.9.0/images/presentations/04_30_2015_monolith_to_microservices_thumb.png" alt="From Monolith to Microservices with Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=yXkOgnyK4Hw">From Monolith to Microservices w/ Aurora (Video)</a></strong>
+		<p>Presented by Thanos Baskous, Tony Dong, Dobromir Montauk</p>
+		<p>April 30, 2015 at <a href="http://www.meetup.com/Bay-Area-Apache-Aurora-Users-Group/events/221219480/">Bay Area Apache Aurora Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.9.0/images/presentations/02_28_2015_apache_aurora_thumb.png" alt="Apache Auroraの始めかた Slideshow Thumbnail" /></td>
+		<td><strong><a href="http://www.slideshare.net/zembutsu/apache-aurora-introduction-and-tutorial-osc15tk">Apache Auroraの始めかた (Slides)</a></strong>
+		<p>Presented by Masahito Zembutsu</p>
+		<p>February 28, 2015 at <a href="http://www.ospn.jp/osc2015-spring/">Open Source Conference 2015 Tokyo Spring</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.9.0/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png" alt="Apache Aurora Adopters Panel Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=2Jsj0zFdRlg">Apache Aurora Adopters Panel (Video)</a></strong>
+		<p>Panelists Ben Staffin, Josh Adams, Bill Farner, Berk Demir</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.9.0/images/presentations/02_19_2015_aurora_at_twitter_thumb.png" alt="Operating Apache Aurora and Mesos at Twitter Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=E4lxX6epM_U">Operating Apache Aurora and Mesos at Twitter (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.9.0/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png" alt="Apache Aurora and Mesos at TellApart" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=ZZXtXLvTXAE">Apache Aurora and Mesos at TellApart (Video)</a></strong>
+		<p>Presented by Steve Niemitz</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.9.0/images/presentations/08_21_2014_past_present_future_thumb.png" alt="Past, Present, and Future of the Aurora Scheduler Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=Dsc5CPhKs4o">Past, Present, and Future of the Aurora Scheduler (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 21, 2014 at <a href="http://events.linuxfoundation.org/events/archive/2014/mesoscon">#MesosCon 2014</a></p>
+</td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/0.9.0/images/presentations/03_25_2014_introduction_to_aurora_thumb.png" alt="Introduction to Apache Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=asd_h6VzaJc">Introduction to Apache Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 25, 2014 at <a href="https://www.eventbrite.com/e/aurora-and-mesosframeworksmeetup-tickets-10850994617">Aurora and Mesos Frameworks Meetup</a></p></td>
+	</tr>
+</table>
\ No newline at end of file
diff --git a/source/documentation/latest/resource-isolation.md b/source/documentation/0.9.0/resource-isolation.md
similarity index 100%
copy from source/documentation/latest/resource-isolation.md
copy to source/documentation/0.9.0/resource-isolation.md
diff --git a/source/documentation/latest/scheduler-storage.md b/source/documentation/0.9.0/scheduler-storage.md
similarity index 100%
copy from source/documentation/latest/scheduler-storage.md
copy to source/documentation/0.9.0/scheduler-storage.md
diff --git a/source/documentation/0.9.0/security.md b/source/documentation/0.9.0/security.md
new file mode 100644
index 0000000..8fcbadb
--- /dev/null
+++ b/source/documentation/0.9.0/security.md
@@ -0,0 +1,271 @@
+Aurora integrates with [Apache Shiro](http://shiro.apache.org/) to provide security
+controls for its API. In addition to providing some useful features out of the box, Shiro
+also allows Aurora cluster administrators to adapt the security system to their organization’s
+existing infrastructure.
+
+- [Enabling Security](#enabling-security)
+- [Authentication](#authentication)
+	- [HTTP Basic Authentication](#http-basic-authentication)
+		- [Server Configuration](#server-configuration)
+		- [Client Configuration](#client-configuration)
+	- [HTTP SPNEGO Authentication (Kerberos)](#http-spnego-authentication-kerberos)
+		- [Server Configuration](#server-configuration-1)
+		- [Client Configuration](#client-configuration-1)
+- [Authorization](#authorization)
+	- [Using an INI file to define security controls](#using-an-ini-file-to-define-security-controls)
+		- [Caveats](#caveats)
+- [Implementing a Custom Realm](#implementing-a-custom-realm)
+	- [Packaging a realm module](#packaging-a-realm-module)
+- [Known Issues](#known-issues)
+
+# Enabling Security
+
+There are two major components of security:
+[authentication and authorization](http://en.wikipedia.org/wiki/Authentication#Authorization).  A
+cluster administrator may choose the approach used for each, and may also implement custom
+mechanisms for either.  Later sections describe the options available.
+
+# Authentication
+
+The scheduler must be configured with instructions for how to process authentication
+credentials at a minimum.  There are currently two built-in authentication schemes -
+[HTTP Basic Authentication](http://en.wikipedia.org/wiki/Basic_access_authentication), and
+[SPNEGO](http://en.wikipedia.org/wiki/SPNEGO) (Kerberos).
+
+## HTTP Basic Authentication
+
+Basic Authentication is a very quick way to add *some* security.  It is supported
+by all major browsers and HTTP client libraries with minimal work.  However,
+before relying on Basic Authentication you should be aware of the [security
+considerations](http://tools.ietf.org/html/rfc2617#section-4).
+
+### Server Configuration
+
+At a minimum you need to set the following command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_realm_modules=INI_AUTHNZ
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+[users]
+sally = apple, admin
+
+[roles]
+admin = *
+```
+
+The details of the security.ini file are explained below. Note that this file contains plaintext,
+unhashed passwords.
+
+### Client Configuration
+
+To configure the client for HTTP Basic authentication, add an entry to `~/.netrc` with your credentials:
+
+```
+% cat ~/.netrc
+# ...
+
+machine aurora.example.com
+login sally
+password apple
+
+# ...
+```
+
+No changes are required to `clusters.json`.
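+
+As a quick sanity check (a sketch, not something Aurora requires), Python's standard `netrc` module reads the same file and can confirm the entry parses the way HTTP clients will read it:
+
+```python
+import netrc
+
+# Look up the credentials that clients will use for the scheduler host.
+login, _account, password = netrc.netrc().authenticators('aurora.example.com')
+print('Will authenticate as %s' % login)
+```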
+
+## HTTP SPNEGO Authentication (Kerberos)
+
+### Server Configuration
+At a minimum you need to set the following command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=NEGOTIATE
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+% cat path/to/security.ini
+[users]
+sally = _, admin
+
+[roles]
+admin = *
+```
+
+What's going on here? First, Aurora must be configured to request Kerberos credentials when presented with an
+unauthenticated request. This is achieved by setting
+
+```
+-http_authentication_mechanism=NEGOTIATE
+```
+
+Next, a Realm module must be configured to **authenticate** the current request using the Kerberos
+credentials that were requested. Aurora ships with a realm module that can do this
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN[,...]
+```
+
+The Kerberos5Realm requires a keytab file and a server principal name. The principal name will usually
+be in the form `HTTP/aurora.example.com@EXAMPLE.COM`.
+
+```
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+```
+
+The Kerberos5 realm module is authentication-only. For scheduler security to work you must also
+enable a realm module that provides an Authorizer implementation. For example, to do this using the
+IniShiroRealmModule:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+```
+
+You can then configure authorization using a security.ini file as described below
+(the password field is ignored). You must configure the realm module with the path to this file:
+
+```
+-shiro_ini_path=path/to/security.ini
+```
+
+### Client Configuration
+To use Kerberos on the client-side you must build Kerberos-enabled client binaries. Do this with
+
+```
+./pants binary src/main/python/apache/aurora/client/cli:kaurora
+./pants binary src/main/python/apache/aurora/admin:kaurora_admin
+```
+
+You must also configure each cluster where you've enabled Kerberos on the scheduler
+to use Kerberos authentication. Do this by setting `auth_mechanism` to `KERBEROS`
+in `clusters.json`.
+
+```
+% cat ~/.aurora/clusters.json
+{
+    "devcluser": {
+        "auth_mechanism": "KERBEROS",
+        ...
+    },
+    ...
+}
+```
+
+# Authorization
+Given a means to authenticate the entity a client claims they are, we need to define what privileges they have.
+
+## Using an INI file to define security controls
+
+The simplest security configuration for Aurora is an INI file on the scheduler.  For small
+clusters, or clusters where the users and access controls change relatively infrequently, this is
+likely the preferred approach.  However you may want to avoid this approach if access permissions
+are rapidly changing, or if your access control information already exists in another system.
+
+You can enable INI-based configuration with following scheduler command line arguments:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_ini_path=path/to/security.ini
+```
+
+*note* As the argument name reveals, this is using Shiro’s
+[IniRealm](http://shiro.apache.org/configuration.html#Configuration-INIConfiguration) behind
+the scenes.
+
+The INI file will contain two sections - users and roles.  Here’s an example for what might
+be in security.ini:
+
+```
+[users]
+sally = apple, admin
+jim = 123456, accounting
+becky = letmein, webapp
+larry = 654321,accounting
+steve = password
+
+[roles]
+admin = *
+accounting = thrift.AuroraAdmin:setQuota
+webapp = thrift.AuroraSchedulerManager:*:webapp
+```
+
+The users section defines user credentials and the role(s) they are members of.  These lines
+are of the format `<user> = <password>[, <role>...]`.  As you probably noticed, the passwords are
+in plaintext and as a result read access to this file should be restricted.
+
+In this configuration, each user has different privileges for actions in the cluster because
+of the roles they are a part of:
+
+* admin is granted all privileges
+* accounting may adjust the amount of resource quota for any role
+* webapp represents a collection of jobs that represents a service, and its members may create and modify any jobs owned by it
+
+### Caveats
+You might find documentation on the Internet suggesting there are additional sections in `shiro.ini`,
+like `[main]` and `[urls]`. These are not supported by Aurora as it uses a different mechanism to configure
+those parts of Shiro. Think of Aurora's `security.ini` as a subset with only `[users]` and `[roles]` sections.
+
+# Implementing a Custom Realm
+
+Since Aurora’s security is backed by [Apache Shiro](https://shiro.apache.org), you can implement a
+custom [Realm](http://shiro.apache.org/realm.html) to define organization-specific security behavior.
+
+In addition to using Shiro's standard APIs to implement a Realm you can link against Aurora to
+access the type-safe Permissions Aurora uses. See the Javadoc for `org.apache.aurora.scheduler.spi`
+for more information.
+
+## Packaging a realm module
+Package your custom Realm(s) with a Guice module that exposes a `Set<Realm>` multibinding.
+
+```java
+package com.example;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.multibindings.Multibinder;
+import org.apache.shiro.realm.Realm;
+
+public class MyRealmModule extends AbstractModule {
+  @Override
+  public void configure() {
+    Realm myRealm = new MyRealm();
+
+    Multibinder.newSetBinder(binder(), Realm.class).addBinding().toInstance(myRealm);
+  }
+
+  static class MyRealm implements Realm {
+    // Realm implementation.
+  }
+}
+```
+
+To use your module in the scheduler, include it as a realm module based on its fully-qualified
+class name:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ,com.example.MyRealmModule
+```
+
+# Known Issues
+
+While the APIs and SPIs we ship with are stable as of 0.8.0, we are aware of several possible
+incremental improvements. Please follow, vote, or send patches.
+
+Relevant tickets:
+* [AURORA-343](https://issues.apache.org/jira/browse/AURORA-343): HTTPS support
+* [AURORA-1248](https://issues.apache.org/jira/browse/AURORA-1248): Client retries 4xx errors
+* [AURORA-1279](https://issues.apache.org/jira/browse/AURORA-1279): Remove kerberos-specific build targets
+* [AURORA-1293](https://issues.apache.org/jira/browse/AURORA-1291): Consider defining a JSON format in place of INI
+* [AURORA-1179](https://issues.apache.org/jira/browse/AURORA-1179): Supported hashed passwords in security.ini
+* [AURORA-1295](https://issues.apache.org/jira/browse/AURORA-1295): Support security for the ReadOnlyScheduler service
diff --git a/source/documentation/0.9.0/sla.md b/source/documentation/0.9.0/sla.md
new file mode 100644
index 0000000..ac3af09
--- /dev/null
+++ b/source/documentation/0.9.0/sla.md
@@ -0,0 +1,177 @@
+Aurora SLA Measurement
+--------------
+
+- [Overview](#overview)
+- [Metric Details](#metric-details)
+  - [Platform Uptime](#platform-uptime)
+  - [Job Uptime](#job-uptime)
+  - [Median Time To Assigned (MTTA)](#median-time-to-assigned-\(mtta\))
+  - [Median Time To Running (MTTR)](#median-time-to-running-\(mttr\))
+- [Limitations](#limitations)
+
+## Overview
+
+The primary goal of the feature is the collection and monitoring of Aurora job SLA (Service Level
+Agreement) metrics that define a contractual relationship between the Aurora/Mesos platform
+and hosted services.
+
+The Aurora SLA feature is by default only enabled for service (non-cron)
+production jobs (`"production = True"` in your `.aurora` config). It can be enabled for
+non-production services via the scheduler command line flag `-sla_non_prod_metrics`.
+
+Counters that track SLA measurements are computed periodically within the scheduler.
+The individual instance metrics are refreshed every minute (configurable via
+`sla_stat_refresh_interval`). The instance counters are subsequently aggregated by
+relevant grouping types before exporting to the scheduler `/vars` endpoint (when using `vagrant`
+that would be `http://192.168.33.7:8081/vars`).
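+
+For a quick spot check of what is currently exported, the SLA counters can be filtered out of `/vars.json`; this is a sketch that assumes the vagrant scheduler address used above:
+
+```python
+import json
+import urllib2
+
+# Print every exported SLA stat and its current value.
+stats = json.load(urllib2.urlopen('http://192.168.33.7:8081/vars.json'))
+for name, value in sorted(stats.items()):
+    if name.startswith('sla_'):
+        print('%s = %s' % (name, value))
+```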
+
+## Metric Details
+
+### Platform Uptime
+
+*Aggregate amount of time a job spends in a non-runnable state due to platform unavailability
+or scheduling delays. This metric tracks Aurora/Mesos uptime performance and reflects on any
+system-caused downtime events (tasks LOST or DRAINED). Any user-initiated task kills/restarts
+will not degrade this metric.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_platform_uptime_percent`
+* Per cluster - `sla_cluster_platform_uptime_percent`
+
+**Units:** percent
+
+A fault in the task environment may cause Aurora/Mesos to have different views of the task state
+or lose track of the task's existence. In such cases, the service task is marked as LOST and
+rescheduled by Aurora. For example, this may happen when the task stays in ASSIGNED or STARTING
+for too long or the Mesos slave becomes unhealthy (or disappears completely). The time between
+task entering LOST and its replacement reaching RUNNING state is counted towards platform downtime.
+
+Another example of a platform downtime event is the administrator-requested task rescheduling. This
+happens during planned Mesos slave maintenance when all slave tasks are marked as DRAINED and
+rescheduled elsewhere.
+
+To accurately calculate Platform Uptime, we must separate platform incurred downtime from user
+actions that put a service instance in a non-operational state. It is simpler to isolate
+user-incurred downtime and treat all other downtime as platform incurred.
+
+Currently, a user can cause a healthy service (task) downtime in only two ways: via `killTasks`
+or `restartShards` RPCs. For both, their affected tasks leave an audit state transition trail
+relevant to uptime calculations. By applying a special "SLA meaning" to exposed task state
+transition records, we can build a deterministic downtime trace for every given service instance.
+
+A task going through a state transition carries one of three possible SLA meanings
+(see [SlaAlgorithm.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/sla/SlaAlgorithm.java) for
+the SLA-to-task-state mapping):
+
+* Task is UP: starts a period where the task is considered to be up and running from the Aurora
+  platform standpoint.
+
+* Task is DOWN: starts a period where the task cannot reach the UP state for some
+  non-user-related reason. Counts towards instance downtime.
+
+* Task is REMOVED from SLA: starts a period where the task is not expected to be UP due to
+  user initiated action or failure. We ignore this period for the uptime calculation purposes.
+
+This metric is recalculated over the last sampling period (last minute) to account for
+any UP/DOWN/REMOVED events. It ignores any UP/DOWN events not immediately adjacent to the
+sampling interval as well as adjacent REMOVED events.
+
+### Job Uptime
+
+*Percentage of the job instances considered to be in RUNNING state for the specified duration
+relative to request time. This is a purely application-side metric that considers the aggregate
+uptime of all RUNNING instances. Any user- or platform-initiated restarts directly affect
+this metric.*
+
+**Collection scope:** We currently expose job uptime values at 5 pre-defined
+percentiles (50th, 75th, 90th, 95th, and 99th):
+
+* `sla_<job_key>_job_uptime_50_00_sec`
+* `sla_<job_key>_job_uptime_75_00_sec`
+* `sla_<job_key>_job_uptime_90_00_sec`
+* `sla_<job_key>_job_uptime_95_00_sec`
+* `sla_<job_key>_job_uptime_99_00_sec`
+
+**Units:** seconds
+
+You can also get customized real-time stats from the aurora client. See `aurora sla -h` for
+more details.
+
+### Median Time To Assigned (MTTA)
+
+*Median time a job spends waiting for its tasks to be assigned to a host. This is a combined
+metric that helps track the dependency of scheduling performance on the requested resources
+(user scope) as well as the internal scheduler bin-packing algorithm efficiency (platform scope).*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtta_ms`
+* Per cluster - `sla_cluster_mtta_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in
+[ResourceAggregates.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java):
+  * By CPU:
+    * `sla_cpu_small_mtta_ms`
+    * `sla_cpu_medium_mtta_ms`
+    * `sla_cpu_large_mtta_ms`
+    * `sla_cpu_xlarge_mtta_ms`
+    * `sla_cpu_xxlarge_mtta_ms`
+  * By RAM:
+    * `sla_ram_small_mtta_ms`
+    * `sla_ram_medium_mtta_ms`
+    * `sla_ram_large_mtta_ms`
+    * `sla_ram_xlarge_mtta_ms`
+    * `sla_ram_xxlarge_mtta_ms`
+  * By DISK:
+    * `sla_disk_small_mtta_ms`
+    * `sla_disk_medium_mtta_ms`
+    * `sla_disk_large_mtta_ms`
+    * `sla_disk_xlarge_mtta_ms`
+    * `sla_disk_xxlarge_mtta_ms`
+
+**Units:** milliseconds
+
+MTTA only considers instances that have already reached ASSIGNED state and ignores those
+that are still PENDING. This ensures straggler instances (e.g. with unreasonable resource
+constraints) do not affect metric curves.
+
+### Median Time To Running (MTTR)
+
+*Median time a job waits for its tasks to reach RUNNING state. This is a comprehensive metric
+reflecting the overall time it takes for Aurora/Mesos to start executing user content.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mttr_ms`
+* Per cluster - `sla_cluster_mttr_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in
+[ResourceAggregates.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java):
+  * By CPU:
+    * `sla_cpu_small_mttr_ms`
+    * `sla_cpu_medium_mttr_ms`
+    * `sla_cpu_large_mttr_ms`
+    * `sla_cpu_xlarge_mttr_ms`
+    * `sla_cpu_xxlarge_mttr_ms`
+  * By RAM:
+    * `sla_ram_small_mttr_ms`
+    * `sla_ram_medium_mttr_ms`
+    * `sla_ram_large_mttr_ms`
+    * `sla_ram_xlarge_mttr_ms`
+    * `sla_ram_xxlarge_mttr_ms`
+  * By DISK:
+    * `sla_disk_small_mttr_ms`
+    * `sla_disk_medium_mttr_ms`
+    * `sla_disk_large_mttr_ms`
+    * `sla_disk_xlarge_mttr_ms`
+    * `sla_disk_xxlarge_mttr_ms`
+
+**Units:** milliseconds
+
+MTTR only considers instances in RUNNING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+## Limitations
+
+* The availability of Aurora SLA metrics is bound by the scheduler availability.
+
+* All metrics are calculated at a pre-defined interval (currently set at 1 minute).
+  Scheduler restarts may result in missed collections.
diff --git a/source/documentation/0.9.0/storage-config.md b/source/documentation/0.9.0/storage-config.md
new file mode 100644
index 0000000..49b1ff2
--- /dev/null
+++ b/source/documentation/0.9.0/storage-config.md
@@ -0,0 +1,142 @@
+# Storage Configuration And Maintenance
+
+- [Overview](#overview)
+- [Scheduler storage configuration flags](#scheduler-storage-configuration-flags)
+  - [Mesos replicated log configuration flags](#mesos-replicated-log-configuration-flags)
+    - [-native_log_quorum_size](#-native_log_quorum_size)
+    - [-native_log_file_path](#-native_log_file_path)
+    - [-native_log_zk_group_path](#-native_log_zk_group_path)
+  - [Backup configuration flags](#backup-configuration-flags)
+    - [-backup_interval](#-backup_interval)
+    - [-backup_dir](#-backup_dir)
+    - [-max_saved_backups](#-max_saved_backups)
+- [Recovering from a scheduler backup](#recovering-from-a-scheduler-backup)
+  - [Summary](#summary)
+  - [Preparation](#preparation)
+  - [Cleanup and re-initialize Mesos replicated log](#cleanup-and-re-initialize-mesos-replicated-log)
+  - [Restore from backup](#restore-from-backup)
+  - [Cleanup](#cleanup)
+
+## Overview
+
+This document summarizes Aurora storage configuration and maintenance details and is
+intended for use by anyone deploying and/or maintaining Aurora.
+
+For a high level overview of the Aurora storage architecture refer to [this document](/documentation/0.9.0/storage/).
+
+## Scheduler storage configuration flags
+
+Below is a summary of scheduler storage configuration flags that either don't have default values
+or require attention before deploying in a production environment.
+
+### Mesos replicated log configuration flags
+
+#### -native_log_quorum_size
+Defines the Mesos replicated log quorum size. See
+[the replicated log configuration document](/documentation/0.9.0/deploying-aurora-scheduler/#replicated-log-configuration)
+on how to choose the right value.
+
+#### -native_log_file_path
+Location of the Mesos replicated log files. Consider allocating a dedicated disk (preferably SSD)
+for Mesos replicated log files to ensure optimal storage performance.
+
+#### -native_log_zk_group_path
+ZooKeeper path used for Mesos replicated log quorum discovery.
+
+See the [code](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/log/mesos/MesosLogStreamModule.java) for
+other available Mesos replicated log configuration options and default values.
+
+### Backup configuration flags
+
+Configuration options for the Aurora scheduler backup manager.
+
+#### -backup_interval
+The interval on which the scheduler writes local storage backups.  The default is every hour.
+
+#### -backup_dir
+Directory to write backups to.
+
+#### -max_saved_backups
+Maximum number of backups to retain before deleting the oldest backup(s).
+
+## Recovering from a scheduler backup
+
+- [Overview](#overview)
+- [Preparation](#preparation)
+- [Assess Mesos replicated log damage](#assess-mesos-replicated-log-damage)
+- [Restore from backup](#restore-from-backup)
+- [Cleanup](#cleanup)
+
+**Be sure to read the entire page before attempting to restore from a backup, as it may have
+unintended consequences.**
+
+### Summary
+
+The restoration procedure replaces the existing (possibly corrupted) Mesos replicated log with an
+earlier, backed up, version and requires all schedulers to be taken down temporarily while
+restoring. Once completed, the scheduler state resets to what it was when the backup was created.
+This means any jobs/tasks created or updated after the backup are unknown to the scheduler and will
+be killed shortly after the cluster restarts. All other tasks continue operating as normal.
+
+Usually, it is a bad idea to restore a backup that is not extremely recent (i.e. older than a few
+hours). This is because the scheduler will expect the cluster to look exactly as the backup does,
+so any tasks that have been rescheduled since the backup was taken will be killed.
+
+### Preparation
+
+Follow these steps to prepare the cluster for restoring from a backup:
+
+* Stop all scheduler instances
+
+* Consider blocking external traffic on a port defined in `-http_port` for all schedulers to
+prevent users from interacting with the scheduler during the restoration process. This will help
+troubleshooting by reducing the scheduler log noise and prevent users from making changes that will
+be erased after the backup snapshot is restored
+
+* The next steps put the scheduler into a partially disabled state in which it can still accept
+storage recovery requests but cannot schedule tasks or change task states. This is accomplished by
+updating the following scheduler configuration options:
+  * Set `-mesos_master_address` to a non-existent zk address. This prevents the scheduler from
+    registering with Mesos. E.g.: `-mesos_master_address=zk://localhost:2181`
+  * Set `-max_registration_delay` to a sufficiently long interval to prevent registration timeouts
+    and, as a result, scheduler suicide. E.g.: `-max_registration_delay=360min`
+  * Make sure `-reconciliation_initial_delay` is set high enough (e.g.: `365days`) to
+    prevent accidental task GC. This is important as the scheduler will attempt to reconcile the
+    cluster state and will kill all tasks when restarted with an empty Mesos replicated log.
+
+* Restart all schedulers
+
+### Cleanup and re-initialize Mesos replicated log
+
+Get rid of the corrupted files and re-initialize the Mesos replicated log:
+
+* Stop schedulers
+* Delete all files under `-native_log_file_path` on all schedulers
+* Initialize Mesos replica's log file: `mesos-log initialize <-native_log_file_path>`
+* Restart schedulers
+
+### Restore from backup
+
+At this point the scheduler is ready to rehydrate from the backup:
+
+* Identify the leading scheduler by:
+  * running `aurora_admin get_scheduler <cluster>` - if scheduler is responsive
+  * examining scheduler logs
+  * or examining Zookeeper registration under the path defined by `-zk_endpoints`
+    and `-serverset_path`
+
+* Locate the desired backup file, copy it to the leading scheduler, and stage recovery by running
+the following command on the leader:
+`aurora_admin scheduler_stage_recovery <cluster> scheduler-backup-<yyyy-MM-dd-HH-mm>`
+
+* At this point, the recovery snapshot is staged and available for manual verification/modification
+via `aurora_admin scheduler_print_recovery_tasks` and `scheduler_delete_recovery_tasks` commands.
+See `aurora_admin help <command>` for usage details.
+
+* Commit the recovery. This instructs the scheduler to overwrite the existing Mesos replicated log
+with the provided backup snapshot and initiate a mandatory failover:
+`aurora_admin scheduler_commit_recovery <cluster>`
+
+### Cleanup
+Undo any modifications done during the [Preparation](#preparation) sequence.
+
diff --git a/source/documentation/0.9.0/storage.md b/source/documentation/0.9.0/storage.md
new file mode 100644
index 0000000..df00812
--- /dev/null
+++ b/source/documentation/0.9.0/storage.md
@@ -0,0 +1,88 @@
+# Aurora Scheduler Storage
+
+- [Overview](#overview)
+- [Reads, writes, modifications](#reads-writes-modifications)
+  - [Read lifecycle](#read-lifecycle)
+  - [Write lifecycle](#write-lifecycle)
+- [Atomicity, consistency and isolation](#atomicity-consistency-and-isolation)
+- [Population on restart](#population-on-restart)
+
+## Overview
+
+The Aurora scheduler maintains data that needs to be persisted to survive failovers and restarts.
+For example:
+
+* Task configurations and scheduled task instances
+* Job update configurations and update progress
+* Production resource quotas
+* Mesos resource offer host attributes
+
+Aurora solves its persistence needs by leveraging the Mesos implementation of a Paxos replicated
+log [[1]](https://ramcloud.stanford.edu/~ongaro/userstudy/paxos.pdf)
+[[2]](http://en.wikipedia.org/wiki/State_machine_replication) with a key-value
+[LevelDB](https://github.com/google/leveldb) storage as persistence media.
+
+Conceptually, it can be represented by the following major components:
+
+* Volatile storage: in-memory cache of all available data. Implemented via in-memory
+[H2 Database](http://www.h2database.com/html/main.html) and accessed via
+[MyBatis](http://mybatis.github.io/mybatis-3/).
+* Log manager: interface between Aurora storage and Mesos replicated log. The default schema format
+is [thrift](https://github.com/apache/thrift). Data is stored in serialized binary form.
+* Snapshot manager: all data is periodically persisted to the Mesos replicated log in a single
+snapshot. This helps establish periodic recovery checkpoints and speeds up volatile storage
+recovery on restart.
+* Backup manager: as a precaution, snapshots are periodically written out into backup files.
+This solves a [disaster recovery problem](/documentation/0.9.0/storage-config/#recovering-from-a-scheduler-backup)
+in case of a complete loss or corruption of Mesos log files.
+
+![Storage hierarchy](images/storage_hierarchy.png)
+
+## Reads, writes, modifications
+
+All services in Aurora access data via a set of predefined store interfaces (aka stores) logically
+grouped by the type of data they serve. Every interface defines a specific set of operations allowed
+on the data thus abstracting out the storage access and the actual persistence implementation. The
+latter is especially important in view of a general immutability of persisted data. With the Mesos
+replicated log as the underlying persistence solution, data can be read and written easily but not
+modified. All modifications are simulated by saving new versions of modified objects. This feature
+and general performance considerations justify the existence of the volatile in-memory store.
+
+### Read lifecycle
+
+There are two types of reads available in Aurora: consistent and weakly-consistent. The difference
+is explained [below](#atomicity-consistency-and-isolation).
+
+All reads are served from the volatile storage, making reads generally cheap operations from a
+performance standpoint. The majority of the volatile stores are represented by the
+in-memory H2 database. This allows for rich schema definitions, queries and relationships that
+key-value storage is unable to match.
+
+### Write lifecycle
+
+Writes are more involved operations since, in addition to updating the volatile store, data has to
+be appended to the replicated log. Data is not available for reads until fully acknowledged by both
+the replicated log and volatile storage.
+
+## Atomicity, consistency and isolation
+
+Aurora uses [write-ahead logging](http://en.wikipedia.org/wiki/Write-ahead_logging) to ensure
+consistency between replicated and volatile storage. In Aurora, data is first written into the
+replicated log and only then updated in the volatile store.
+
+Aurora storage uses read-write locks to serialize data mutations and provide a consistent view of
+the available data. The `Storage` interface exposes 3 major types of operations (sketched below):
+* `consistentRead` - access is locked using the reader's lock and provides a consistent view on read
+* `weaklyConsistentRead` - access is lock-less. Delivers the best contention performance but may
+result in stale reads
+* `write` - access is fully serialized by using the writer's lock. Operation success requires both
+volatile and replicated writes to succeed.
+
+The consistency of the volatile store is enforced via H2 transactional isolation.
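+
+The sketch below is conceptual only (the real implementation is Java, and it collapses the
+reader/writer lock into a single mutex); it shows both the locking and the write-ahead ordering
+described above.
+
+```python
+import threading
+
+class Storage(object):
+    def __init__(self, replicated_log, volatile_store):
+        self._lock = threading.Lock()          # stands in for the writer's lock
+        self._log = replicated_log             # append-only Mesos replicated log
+        self._volatile = volatile_store        # in-memory (H2-backed) stores
+
+    def weakly_consistent_read(self, query):
+        return self._volatile.read(query)      # lock-less; may observe stale data
+
+    def consistent_read(self, query):
+        with self._lock:                       # simplified: blocks behind writers
+            return self._volatile.read(query)
+
+    def write(self, mutation):
+        with self._lock:
+            self._log.append(mutation)         # write-ahead: persist first...
+            self._volatile.apply(mutation)     # ...then update the volatile view
+```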
+
+## Population on restart
+
+Any time a scheduler restarts, it restores its volatile state from the most recent position recorded
+in the replicated log by restoring the snapshot and replaying individual log entries on top to fully
+recover the state up to the last write.
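+
+Conceptually (the names below are illustrative, not actual classes), recovery amounts to loading
+the newest snapshot and replaying the log entries recorded after it:
+
+```python
+def recover(volatile_store, replicated_log):
+    snapshot = replicated_log.latest_snapshot()
+    volatile_store.load(snapshot)                              # periodic checkpoint
+    for entry in replicated_log.entries_after(snapshot.position):
+        volatile_store.apply(entry)                            # replay the tail
+```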
+
diff --git a/source/documentation/0.9.0/test-resource-generation.md b/source/documentation/0.9.0/test-resource-generation.md
new file mode 100644
index 0000000..700ae88
--- /dev/null
+++ b/source/documentation/0.9.0/test-resource-generation.md
@@ -0,0 +1,24 @@
+# Generating test resources
+
+## Background
+The Aurora source repository and distributions contain several
+[binary files](https://github.com/apache/aurora/blob/#{git_tag}/src/test/resources/org/apache/thermos/root/checkpoints) to
+qualify the backwards-compatibility of thermos with checkpoint data. Since
+thermos persists state to disk, to be read by the thermos observer, it is important that we have
+tests that prevent regressions affecting the ability to parse previously-written data.
+
+## Generating test files
+The files included represent persisted checkpoints that exercise different
+features of thermos. The existing files should not be modified unless
+we are accepting backwards incompatibility, such as with a major release.
+
+It is not practical to write source code to generate these files on the fly,
+as source would be vulnerable to drift (e.g. due to refactoring) in ways
+that would undermine the goal of ensuring backwards compatibility.
+
+The most common reason to add a new checkpoint file would be to provide
+coverage for new thermos features that alter the data format. This is
+accomplished by writing and running a
+[job configuration](/documentation/0.9.0/configuration-reference/) that exercises the feature, and
+copying the checkpoint file from the sandbox directory; by default this is
+`/var/run/thermos/checkpoints/<aurora task id>`.
diff --git a/source/documentation/0.9.0/thrift-deprecation.md b/source/documentation/0.9.0/thrift-deprecation.md
new file mode 100644
index 0000000..2c26f08
--- /dev/null
+++ b/source/documentation/0.9.0/thrift-deprecation.md
@@ -0,0 +1,50 @@
+# Thrift API Changes
+
+## Overview
+Aurora uses [Apache Thrift](https://thrift.apache.org/) for representing structured data in
+client/server RPC protocol as well as for internal data storage. While Thrift is capable of
+correctly handling additions and renames of the existing members, field removals must be done
+carefully to ensure backwards compatibility and provide a predictable deprecation cycle. This
+document describes general guidelines for making Thrift schema changes to the existing fields in
+[api.thrift](https://github.com/apache/aurora/blob/#{git_tag}/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+It is highly recommended to go through the
+[Thrift: The Missing Guide](http://diwakergupta.github.io/thrift-missing-guide/) first to refresh on
+basic Thrift schema concepts.
+
+## Checklist
+Every existing Thrift schema modification is unique in its requirements and must be analyzed
+carefully to identify its scope and expected consequences. The following checklist may help in that
+analysis:
+* Is this a new field/struct? If yes, go ahead
+* Is this a pure field/struct rename without any type/structure change? If yes, go ahead and rename
+* Anything else, read further to make sure your change is properly planned
+
+## Deprecation cycle
+Any time a breaking change (e.g.: field replacement or removal) is required, the following cycle
+must be followed:
+
+### vCurrent
+The change is applied in a way that does not prevent a scheduler/client running this version from
+communicating with a scheduler/client running vCurrent-1.
+* Do not remove or rename the old field
+* Add a new field as an eventual replacement of the old one and implement a dual read/write
+anywhere the old field is used (see the sketch after this list)
+* Check [storage.thrift](https://github.com/apache/aurora/blob/#{git_tag}/api/src/main/thrift/org/apache/aurora/gen/storage.thrift) to see if the
+affected struct is stored in Aurora scheduler storage. If so, you most likely need to backfill
+existing data to ensure both fields are populated eagerly on startup.
+See [StorageBackfill.java](https://github.com/apache/aurora/blob/#{git_tag}/src/main/java/org/apache/aurora/scheduler/storage/StorageBackfill.java)
+* Add a deprecation jira ticket into the vCurrent+1 release candidate
+* Add a TODO for the deprecated field mentioning the jira ticket
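+
+A hypothetical dual read/write helper is sketched below; the field names (`numCpus` and
+`resources.cpus`) are invented for illustration and do not prescribe the actual schema change.
+
+```python
+def write_cpus(task, cpu_cores):
+    task.numCpus = cpu_cores          # deprecated field, still read by vCurrent-1
+    task.resources.cpus = cpu_cores   # replacement field introduced in vCurrent
+
+def read_cpus(task):
+    if task.resources is not None and task.resources.cpus is not None:
+        return task.resources.cpus
+    return task.numCpus               # backfill path for data written before vCurrent
+```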
+
+### vCurrent+1
+Finalize the change by removing the deprecated fields from the Thrift schema.
+* Drop any dual read/write routines added in the previous version
+* Remove the deprecated Thrift field
+
+## Testing
+It's always advisable to test your changes in the local vagrant environment to build more
+confidence that your change is backwards compatible. It's easy to simulate different
+client/scheduler versions by playing with the `aurorabuild` command. See [this document](/documentation/0.9.0/vagrant/)
+for more.
+
diff --git a/source/documentation/0.9.0/tutorial.md b/source/documentation/0.9.0/tutorial.md
new file mode 100644
index 0000000..370c1a2
--- /dev/null
+++ b/source/documentation/0.9.0/tutorial.md
@@ -0,0 +1,265 @@
+Aurora Tutorial
+---------------
+
+- [Introduction](#introduction)
+- [Setup: Install Aurora](#setup-install-aurora)
+- [The Script](#the-script)
+- [Aurora Configuration](#aurora-configuration)
+- [What's Going On In That Configuration File?](#whats-going-on-in-that-configuration-file)
+- [Creating the Job](#creating-the-job)
+- [Watching the Job Run](#watching-the-job-run)
+- [Cleanup](#cleanup)
+- [Next Steps](#next-steps)
+
+## Introduction
+
+This tutorial shows how to use the Aurora scheduler to run (and
+"`printf-debug`") a hello world program on Mesos. The operational
+hierarchy is:
+
+- Aurora manages and schedules jobs for Mesos to run.
+- Mesos manages the individual tasks that make up a job.
+- Thermos manages the individual processes that make up a task.
+
+This is the recommended first document for new Aurora users to read to start
+getting up to speed on the system.
+
+To get help, email questions to the Aurora Developer List,
+[dev@aurora.apache.org](mailto:dev@aurora.apache.org)
+
+## Setup: Install Aurora
+
+You use the Aurora client and web UI to interact with Aurora jobs. To
+install it locally, see [vagrant.md](/documentation/0.9.0/vagrant/). The remainder of this
+Tutorial assumes you are running Aurora using Vagrant.  Unless otherwise stated,
+all commands are to be run from the root of the aurora repository clone.
+
+## The Script
+
+Our "hello world" application is a simple Python script that loops
+forever, displaying the time every few seconds. Copy the code below and
+put it in a file named `hello_world.py` in the root of your Aurora repository clone (Note:
+this directory is the same as `/vagrant` inside the Vagrant VMs).
+
+The script has an intentional bug, which we will explain later on.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+import sys
+import time
+
+def main(argv):
+  SLEEP_DELAY = 10
+  # Python ninjas - ignore this blatant bug.
+  for i in xrang(100):
+    print("Hello world! The time is now: %s. Sleeping for %d secs" % (
+      time.asctime(), SLEEP_DELAY))
+    sys.stdout.flush()
+    time.sleep(SLEEP_DELAY)
+
+if __name__ == "__main__":
+  main(sys.argv)
+```
+
+## Aurora Configuration
+
+Once we have our script/program, we need to create a *configuration
+file* that tells Aurora how to manage and launch our Job. Save the below
+code in the file `hello_world.aurora`.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+pkg_path = '/vagrant/hello_world.py'
+
+# we use a trick here to make the configuration change with
+# the contents of the file, for simplicity.  in a normal setting, packages would be
+# versioned, and the version number would be changed in the configuration.
+import hashlib
+with open(pkg_path, 'rb') as f:
+  pkg_checksum = hashlib.md5(f.read()).hexdigest()
+
+# copy hello_world.py into the local sandbox
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s . && echo %s && chmod +x hello_world.py' % (pkg_path, pkg_checksum))
+
+# run the script
+hello_world = Process(
+  name = 'hello_world',
+  cmdline = 'python hello_world.py')
+
+# describe the task
+hello_world_task = SequentialTask(
+  processes = [install, hello_world],
+  resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB))
+
+jobs = [
+  Service(cluster = 'devcluster',
+          environment = 'devel',
+          role = 'www-data',
+          name = 'hello_world',
+          task = hello_world_task)
+]
+```
+
+For more about Aurora configuration files, see the [Configuration
+Tutorial](/documentation/0.9.0/configuration-tutorial/) and the [Aurora + Thermos
+Reference](/documentation/0.9.0/configuration-reference/) (preferably after finishing this
+tutorial).
+
+## What's Going On In That Configuration File?
+
+More than you might think.
+
+1. From a "big picture" viewpoint, it first defines two
+Processes. Then it defines a Task that runs the two Processes in the
+order specified in the Task definition, as well as specifying what
+computational and memory resources are available for them.  Finally,
+it defines a Job that will schedule the Task on available and suitable
+machines. This Job is the sole member of a list of Jobs; you can
+specify more than one Job in a config file.
+
+2. At the Process level, it specifies how to get your code into the
+local sandbox in which it will run. It then specifies how the code is
+actually run once the second Process starts.
+
+## Creating the Job
+
+We're ready to launch our job! To do so, we use the Aurora Client to
+issue a Job creation request to the Aurora scheduler.
+
+Many Aurora Client commands take a *job key* argument, which uniquely
+identifies a Job. A job key consists of four parts, each separated by a
+"/". The four parts are  `<cluster>/<role>/<environment>/<jobname>`
+in that order. When comparing two job keys, if any of the
+four parts is different from its counterpart in the other key, then the
+two job keys identify two separate jobs. If all four values are
+identical, the job keys identify the same job.
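+
+For example, splitting a job key on "/" yields its four parts (a hypothetical helper, not part of
+the Aurora client):
+
+```python
+def parse_job_key(job_key):
+    cluster, role, environment, name = job_key.split('/')
+    return {'cluster': cluster, 'role': role, 'environment': environment, 'name': name}
+
+parse_job_key('devcluster/www-data/devel/hello_world')
+# {'cluster': 'devcluster', 'role': 'www-data', 'environment': 'devel', 'name': 'hello_world'}
+```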
+
+`/etc/aurora/clusters.json` within the Aurora scheduler has the available
+cluster names. For Vagrant, from the top-level of your Aurora repository clone,
+do:
+
+    $ vagrant ssh
+
+Followed by:
+
+    vagrant@precise64:~$ cat /etc/aurora/clusters.json
+
+You'll see something like:
+
+```javascript
+[{
+  "name": "devcluster",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler",
+  "auth_mechanism": "UNAUTHENTICATED"
+}]
+```
+
+Use a `name` value for your job key's cluster value.
+
+Role names are user accounts existing on the slave machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
+The Aurora Client command that actually runs our Job is `aurora job create`. It creates a Job as
+specified by its job key and configuration file arguments and runs it.
+
+    aurora job create <cluster>/<role>/<environment>/<jobname> <config_file>
+
+Or for our example:
+
+    aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+This returns:
+
+    $ vagrant ssh
+    Welcome to Ubuntu 12.04 LTS (GNU/Linux 3.2.0-23-generic x86_64)
+
+     * Documentation:  https://help.ubuntu.com/
+    Welcome to your Vagrant-built virtual machine.
+    Last login: Fri Jan  3 02:18:55 2014 from 10.0.2.2
+    vagrant@precise64:~$ aurora job create devcluster/www-data/devel/hello_world \
+        /vagrant/hello_world.aurora
+     INFO] Creating job hello_world
+     INFO] Response from scheduler: OK (message: 1 new tasks pending for job
+      www-data/devel/hello_world)
+     INFO] Job url: http://precise64:8081/scheduler/www-data/devel/hello_world
+
+## Watching the Job Run
+
+Now that our job is running, let's see what it's doing. Access the
+scheduler web interface at `http://$scheduler_hostname:$scheduler_port/scheduler`,
+or, when using `vagrant`, at `http://192.168.33.7:8081/scheduler`.
+First we see what Jobs are scheduled:
+
+![Scheduled Jobs](images/ScheduledJobs.png)
+
+Click on your user name, which in this case was `www-data`, and we see the Jobs associated
+with that role:
+
+![Role Jobs](images/RoleJobs.png)
+
+If you click on your `hello_world` Job, you'll see:
+
+![hello_world Job](images/HelloWorldJob.png)
+
+Oops, looks like our first job didn't quite work! The task failed, so we have
+to figure out what went wrong.
+
+Access the page for our Task by clicking on its host.
+
+![Task page](images/TaskBreakdown.png)
+
+Once there, we see that the
+`hello_world` process failed. The Task page captures the standard error and
+standard output streams and makes them available. Clicking through
+to `stderr` on the failed `hello_world` process, we see what happened.
+
+![stderr page](images/stderr.png)
+
+It looks like we made a typo in our Python script. We wanted `xrange`,
+not `xrang`. Edit the `hello_world.py` script to use the correct function and
+we will try again.
+
+    aurora job update devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+This time, the task comes up, we inspect the page, and see that the
+`hello_world` process is running.
+
+![Running Task page](images/runningtask.png)
+
+We then inspect the output by clicking on `stdout` and see our process'
+output:
+
+![stdout page](images/stdout.png)
+
+## Cleanup
+
+Now that we're done, we kill the job using the Aurora client:
+
+    vagrant@precise64:~$ aurora job killall devcluster/www-data/devel/hello_world
+     INFO] Killing tasks for job: devcluster/www-data/devel/hello_world
+     INFO] Response from scheduler: OK (message: Tasks killed.)
+     INFO] Job url: http://precise64:8081/scheduler/www-data/devel/hello_world
+    vagrant@precise64:~$
+
+The job page now shows the `hello_world` tasks as completed.
+
+![Killed Task page](images/killedtask.png)
+
+## Next Steps
+
+Now that you've finished this Tutorial, you should read or do the following:
+
+- [The Aurora Configuration Tutorial](/documentation/0.9.0/configuration-tutorial/), which provides more examples
+  and best practices for writing Aurora configurations. You should also look at
+  the [Aurora + Thermos Configuration Reference](/documentation/0.9.0/configuration-reference/).
+- The [Aurora User Guide](/documentation/0.9.0/user-guide/) provides an overview of how Aurora, Mesos, and
+  Thermos work "under the hood".
+- Explore the Aurora Client - use `aurora -h`, and read the
+  [Aurora Client Commands](/documentation/0.9.0/client-commands/) document.
diff --git a/source/documentation/0.9.0/user-guide.md b/source/documentation/0.9.0/user-guide.md
new file mode 100644
index 0000000..a846dc1
--- /dev/null
+++ b/source/documentation/0.9.0/user-guide.md
@@ -0,0 +1,355 @@
+Aurora User Guide
+-----------------
+
+- [Overview](#overview)
+- [Job Lifecycle](#job-lifecycle)
+	- [Life Of A Task](#life-of-a-task)
+	- [PENDING to RUNNING states](#pending-to-running-states)
+	- [Task Updates](#task-updates)
+	- [HTTP Health Checking and Graceful Shutdown](#http-health-checking-and-graceful-shutdown)
+		- [Tearing a task down](#tearing-a-task-down)
+	- [Giving Priority to Production Tasks: PREEMPTING](#giving-priority-to-production-tasks-preempting)
+	- [Natural Termination: FINISHED, FAILED](#natural-termination-finished-failed)
+	- [Forceful Termination: KILLING, RESTARTING](#forceful-termination-killing-restarting)
+- [Service Discovery](#service-discovery)
+- [Configuration](#configuration)
+- [Creating Jobs](#creating-jobs)
+- [Interacting With Jobs](#interacting-with-jobs)
+
+Overview
+--------
+
+This document gives an overview of how Aurora works under the hood.
+It assumes you've already worked through the "hello world" example
+job in the [Aurora Tutorial](/documentation/0.9.0/tutorial/). Specifics of how to use Aurora are **not**
+given here, but pointers to documentation about how to use Aurora are
+provided.
+
+Aurora is a Mesos framework used to schedule *jobs* onto Mesos. Mesos
+cares about individual *tasks*, but typical jobs consist of dozens or
+hundreds of task replicas. Aurora provides a layer on top of Mesos with
+its `Job` abstraction. An Aurora `Job` consists of a task template and
+instructions for creating near-identical replicas of that task (modulo
+things like "instance id" or specific port numbers which may differ from
+machine to machine).
+
+How many tasks make up a Job is complicated. On a basic level, a Job consists of
+one task template and instructions for creating near-identical replicas of that task
+(otherwise referred to as "instances" or "shards").
+
+However, since Jobs can be updated on the fly, a single Job identifier or *job key*
+can have multiple job configurations associated with it.
+
+For example, consider when I have a Job with 4 instances that each
+request 1 core of cpu, 1 GB of RAM, and 1 GB of disk space as specified
+in the configuration file `hello_world.aurora`. I want to
+update it so it requests 2 GB of RAM instead of 1. I create a new
+configuration file to do that called `new_hello_world.aurora` and
+issue an `aurora job update <job_key_value>/0-1 new_hello_world.aurora`
+command.
+
+This results in instances 0 and 1 having 1 cpu, 2 GB of RAM, and 1 GB of disk space,
+while instances 2 and 3 have 1 cpu, 1 GB of RAM, and 1 GB of disk space. If instance 3
+dies and restarts, it restarts with 1 cpu, 1 GB RAM, and 1 GB disk space.
+
+So that means there are two task configurations active for the same Job
+at the same time, each valid for a different range of instances.
+
+This isn't a recommended pattern, but it is valid and supported by the
+Aurora scheduler. This most often manifests in the "canary pattern" where
+instance 0 runs with a different configuration than instances 1-N to test
+different code versions alongside the actual production job.
+
+A task can merely be a single *process* corresponding to a single
+command line, such as `python2.6 my_script.py`. However, a task can also
+consist of many separate processes, which all run within a single
+sandbox. For example, running multiple cooperating agents together,
+such as `logrotate`, `installer`, master, or slave processes. This is
+where Thermos comes in. While Aurora provides a `Job` abstraction on
+top of Mesos `Tasks`, Thermos provides a `Process` abstraction
+underneath Mesos `Task`s and serves as part of the Aurora framework's
+executor.
+
+You define `Job`s, `Task`s, and `Process`es in a configuration file.
+Configuration files are written in Python, and make use of the Pystachio
+templating language. They end in a `.aurora` extension.
+
+Pystachio is a type-checked dictionary templating library.
+
+> TL;DR
+>
+> -   Aurora manages jobs made of tasks.
+> -   Mesos manages tasks made of processes.
+> -   Thermos manages processes.
+> -   All are defined in a `.aurora` configuration file.
+
+![Aurora hierarchy](images/aurora_hierarchy.png)
+
+Each `Task` has a *sandbox* created when the `Task` starts and garbage
+collected when it finishes. All of a `Task'`s processes run in its
+sandbox, so processes can share state by using a shared current working
+directory.
+
+The sandbox garbage collection policy considers many factors, most
+importantly age and size. It makes a best-effort attempt to keep
+sandboxes around as long as possible post-task in order for service
+owners to inspect data and logs, should the `Task` have completed
+abnormally. But you can't design your applications assuming sandboxes
+will be around forever, e.g. by building log saving or other
+checkpointing mechanisms directly into your application or into your
+`Job` description.
+
+Job Lifecycle
+-------------
+
+When Aurora reads a configuration file and finds a `Job` definition, it:
+
+1.  Evaluates the `Job` definition.
+2.  Splits the `Job` into its constituent `Task`s.
+3.  Sends those `Task`s to the scheduler.
+4.  The scheduler puts the `Task`s into `PENDING` state, starting each
+    `Task`'s life cycle.
+
+### Life Of A Task
+
+![Life of a task](images/lifeofatask.png)
+
+### PENDING to RUNNING states
+
+When a `Task` is in the `PENDING` state, the scheduler constantly
+searches for machines satisfying that `Task`'s resource request
+requirements (RAM, disk space, CPU time) while maintaining configuration
+constraints such as "a `Task` must run on machines  dedicated  to a
+particular role" or attribute limit constraints such as "at most 2
+`Task`s from the same `Job` may run on each rack". When the scheduler
+finds a suitable match, it assigns the `Task` to a machine and puts the
+`Task` into the `ASSIGNED` state.
+
+From the `ASSIGNED` state, the scheduler sends an RPC to the slave
+machine containing `Task` configuration, which the slave uses to spawn
+an executor responsible for the `Task`'s lifecycle. When the scheduler
+receives an acknowledgement that the machine has accepted the `Task`,
+the `Task` goes into `STARTING` state.
+
+`STARTING` state initializes a `Task` sandbox. When the sandbox is fully
+initialized, Thermos begins to invoke `Process`es. Also, the slave
+machine sends an update to the scheduler that the `Task` is
+in `RUNNING` state.
+
+If a `Task` stays in `ASSIGNED` or `STARTING` for too long, the
+scheduler forces it into `LOST` state, creating a new `Task` in its
+place that's sent into `PENDING` state. This is technically true of any
+active state: if the Mesos core tells the scheduler that a slave has
+become unhealthy (or outright disappeared), the `Task`s assigned to that
+slave go into `LOST` state and new `Task`s are created in their place.
+From `PENDING` state, there is no guarantee a `Task` will be reassigned
+to the same machine unless job constraints explicitly force it there.
+
+If there is a state mismatch, (e.g. a machine returns from a `netsplit`
+and the scheduler has marked all its `Task`s `LOST` and rescheduled
+them), a state reconciliation process kills the errant `RUNNING` tasks,
+which may take up to an hour. But to emphasize this point: there is no
+uniqueness guarantee for a single instance of a job in the presence of
+network partitions. If the Task requires that, it should be baked in at
+the application level using a distributed coordination service such as
+Zookeeper.
+
+### Task Updates
+
+`Job` configurations can be updated at any point in their lifecycle.
+Usually updates are done incrementally using a process called a *rolling
+upgrade*, in which Tasks are upgraded in small groups, one group at a
+time.  Updates are done using various Aurora Client commands.
+
+For a configuration update, the Aurora Client calculates required changes
+by examining the current job config state and the new desired job config.
+It then starts a rolling batched update process by going through every batch
+and performing these operations:
+
+- If an instance is present in the scheduler but isn't in the new config,
+  then that instance is killed.
+- If an instance is not present in the scheduler but is present in
+  the new config, then the instance is created.
+- If an instance is present in both the scheduler and the new config, then
+  the client diffs both task configs. If it detects any changes, it
+  performs an instance update by killing the old config instance and adding
+  the new config instance.
+
+The Aurora client continues through the instance list until all tasks are
+updated, in `RUNNING`, and healthy for a configurable amount of time.
+If the client determines the update is not going well (a percentage of health
+checks have failed), it cancels the update.
+
+Update cancellation runs a procedure similar to the described above
+update sequence, but in reverse order. New instance configs are swapped
+with old instance configs and batch updates proceed backwards
+from the point where the update failed. E.g. (0,1,2) (3,4,5) (6,7,
+8-FAIL) results in a rollback in order (8,7,6) (5,4,3) (2,1,0).
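+
+The per-batch decision can be sketched as follows (illustrative logic only, not the client's actual
+code); `old` and `new` map instance ids to task configs:
+
+```python
+def plan_batch(old, new, batch_ids):
+    actions = []
+    for i in batch_ids:
+        if i in old and i not in new:
+            actions.append(('kill', i))        # removed from the new config
+        elif i not in old and i in new:
+            actions.append(('create', i))      # added by the new config
+        elif old[i] != new[i]:
+            actions.append(('replace', i))     # kill old config, add new config
+    return actions
+```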
+
+### HTTP Health Checking and Graceful Shutdown
+
+The Executor implements a protocol for rudimentary control of a task via HTTP.  Tasks subscribe to
+this protocol by declaring a port named `health`.  Take for example this configuration snippet:
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[health]}}')
+
+When this Process is included in a job, the job will be allocated a port, and the command line
+will be replaced with something like:
+
+    ./run_nginx.sh -port 42816
+
+Where 42816 happens to be the allocated port.  Typically, the Executor monitors Processes within
+a task only by liveness of the forked process.  However, when a `health` port is allocated, it will
+also send periodic HTTP health checks.  A task requesting a `health` port must handle the following
+requests:
+
+| HTTP request            | Description                             |
+| ------------            | -----------                             |
+| `GET /health`           | Inquires whether the task is healthy.   |
+| `POST /quitquitquit`    | Task should initiate graceful shutdown. |
+| `POST /abortabortabort` | Final warning task is being killed.     |
+
+Please see the
+[configuration reference](/documentation/0.9.0/configuration-reference/#healthcheckconfig-objects) for
+configuration options for this feature.
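+
+A minimal, standard-library-only sketch of a service honoring these endpoints is shown below
+(Python 3 is assumed, and the hard-coded port 8080 stands in for the allocated `health` port).
+
+```python
+import sys
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+class HealthHandler(BaseHTTPRequestHandler):
+    def _reply(self, body):
+        self.send_response(200)
+        self.end_headers()
+        self.wfile.write(body)
+
+    def do_GET(self):
+        if self.path == '/health':
+            self._reply(b'ok')             # report healthy
+        else:
+            self.send_error(404)
+
+    def do_POST(self):
+        if self.path in ('/quitquitquit', '/abortabortabort'):
+            self._reply(b'shutting down')
+            sys.exit(0)                    # a real service would drain traffic first
+        else:
+            self.send_error(404)
+
+HTTPServer(('0.0.0.0', 8080), HealthHandler).serve_forever()
+```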
+
+#### Snoozing Health Checks
+
+If you need to pause your health check, you can do so by touching a file inside of your sandbox,
+named `.healthchecksnooze`
+
+As long as that file is present, health checks will be disabled, enabling users to gather core dumps
+or other performance measurements without worrying about Aurora's health check killing their
+process.
+
+WARNING: Remember to remove this when you are done, otherwise your instance will have permanently
+disabled health checks.
+
+#### Tearing a task down
+
+The Executor follows an escalation sequence when killing a running task:
+
+  1. If `health` port is not present, skip to (5)
+  2. POST /quitquitquit
+  3. wait 5 seconds
+  4. POST /abortabortabort
+  5. Send SIGTERM (`kill`)
+  6. Send SIGKILL (`kill -9`)
+
+If the Executor notices that all Processes in a Task have aborted during this sequence, it will
+not proceed with subsequent steps.  Note that graceful shutdown is best-effort, and due to the many
+inevitable realities of distributed systems, it may not be performed.
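+
+An illustrative version of this escalation (invented helper names, not the Executor's actual code)
+could look like the following:
+
+```python
+import os
+import signal
+import time
+import urllib.request
+
+def process_alive(pid):
+    try:
+        os.kill(pid, 0)                    # signal 0 only probes for existence
+        return True
+    except OSError:
+        return False
+
+def tear_down(pid, health_port=None, grace_secs=5):
+    if health_port is not None:
+        for endpoint in ('/quitquitquit', '/abortabortabort'):
+            try:
+                urllib.request.urlopen(
+                    'http://localhost:%d%s' % (health_port, endpoint), data=b'')
+            except OSError:
+                pass                       # the task may already be gone
+            time.sleep(grace_secs)
+            if not process_alive(pid):
+                return                     # all processes aborted; stop escalating
+    os.kill(pid, signal.SIGTERM)           # kill
+    time.sleep(grace_secs)
+    if process_alive(pid):
+        os.kill(pid, signal.SIGKILL)       # kill -9
+```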
+
+### Giving Priority to Production Tasks: PREEMPTING
+
+Sometimes a Task needs to be interrupted, such as when a non-production
+Task's resources are needed by a higher priority production Task. This
+type of interruption is called a *pre-emption*. When this happens in
+Aurora, the non-production Task is killed and moved into
+the `PREEMPTING` state when both of the following are true:
+
+- The task being killed is a non-production task.
+- The other task is a `PENDING` production task that hasn't been
+  scheduled due to a lack of resources.
+
+Since production tasks are much more important, Aurora kills off the
+non-production task to free up resources for the production task. The
+scheduler UI shows the non-production task was preempted in favor of the
+production task. At some point, tasks in `PREEMPTING` move to `KILLED`.
+
+Note that non-production tasks consuming many resources are likely to be
+preempted in favor of production tasks.
+
+### Natural Termination: FINISHED, FAILED
+
+A `RUNNING` `Task` can terminate without direct user interaction. For
+example, it may be a finite computation that finishes, even something as
+simple as `echo hello world`. Or it could be an exceptional condition in
+a long-lived service. If the `Task` is successful (its underlying
+processes have succeeded with exit status `0` or finished without
+reaching failure limits) it moves into `FINISHED` state. If it finished
+after reaching a set of failure limits, it goes into `FAILED` state.
+
+### Forceful Termination: KILLING, RESTARTING
+
+You can terminate a `Task` by issuing an `aurora job kill` command, which
+moves it into `KILLING` state. The scheduler then sends the slave a
+request to terminate the `Task`. If the scheduler receives a successful
+response, it moves the Task into `KILLED` state and never restarts it.
+
+The scheduler has access to a non-public `RESTARTING` state. If a `Task`
+is forced into the `RESTARTING` state, the scheduler kills the
+underlying task but in parallel schedules an identical replacement for
+it.
+
+Configuration
+-------------
+
+You define and configure your Jobs (and their Tasks and Processes) in
+Aurora configuration files. Their filenames end with the `.aurora`
+suffix, and you write them in Python making use of the Pystachio
+templating language, along
+with specific Aurora, Mesos, and Thermos commands and methods. See the
+[Configuration Guide and Reference](/documentation/0.9.0/configuration-reference/) and
+[Configuration Tutorial](/documentation/0.9.0/configuration-tutorial/).
+
+Service Discovery
+-----------------
+
+It is possible for the Aurora executor to announce tasks into ServerSets for
+the purpose of service discovery.  ServerSets use the Zookeeper [group membership pattern](http://zookeeper.apache.org/doc/trunk/recipes.html#sc_outOfTheBox)
+of which there are several reference implementations:
+
+  - [C++](https://github.com/apache/mesos/blob/master/src/zookeeper/group.cpp)
+  - [Java](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/zookeeper/ServerSetImpl.java#L221)
+  - [Python](https://github.com/twitter/commons/blob/master/src/python/twitter/common/zookeeper/serverset/serverset.py#L51)
+
+These can also be used natively in Finagle using the [ZookeeperServerSetCluster](https://github.com/twitter/finagle/blob/master/finagle-serversets/src/main/scala/com/twitter/finagle/zookeeper/ZookeeperServerSetCluster.scala).
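+
+For example, the members registered for an announced job can be listed with the third-party
+[kazoo](https://kazoo.readthedocs.io/) ZooKeeper client; the ZooKeeper path below is an assumption
+and depends on how announcing is configured.
+
+```python
+from kazoo.client import KazooClient
+
+client = KazooClient(hosts='192.168.33.7:2181')
+client.start()
+# List ServerSet member znodes for an announced job (path is hypothetical).
+for member in sorted(client.get_children('/aurora/www-data/devel/hello_world')):
+    print(member)   # e.g. member_0000000042
+client.stop()
+```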
+
+For more information about how to configure announcing, see the [Configuration Reference](/documentation/0.9.0/configuration-reference/).
+
+Creating Jobs
+-------------
+
+You create and manipulate Aurora Jobs with the Aurora client, which starts all its
+command line commands with
+`aurora`. See [Aurora Client Commands](/documentation/0.9.0/client-commands/) for details
+about the Aurora Client.
+
+Interacting With Jobs
+---------------------
+
+You interact with Aurora jobs either via:
+
+- Read-only Web UIs
+
+  Part of the output from creating a new Job is a URL for the Job's scheduler UI page.
+
+  For example:
+
+      vagrant@precise64:~$ aurora job create devcluster/www-data/prod/hello \
+      /vagrant/examples/jobs/hello_world.aurora
+      INFO] Creating job hello
+      INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+      INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+  The "Job url" goes to the Job's scheduler UI page. To go to the overall scheduler UI page,
+  stop at the "scheduler" part of the URL, in this case, `http://precise64:8081/scheduler`
+
+  You can also reach the scheduler UI page via the Client command `aurora job open`:
+
+      aurora job open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+  If only the cluster is specified, it goes directly to that cluster's scheduler main page.
+  If the role is specified, it goes to the top-level role page. If the full job key is specified,
+  it goes directly to the job page where you can inspect individual tasks.
+
+  Once you click through to a role page, you see Jobs arranged separately by pending jobs, active
+  jobs, and finished jobs. Jobs are arranged by role, typically a service account for production
+  jobs and user accounts for test or development jobs.
+
+- The Aurora client
+
+  See [client commands](/documentation/0.9.0/client-commands/).
diff --git a/source/documentation/0.9.0/vagrant.md b/source/documentation/0.9.0/vagrant.md
new file mode 100644
index 0000000..0390a59
--- /dev/null
+++ b/source/documentation/0.9.0/vagrant.md
@@ -0,0 +1,137 @@
+Getting Started
+===============
+
+This document shows you how to configure a complete cluster using a virtual machine. This setup
+replicates a real cluster in your development machine as closely as possible. After you complete
+the steps outlined here, you will be ready to create and run your first Aurora job.
+
+The following sections describe these steps in detail:
+
+1. [Overview](#overview)
+1. [Install VirtualBox and Vagrant](#install-virtualbox-and-vagrant)
+1. [Clone the Aurora repository](#clone-the-aurora-repository)
+1. [Start the local cluster](#start-the-local-cluster)
+1. [Log onto the VM](#log-onto-the-vm)
+1. [Run your first job](#run-your-first-job)
+1. [Rebuild components](#rebuild-components)
+1. [Shut down or delete your local cluster](#shut-down-or-delete-your-local-cluster)
+1. [Troubleshooting](#troubleshooting)
+
+
+Overview
+--------
+
+The Aurora distribution includes a set of scripts that enable you to create a local cluster in
+your development machine. These scripts use [Vagrant](https://www.vagrantup.com/) and
+[VirtualBox](https://www.virtualbox.org/) to run and configure a virtual machine. Once the
+virtual machine is running, the scripts install and initialize Aurora and any required components
+to create the local cluster.
+
+
+Install VirtualBox and Vagrant
+------------------------------
+
+First, download and install [VirtualBox](https://www.virtualbox.org/) on your development machine.
+
+Then download and install [Vagrant](https://www.vagrantup.com/). To verify that the installation
+was successful, open a terminal window and type the `vagrant` command. You should see a list of
+common commands for this tool.
+
+
+Clone the Aurora repository
+---------------------------
+
+To obtain the Aurora source distribution, clone its Git repository using the following command:
+
+     git clone git://git.apache.org/aurora.git
+
+
+Start the local cluster
+-----------------------
+
+Now change into the `aurora/` directory, which contains the Aurora source code and
+other scripts and tools:
+
+     cd aurora/
+
+To start the local cluster, type the following command:
+
+     vagrant up
+
+This command uses the configuration scripts in the Aurora distribution to:
+
+* Download a Linux system image.
+* Start a virtual machine (VM) and configure it.
+* Install the required build tools on the VM.
+* Install Aurora's requirements (like [Mesos](http://mesos.apache.org/) and
+[Zookeeper](http://zookeeper.apache.org/)) on the VM.
+* Build and install Aurora from source on the VM.
+* Start Aurora's services on the VM.
+
+This process takes several minutes to complete.
+
+To verify that Aurora is running on the cluster, visit the following URLs:
+
+* Scheduler - http://192.168.33.7:8081
+* Observer - http://192.168.33.7:1338
+* Mesos Master - http://192.168.33.7:5050
+* Mesos Slave - http://192.168.33.7:5051
+
+
+Log onto the VM
+---------------
+
+To SSH into the VM, run the following command in your development machine:
+
+     vagrant ssh
+
+To verify that Aurora is installed in the VM, type the `aurora` command. You should see a list
+of arguments and possible commands.
+
+The `/vagrant` directory on the VM is mapped to the `aurora/` local directory
+from which you started the cluster. You can edit files inside this directory in your development
+machine and access them from the VM under `/vagrant`.
+
+A pre-installed `clusters.json` file refers to your local cluster as `devcluster`, which you
+will use in client commands.
+
+
+Run your first job
+------------------
+
+Now that your cluster is up and running, you are ready to define and run your first job in Aurora.
+For more information, see the [Aurora Tutorial](/documentation/0.9.0/tutorial/).
+
+
+Rebuild components
+------------------
+
+If you are changing Aurora code and would like to rebuild a component, you can use the `aurorabuild`
+command on the VM to build and restart a component.  This is considerably faster than destroying
+and rebuilding your VM.
+
+`aurorabuild` accepts a list of components to build and update; invoke it with no arguments to see
+the list of supported components. For example, to rebuild and restart the client:
+
+     vagrant ssh -c 'aurorabuild client'
+
+
+Shut down or delete your local cluster
+--------------------------------------
+
+To shut down your local cluster, run the `vagrant halt` command in your development machine. To
+start it again, run the `vagrant up` command.
+
+Once you are finished with your local cluster, or if you would otherwise like to start from scratch,
+you can use the command `vagrant destroy` to turn off and delete the virtual file system.
+
+
+Troubleshooting
+---------------
+
+Most vagrant-related problems can be fixed by the following steps:
+
+* Destroying the vagrant environment with `vagrant destroy`
+* Killing any orphaned VMs (see AURORA-499) with `virtualbox` UI or `VBoxManage` command line tool
+* Cleaning the repository of build artifacts and other intermediate output with `git clean -fdx`
+* Bringing up the vagrant environment with `vagrant up`
diff --git a/source/documentation/latest.html.md b/source/documentation/latest.html.md
deleted file mode 100644
index fe20536..0000000
--- a/source/documentation/latest.html.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# Documentation
-
-## Introduction
-Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run long-running services that take advantage of Apache Mesos' scalability, fault-tolerance, and resource isolation. This documentation has been organized into sections with three audiences in mind:
- 
- * Users: General information about the project and to learn how to run an Aurora job.
- * Operators: For those that wish to manage and fine-tune an Aurora cluster.
- * Developers: All the information you need to start modifying Aurora and contributing back to the project.
-
-This documentation is a work in progress, and we encourage you to ask questions on the [Aurora developer list](http://aurora.incubator.apache.org/community/) or the `#aurora` IRC channel on `irc.freenode.net`.
-
-## Users
- * [Install Aurora on virtual machines on your private machine](/documentation/latest/vagrant/)
- * [Hello World Tutorial](/documentation/latest/tutorial/)
- * [User Guide](/documentation/latest/user-guide/)
- * [Configuration Tutorial](/documentation/latest/configuration-tutorial/)
- * [Aurora + Thermos Reference](/documentation/latest/configuration-reference/)
- * [Command Line Client](/documentation/latest/client-commands/)
- * [Aurora Client v2](/documentation/latest/clientv2/)
- * [Cron Jobs](/documentation/latest/cron-jobs/)
-
-## Operators
- * [Deploy Aurora](/documentation/latest/deploying-aurora-scheduler/)
- * [Monitoring](/documentation/latest/monitoring/)
- * [Hooks for Aurora Client API](/documentation/latest/hooks/)
- * [Scheduler Storage](/documentation/latest/storage/)
- * [Scheduler Storage and Maintenance](/documentation/latest/storage-config/)
- * [SLA Measurement](/documentation/latest/sla/)
- * [Resource Isolation and Sizing](/documentation/latest/resource-isolation/)
- * [Generating test resources](/documentation/latest/test-resource-generation/)
-
-## Developers
- * [Contributing to the project](/documentation/latest/contributing/)
- * [Developing the Aurora Scheduler](/documentation/latest/developing-aurora-scheduler/)
- * [Developing the Aurora Client](/documentation/latest/developing-aurora-client/)
- * [Committers Guide](/documentation/latest/committers/)
\ No newline at end of file
diff --git a/source/documentation/latest/additional-resources/presentations.md b/source/documentation/latest/additional-resources/presentations.md
new file mode 100644
index 0000000..e45df99
--- /dev/null
+++ b/source/documentation/latest/additional-resources/presentations.md
@@ -0,0 +1,80 @@
+# Apache Aurora Presentations
+Video and slides from presentations and panel discussions about Apache Aurora.
+
+_(Listed in date descending order)_
+
+<table>
+
+	<tr>
+		<td><img src="/documentation/latest/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png" alt="Mesos and Aurora on a Small Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=q5iIqhaCJ_o">Mesos &amp; Aurora on a Small Scale (Video)</a></strong>
+		<p>Presented by Florian Pfeiffer</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/latest/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png" alt="SLA Aware Maintenance for Operators Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=tZ0-SISvCis">SLA Aware Maintenance for Operators (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>October 8, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon-europe">#MesosCon Europe 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/latest/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png" alt="Shipping Code with Aurora Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=y1hi7K1lPkk">Shipping Code with Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/latest/images/presentations/09_20_2015_twitter_production_scale_thumb.png" alt="Twitter Production Scale Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=nNrh-gdu9m4">Twitter’s Production Scale: Mesos and Aurora Operations (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>August 20, 2015 at <a href="http://events.linuxfoundation.org/events/archive/2015/mesoscon">#MesosCon 2015</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/latest/images/presentations/04_30_2015_monolith_to_microservices_thumb.png" alt="From Monolith to Microservices with Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=yXkOgnyK4Hw">From Monolith to Microservices w/ Aurora (Video)</a></strong>
+		<p>Presented by Thanos Baskous, Tony Dong, Dobromir Montauk</p>
+		<p>April 30, 2015 at <a href="http://www.meetup.com/Bay-Area-Apache-Aurora-Users-Group/events/221219480/">Bay Area Apache Aurora Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/latest/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png" alt="Aurora + Mesos in Practice at Twitter Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=1XYJGX_qZVU">Aurora + Mesos in Practice at Twitter (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 07, 2015 at <a href="http://www.bigeng.io/aurora-mesos-in-practice-at-twitter">Bigcommerce TechTalk</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/latest/images/presentations/02_28_2015_apache_aurora_thumb.png" alt="Apache Auroraの始めかた Slideshow Thumbnail" /></td>
+		<td><strong><a href="http://www.slideshare.net/zembutsu/apache-aurora-introduction-and-tutorial-osc15tk">Apache Auroraの始めかた (Slides)</a></strong>
+		<p>Presented by Masahito Zembutsu</p>
+		<p>February 28, 2015 at <a href="http://www.ospn.jp/osc2015-spring/">Open Source Conference 2015 Tokyo Spring</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/latest/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png" alt="Apache Aurora Adopters Panel Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=2Jsj0zFdRlg">Apache Aurora Adopters Panel (Video)</a></strong>
+		<p>Panelists Ben Staffin, Josh Adams, Bill Farner, Berk Demir</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/latest/images/presentations/02_19_2015_aurora_at_twitter_thumb.png" alt="Operating Apache Aurora and Mesos at Twitter Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=E4lxX6epM_U">Operating Apache Aurora and Mesos at Twitter (Video)</a></strong>
+		<p>Presented by Joe Smith</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/latest/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png" alt="Apache Aurora and Mesos at TellApart" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=ZZXtXLvTXAE">Apache Aurora and Mesos at TellApart (Video)</a></strong>
+		<p>Presented by Steve Niemitz</p>
+		<p>February 19, 2015 at <a href="http://www.meetup.com/Bay-Area-Mesos-User-Group/events/220279080/">Bay Area Mesos Users Group</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/latest/images/presentations/08_21_2014_past_present_future_thumb.png" alt="Past, Present, and Future of the Aurora Scheduler Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=Dsc5CPhKs4o">Past, Present, and Future of the Aurora Scheduler (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>August 21, 2014 at <a href="http://events.linuxfoundation.org/events/archive/2014/mesoscon">#MesosCon 2014</a></p></td>
+	</tr>
+	<tr>
+		<td><img src="/documentation/latest/images/presentations/03_25_2014_introduction_to_aurora_thumb.png" alt="Introduction to Apache Aurora Video Thumbnail" /></td>
+		<td><strong><a href="https://www.youtube.com/watch?v=asd_h6VzaJc">Introduction to Apache Aurora (Video)</a></strong>
+		<p>Presented by Bill Farner</p>
+		<p>March 25, 2014 at <a href="https://www.eventbrite.com/e/aurora-and-mesosframeworksmeetup-tickets-10850994617">Aurora and Mesos Frameworks Meetup</a></p></td>
+	</tr>
+</table>
diff --git a/source/documentation/latest/additional-resources/tools.md b/source/documentation/latest/additional-resources/tools.md
new file mode 100644
index 0000000..262d16d
--- /dev/null
+++ b/source/documentation/latest/additional-resources/tools.md
@@ -0,0 +1,24 @@
+# Tools
+
+Various tools integrate with Aurora. Is there a tool missing? Let us know, or submit a patch to add it!
+
+* Load-balancing technology used to direct traffic to services running on Aurora:
+  - [synapse](https://github.com/airbnb/synapse) based on HAProxy
+  - [aurproxy](https://github.com/tellapart/aurproxy) based on nginx
+  - [jobhopper](https://github.com/benley/aurora-jobhopper) performs HTTP redirects for easy developer and administrator access
+
+* RPC libraries that integrate with Aurora's [service discovery mechanism](../../features/service-discovery/):
+  - [linkerd](https://linkerd.io/) RPC proxy
+  - [finagle](https://twitter.github.io/finagle) (Scala)
+  - [scales](https://github.com/steveniemitz/scales) (Python)
+
+* Monitoring:
+  - [collectd-aurora](https://github.com/zircote/collectd-aurora) for cluster monitoring using collectd
+  - [Prometheus Aurora exporter](https://github.com/tommyulfsparre/aurora_exporter) for cluster monitoring using Prometheus
+  - [Prometheus service discovery integration](http://prometheus.io/docs/operating/configuration/#zookeeper-serverset-sd-configurations-serverset_sd_config) for discovering and monitoring services running on Aurora
+
+* Packaging and deployment:
+  - [aurora-packaging](https://github.com/apache/aurora-packaging), the source of the official Aurora packages
+
+* Thrift Clients:
+  - [gorealis](https://github.com/paypal/gorealis) for communicating with the scheduler using Go
diff --git a/source/documentation/latest/client-commands.md b/source/documentation/latest/client-commands.md
deleted file mode 100644
index 809239f..0000000
--- a/source/documentation/latest/client-commands.md
+++ /dev/null
@@ -1,516 +0,0 @@
-Aurora Client Commands
-======================
-
-The most up-to-date reference is in the client itself: use the
-`aurora help` subcommand (for example, `aurora help` or
-`aurora help create`) to find the latest information on parameters and
-functionality. Note that `aurora help open` does not work, due to underlying issues with
-reflection.
-
-- [Introduction](#introduction)
-- [Cluster Configuration](#cluster-configuration)
-- [Job Keys](#job-keys)
-- [Modifying Aurora Client Commands](#modifying-aurora-client-commands)
-- [Regular Jobs](#regular-jobs)
-    - [Creating and Running a Job](#creating-and-running-a-job)
-    - [Running a Command On a Running Job](#running-a-command-on-a-running-job)
-    - [Killing a Job](#killing-a-job)
-    - [Updating a Job](#updating-a-job)
-        - [Asynchronous job updates (beta)](#user-content-asynchronous-job-updates-beta)
-    - [Renaming a Job](#renaming-a-job)
-    - [Restarting Jobs](#restarting-jobs)
-- [Cron Jobs](#cron-jobs)
-- [Comparing Jobs](#comparing-jobs)
-- [Viewing/Examining Jobs](#viewingexamining-jobs)
-    - [Listing Jobs](#listing-jobs)
-    - [Inspecting a Job](#inspecting-a-job)
-    - [Versions](#versions)
-    - [Checking Your Quota](#checking-your-quota)
-    - [Finding a Job on Web UI](#finding-a-job-on-web-ui)
-    - [Getting Job Status](#getting-job-status)
-    - [Opening the Web UI](#opening-the-web-ui)
-    - [SSHing to a Specific Task Machine](#sshing-to-a-specific-task-machine)
-    - [Templating Command Arguments](#templating-command-arguments)
-
-Introduction
-------------
-
-Once you have written an `.aurora` configuration file that describes
-your Job and its parameters and functionality, you interact with Aurora
-using Aurora Client commands. This document describes all of these commands
-and how and when to use them. All Aurora Client commands start with
-`aurora`, followed by the name of the specific command and its
-arguments.
-
-*Job keys* are a very common argument to Aurora commands, as well as the
-gateway to useful information about a Job. Before using Aurora, you
-should read the next section which describes them in detail. The section
-after that briefly describes how you can modify the behavior of certain
-Aurora Client commands, linking to a detailed document about how to do
-that.
-
-This is followed by the Regular Jobs section, which describes the basic
-Client commands for creating, running, and manipulating Aurora Jobs.
-After that are sections on Comparing Jobs and Viewing/Examining Jobs; in
-other words, the various commands for getting information and metadata about
-Aurora Jobs.
-
-Cluster Configuration
----------------------
-
-The client must be able to find a configuration file that specifies available clusters. This file
-declares shorthand names for clusters, which are in turn referenced by job configuration files
-and client commands.
-
-The client will load at most two configuration files, making both of their defined clusters
-available. The first is intended to be a system-installed cluster, using the path specified in
-the environment variable `AURORA_CONFIG_ROOT`, defaulting to `/etc/aurora/clusters.json` if the
-environment variable is not set. The second is a user-installed file, located at
-`~/.aurora/clusters.json`.
-
-A cluster configuration is formatted as JSON.  The simplest cluster configuration is one that
-communicates with a single (non-leader-elected) scheduler.  For example:
-
-```javascript
-[{
-  "name": "example",
-  "scheduler_uri": "localhost:55555",
-}]
-```
-
-A configuration for a leader-elected scheduler would contain something like:
-
-```javascript
-[{
-  "name": "example",
-  "zk": "192.168.33.7",
-  "scheduler_zk_path": "/aurora/scheduler"
-}]
-```
-
-For more details on cluster configuration see the
-[Client Cluster Configuration](/documentation/latest/client-cluster-configuration/) documentation.
-
-Job Keys
---------
-
-A job key is a unique system-wide identifier for an Aurora-managed
-Job, for example `cluster1/web-team/test/experiment204`. It is a 4-tuple
-consisting of, in order, *cluster*, *role*, *environment*, and
-*jobname*, separated by slashes (`/`). Cluster is the name of an Aurora
-cluster. Role is the Unix service account under which the Job
-runs. Environment is a namespace component like `devel`, `test`,
-`prod`, or `stagingN`. Jobname is the Job's name.
-
-The combination of all four values uniquely specifies the Job. If any
-one value is different from that of another job key, the two job keys
-refer to different Jobs. For example, job key
-`cluster1/tyg/prod/workhorse` is different from
-`cluster1/tyg/prod/workcamel` is different from
-`cluster2/tyg/prod/workhorse` is different from
-`cluster2/foo/prod/workhorse` is different from
-`cluster1/tyg/test/workhorse`.
-
-Role names are user accounts existing on the slave machines. If you don't know what accounts
-are available, contact your sysadmin.
-
-Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
-
-Modifying Aurora Client Commands
---------------------------------
-
-For certain Aurora Client commands, you can define hook methods that run
-either before or after an action that takes place during the command's
-execution, as well as based on whether the action finished successfully or failed
-during execution. Basically, a hook is code that lets you extend the
-command's actions. The hook executes on the client side, specifically on
-the machine executing Aurora commands.
-
-Hooks can be associated with these Aurora Client commands:
-
-  - `cancel_update`
-  - `create`
-  - `kill`
-  - `restart`
-  - `update`
-
-The process for writing and activating them is complex enough
-that we explain it in a devoted document, [Hooks for Aurora Client API](/documentation/latest/hooks/).
-
-Regular Jobs
-------------
-
-This section covers Aurora commands related to running, killing,
-renaming, updating, and restarting a basic Aurora Job.
-
-### Creating and Running a Job
-
-    aurora create <job key> <configuration file>
-
-Creates and then runs a Job with the specified job key based on a `.aurora` configuration file.
-The configuration file may also contain and activate hook definitions.
-
-`create` can take four named parameters:
-
-- `-E NAME=VALUE` Bind a Thermos mustache variable name to a
-  value. Multiple flags specify multiple values. Defaults to `[]`.
-- `-o, --open_browser` Open a browser window to the scheduler UI Job
-  page after a job changing operation happens. When `False`, the Job
-  URL prints on the console and the user has to copy/paste it
-  manually. Defaults to `False`. Does not work when running in Vagrant.
-- `-j, --json` If specified, the configuration argument is read as a
-  string in JSON format. Defaults to `False`.
-- `--wait_until=STATE` Block the client until all the Tasks have
-  transitioned into the requested state. Possible values are: `PENDING`,
-  `RUNNING`, `FINISHED`. Default: `PENDING`
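-
-For example, a hypothetical invocation that binds a template variable and
-blocks until all Tasks are running might look like this (the job key, file
-name, and variable binding are illustrative only):
-
-    aurora create -E profile=staging --wait_until=RUNNING \
-      cluster1/web-team/test/experiment204 experiment204.aurora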
-
-### Running a Command On a Running Job
-
-    aurora run <job_key> <cmd>
-
-Runs a shell command on all machines currently hosting shards of a
-single Job.
-
-`run` supports the same command line wildcards used to populate a Job's
-commands; i.e. anything in the `{{mesos.*}}` and `{{thermos.*}}`
-namespaces.
-
-`run` can take three named parameters:
-
-- `-t NUM_THREADS`, `--threads=NUM_THREADS` The number of threads to
-  use, defaulting to `1`.
-- `--user=SSH_USER` ssh as this user instead of the given role value.
-  Defaults to None.
-- `-e, --executor_sandbox` Run the command in the executor sandbox
-  instead of the Task sandbox. Defaults to `False`.
-
-### Killing a Job
-
-    aurora kill <job key> <configuration file>
-
-Kills all Tasks associated with the specified Job, blocking until all
-are terminated. Defaults to killing all shards in the Job.
-
-The `<configuration file>` argument for `kill` is optional. Use it only
-if it contains hook definitions and activations that affect the
-kill command.
-
-`kill` can take two named parameters:
-
-- `-o, --open_browser` Open a browser window to the scheduler UI Job
-  page after a job changing operation happens. When `False`, the Job
-  URL prints on the console and the user has to copy/paste it
-  manually. Defaults to `False`. Does not work when running in Vagrant.
-- `--shards=SHARDS` A list of shard ids to act on. Can be a
-  comma-separated list (e.g. 0,1,2), a range (e.g. 0-2), or any
-  combination of the two (e.g. 0-2,5,7-9). Defaults to acting on all
-  shards.
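-
-For example, to kill only shards 0 through 2 and 5 of the hypothetical job
-key used above:
-
-    aurora kill --shards=0-2,5 cluster1/web-team/test/experiment204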
-
-### Updating a Job
-
-    aurora update [--shards=ids] <job key> <configuration file>
-    aurora cancel_update <job key> <configuration file>
-
-Given a running job, does a rolling update to reflect a new
-configuration version. Only updates Tasks in the Job with a changed
-configuration. You can further restrict the set of Tasks operated on by
-using `--shards` and specifying a comma-separated list of job shard ids.
-
-You may want to run `aurora diff` beforehand to validate which Tasks
-have different configurations.
-
-Jobs being updated are locked to make sure the update finishes without
-disruption. If the update terminates abnormally, the lock may stay
-around and cause subsequent update attempts to fail.
-`aurora cancel_update` unlocks the Job specified by
-its `job_key` argument. Be sure you don't issue `cancel_update` when
-another user is working with the specified Job.
-
-The `<configuration file>` argument for `cancel_update` is optional. Use
-it only if it contains hook definitions and activations that affect the
-`cancel_update` command. The `<configuration file>` argument for
-`update` is required, but in addition to a new configuration it can be
-used to define and activate hooks for `update`.
-
-`update` can take four named parameters:
-
-- `--shards=SHARDS` A list of shard ids to update. Can be a
-  comma-separated list (e.g. 0,1,2), a range (e.g. 0-2), or any
-  combination of the two (e.g. 0-2,5,7-9). If not set, all shards are
-  acted on. Defaults to None.
-- `-E NAME=VALUE` Binds a Thermos mustache variable name to a value.
-  Use multiple flags to specify multiple values. Defaults to `[]`.
-- `-j, --json` If specified, configuration is read in JSON format.
-  Defaults to `False`.
-- `--updater_health_check_interval_seconds=HEALTH_CHECK_INTERVAL_SECONDS`
-  Time interval between subsequent shard status checks. Defaults to `3`.
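-
-For example, a cautious workflow (the job key, shard range, and file name
-are illustrative only) is to diff first and then update only the shards that
-changed:
-
-    aurora diff cluster1/web-team/test/experiment204 experiment204.aurora
-    aurora update --shards=0-4 cluster1/web-team/test/experiment204 experiment204.aurora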
-
-#### Asynchronous job updates (beta)
-
-As of 0.6.0, Aurora will coordinate updates (and rollbacks) within the
-scheduler. Performing updates this way also allows the scheduler to display
-update progress and job update history in the browser.
-
-There are several sub-commands to manage job updates:
-
-    aurora2 beta-update start <job key> <configuration file>
-    aurora2 beta-update status <job key>
-    aurora2 beta-update pause <job key>
-    aurora2 beta-update resume <job key>
-    aurora2 beta-update abort <job key>
-    aurora2 beta-update list <cluster>
-
-When you `start` a job update, the command will return once it has sent the
-instructions to the scheduler.  At that point, you may view detailed
-progress for the update with the `status` subcommand, in addition to viewing
-graphical progress in the web browser.  You may also get a full listing of
-in-progress updates in a cluster with `list`.
-
-Once an update has been started, you can `pause` it to keep the update but
-halt progress. This can be useful for tasks like debugging a partially-updated
-job to determine whether you would like to proceed. You can then `resume` to
-continue.
-
-You may `abort` a job update regardless of the state it is in. This will
-instruct the scheduler to completely abandon the job update and leave the job
-in the current (possibly partially-updated) state.
-
-### Renaming a Job
-
-Renaming is a tricky operation as downstream clients must be informed of
-the new name. A conservative approach
-to renaming suitable for production services is:
-
-1.  Modify the Aurora configuration file to change the role,
-    environment, and/or name as appropriate to the standardized naming
-    scheme.
-2.  Check that only these naming components have changed
-    with `aurora diff`.
-
-        aurora diff <job_key> <job_configuration>
-
-3.  Create the (identical) job at the new key. You may need to request a
-    temporary quota increase.
-
-        aurora create <new_job_key> <job_configuration>
-
-4.  Migrate all clients over to the new job key. Update all links and
-    dashboards. Ensure that both job keys run identical versions of the
-    code while in this state.
-5.  After verifying that all clients have successfully moved over, kill
-    the old job.
-
-        aurora kill <old_job_key>
-
-6.  If you received a temporary quota increase, be sure to let the
-    powers that be know you no longer need the additional capacity.
-
-### Restarting Jobs
-
-`restart` restarts all shards of the Job identified by the given job key:
-
-    aurora restart <job_key> <configuration file>
-
-Restarts are controlled on the client side, so aborting
-the `restart` command halts the restart operation.
-
-`restart` does a rolling restart. You almost always want to do this, but
-not if all shards of a service are misbehaving and are
-completely dysfunctional. To not do a rolling restart, use
-the `--shards` option described below.
-
-**Note**: `restart` only applies its command line arguments; it neither
-uses nor is affected by `update.config`. Restarting
-does ***not*** involve a configuration change. To update the
-configuration, use `update.config`.
-
-The `<configuration file>` argument for restart is optional. Use it only
-if it contains hook definitions and activations that affect the
-`restart` command.
-
-In addition to the required job key argument, there are eight
-`restart` specific optional arguments:
-
-- `--updater_health_check_interval_seconds`: Defaults to `3`, the time
-  interval between subsequent shard status checks.
-- `--shards=SHARDS`: Defaults to None, which restarts all shards.
-  Otherwise, only the specified-by-id shards restart. They can be
-  comma-separated `(0, 8, 9)`, a range `(3-5)` or a
-  combination `(0, 3-5, 8, 9-11)`.
-- `--batch_size`: Defaults to `1`, the number of shards to be started
-  in one iteration. So, for example, for value 3, it tries to restart
-  the first three shards specified by `--shards` simultaneously, then
-  the next three, and so on.
-- `--max_per_shard_failures=MAX_PER_SHARD_FAILURES`: Defaults to `0`,
-  the maximum number of restarts per shard during restart. When
-  exceeded, it increments the total failure count.
-- `--max_total_failures=MAX_TOTAL_FAILURES`: Defaults to `0`, the
-  maximum total number of shard failures tolerated during restart.
-- `-o, --open_browser` Open a browser window to the scheduler UI Job
-  page after a job changing operation happens. When `False`, the Job
-  URL prints on the console and the user has to copy/paste it
-  manually. Defaults to `False`. Does not work when running in Vagrant.
-- `--restart_threshold`: Defaults to `60`, the maximum number of
-  seconds a shard may take to move into the `RUNNING` state before
-  it's considered a failure.
-- `--watch_secs`: Defaults to `45`, the minimum number of seconds a
-  shard must remain in the `RUNNING` state before it's considered a success.
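-
-For example, to restart shards 0 through 5 of a hypothetical job two at a
-time, allowing each shard 60 seconds to reach `RUNNING`:
-
-    aurora restart --shards=0-5 --batch_size=2 --restart_threshold=60 \
-      cluster1/web-team/test/experiment204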
-
-Cron Jobs
----------
-
-You will see various commands and options relating to cron jobs in
-`aurora -help` and similar. Ignore them, as they're not yet implemented.
-You might be able to use them without causing an error, but nothing happens
-if you do.
-
-Comparing Jobs
---------------
-
-    aurora diff <job_key> config
-
-Compares a job configuration against a running job. By default the diff
-is determined using `diff`, though you may choose an alternate diff program
-by specifying the `DIFF_VIEWER` environment variable.
-
-There are two named parameters:
-
-- `-E NAME=VALUE` Bind a Thermos mustache variable name to a
-  value. Multiple flags may be used to specify multiple values.
-  Defaults to `[]`.
-- `-j, --json` Read the configuration argument in JSON format.
-  Defaults to `False`.
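-
-For example, to compare a hypothetical job against its configuration file
-using an alternate diff program such as `vimdiff`:
-
-    DIFF_VIEWER=vimdiff aurora diff cluster1/web-team/test/experiment204 experiment204.aurora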
-
-Viewing/Examining Jobs
-----------------------
-
-Above we discussed creating, killing, and updating Jobs. Here we discuss
-how to view and examine Jobs.
-
-### Listing Jobs
-
-    aurora list_jobs cluster/role
-
-Lists all Jobs registered with the Aurora scheduler in the named cluster for the named role.
-
-It has two named parameters:
-
-- `--pretty`: Displays job information in pretty-printed format.
-  Defaults to `False`.
-- `-c`, `--show-cron`: Shows the cron schedule for jobs. Defaults to
-  `False`. Do not use, as it's not yet implemented.
-
-### Inspecting a Job
-
-    aurora inspect <job_key> config
-
-`inspect` verifies that the specified job can be parsed from a
-configuration file, and displays the parsed configuration. It has four
-named parameters:
-
-- `--local`: Inspect the configuration that the `spawn` command would
-  create, defaulting to `False`.
-- `--raw`: Shows the raw configuration. Defaults to `False`.
-- `-j`, `--json`: If specified, configuration is read in JSON format.
-  Defaults to `False`.
-- `-E NAME=VALUE`: Bind a Thermos mustache variable name to a value.
-  You can use multiple flags to specify multiple values. Defaults
-  to `[]`.
-
-### Versions
-
-    aurora version
-
-Lists client build information and what Aurora API version it supports.
-
-### Checking Your Quota
-
-    aurora get_quota --cluster=CLUSTER role
-
-Prints the production quota allocated to the role in the given
-cluster.
-
-### Finding a Job on Web UI
-
-When you create a job, part of the output response contains a URL that goes
-to the job's scheduler UI page. For example:
-
-    vagrant@precise64:~$ aurora create devcluster/www-data/prod/hello /vagrant/examples/jobs/hello_world.aurora
-    INFO] Creating job hello
-    INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
-    INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
-
-You can go to the scheduler UI page for this job via `http://precise64:8081/scheduler/www-data/prod/hello`.
-You can go to the overall scheduler UI page by truncating that URL after `scheduler`: `http://precise64:8081/scheduler`.
-
-Once you click through to a role page, you see Jobs grouped into
-pending jobs, active jobs, and finished jobs.
-Jobs are arranged by role, typically a service account for
-production jobs and user accounts for test or development jobs.
-
-### Getting Job Status
-
-    aurora status <job_key>
-
-Returns the status of recent tasks associated with the Job specified by
-`job_key` in its supplied cluster. Typically this includes
-a mix of active tasks (running or assigned) and inactive tasks
-(successful, failed, and lost).
-
-### Opening the Web UI
-
-Use the Job's web UI scheduler URL or the `aurora status` command to find out on which
-machines individual tasks are scheduled. You can open the web UI via the
-`open` command line command if invoked from your machine:
-
-    aurora open [<cluster>[/<role>[/<env>/<job_name>]]]
-
-If only the cluster is specified, it goes directly to that cluster's
-scheduler main page. If the role is specified, it goes to the top-level
-role page. If the full job key is specified, it goes directly to the job
-page where you can inspect individual tasks.
-
-### SSHing to a Specific Task Machine
-
-    aurora ssh <job_key> <shard number>
-
-You can have the Aurora client ssh directly to the machine that has been
-assigned a particular Job/shard number. This may be useful for quickly
-diagnosing issues such as performance issues or abnormal behavior on a
-particular machine.
-
-It can take three named parameters:
-
-- `-e`, `--executor_sandbox`: Run `ssh` in the executor sandbox
-  instead of the task sandbox. Defaults to `False`.
-- `--user=SSH_USER`: `ssh` as the given user instead of as the role in
-  the `job_key` argument. Defaults to `None`.
-- `-L PORT:NAME`: Add a tunnel from local port `PORT` to the remote
-  named port `NAME`. Defaults to `[]`.
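-
-For example, to ssh to the machine running shard 3 of a hypothetical job and
-tunnel its `http` named port to local port 8080:
-
-    aurora ssh -L 8080:http cluster1/web-team/test/experiment204 3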
-
-### Templating Command Arguments
-
-    aurora run [-e] [-t THREADS] <job_key> -- <<command-line>>
-
-Given a job specification, run the supplied command on all hosts and
-return the output. You may use the standard Mustache templating rules:
-
-- `{{thermos.ports[name]}}` substitutes the specific named port of the
-  task assigned to this machine
-- `{{mesos.instance}}` substitutes the shard id of the job's task
-  assigned to this machine
-- `{{thermos.task_id}}` substitutes the task id of the job's task
-  assigned to this machine
-
-For example, the following type of pattern can be a powerful diagnostic
-tool:
-
-    aurora run -t5 cluster1/tyg/devel/seizure -- \
-      'curl -s -m1 localhost:{{thermos.ports[http]}}/vars | grep uptime'
-
-By default, the command runs in the Task's sandbox. The `-e` option can
-run the command in the executor's sandbox. This is mostly useful for
-Aurora administrators.
-
-You can parallelize the runs by using the `-t` option.
diff --git a/source/documentation/latest/clientv2.md b/source/documentation/latest/clientv2.md
deleted file mode 100644
index 471d80c..0000000
--- a/source/documentation/latest/clientv2.md
+++ /dev/null
@@ -1,406 +0,0 @@
-Aurora Client v2
-=================
-
-Overview
------------
-
-Our goal is to replace the current Aurora command-line client. The
-current client suffers from an early monolithic structure, and a long
-development history of rapid unplanned evolution.
-
-In addition to its internal problems, the current Aurora client is
-confusing for users. There are several different kinds of objects
-manipulated by the Aurora command line, and the difference between
-them is often not clear. (What's the difference between a job and a
-configuration?) For each type of object, there are different commands,
-and it's hard to remember which command should be used for which kind
-of object.
-
-Instead of continuing to let the Aurora client develop and evolve
-randomly, it's time to take a principled look at the Aurora command
-line, and figure out how to make our command line processing make
-sense. At the same time, the code needs to be cleaned up, and divided
-into small comprehensible units based on a plugin architecture.
-
-Doing this now will give us a more intuitive, consistent, and easy to
-use client, as well as a sound platform for future development.
-
-Goals
--------
-
-* A command line tool for interacting with Aurora that is easy for
-  users to understand.
-* A noun/verb command model.
-* A modular source-code architecture.
-* Non-disruptive transition for users.
-
-Non-Goals
-----------
-
-* The most important non-goal is that we're not trying to redesign the
-  Aurora scheduler, the Aurora executor, or any of the peripheral tools
-  that the Aurora command line interacts with; we only want to create a
-  better command line client.
-* We do not want to change Thermos, Mesos, Hadoop, etc.
-* We do not want to create new objects that users will work with to
-  interact with Mesos or Aurora.
-* We do not want to change Aurora job configuration files or file formats.
-* We do not want to change the Aurora API.
-* We don't want to boil the ocean: there are many things that we could
-  include in the scope of this project, but we don't want to be
-  distracted by re-implementing all of twitter.commons in order to
-  create a perfect Aurora client.
-
-
-Background
------------
-
-Aurora is a system that's used to run and manage services and
-service-like jobs running in a datacenter. Aurora takes care of
-allocating resources in order to schedule and run jobs without
-requiring teams to manage dedicated hardware. The heart of Aurora is
-called the scheduler, and is responsible for finding and assigning
-resources to tasks.
-
-The Aurora scheduler provides a thrift API. The scheduler API is
-low-level and difficult to interact with. Users do not interact
-directly with the Aurora API; instead, they use a command-line tool,
-which provides a collection of easy-to-use commands. This command-line
-tool, in turn, talks to the scheduler API to launch and manage jobs in
-datacenter clusters. The command-line tool is called the Aurora
-client.
-
-The current implementation of the Aurora client is haphazard,
-and really needs to be cleaned up:
-
-- The code is monolithic and hard to maintain. It's implemented using
-  `twitter.common.app`, which assumes that all of the command code lives
-  in a single source file. To work around this, and allow some
-  subdivision, it uses a hack of `twitter.common.app` to force
-  registration of commands from multiple modules. It's hard to
-  understand, and hard to modify.
-- The current code is very difficult to test. Because of the way it's
-  built, there is no consistent way of passing key application data
-  around. As a result, each unit test of client operations needs a
-  difficult-to-assemble custom setup of mock objects.
-- The current code handles errors poorly, and it is difficult to
-  fix. Many common errors produce unacceptable results. For example,
-  issuing an unknown command generates an error message "main takes 0
-  parameters but received 1"; passing an invalid parameter to other
-  commands frequently produces a stack trace.
-- The current command line is confusing for users. There are several
-  different kinds of objects manipulated by the Aurora command line,
-  and the difference between them is often not entirely clear. (What's
-  the difference between a job and a configuration?)
-  For each type of object, there are different
-  commands, and it's frequently not clear just which command should be
-  used for which object.
-
-
-Instead of continuing to let it develop and evolve randomly, it's time
-to take a principled look at the Aurora command line, and figure out
-how to make command line processing make sense. At the same time, the
-code needs to be cleaned up, and divided into small comprehensible
-units based on a plugin architecture.
-
-Requirements
--------------
-
-Aurora is aimed at engineers who run jobs and services in a
-datacenter. As a result, the requirements for the aurora client are
-all engineering focused:
-
-* __Consistency__: commands should follow a consistent structure, so that
-  users can apply knowledge and intuition gained from working with
-  some Aurora commands to new commands. This means that when commands
-  can re-use the same options, they should, and that objects should be
-  referred to with consistent syntax throughout the tool.
-* __Helpfulness__: commands should be structured so that the system can
-  generate helpful error messages. If a user just runs "aurora", they
-  should get a basic usage message. If they try to run an invalid
-  command, they should get a message that the command is invalid, not
-  a stack dump or "command main() takes 0 parameters but received
-  2". Commands should not generate extraneous output that obscures the
-  key facts that the user needs to know, and the default behavior of
-  commands should not generate outputs that will be routinely ignored
-  by users.
-* __Extensibility__: it should be easy to plug in new commands,
-  including custom commands, to adapt the Aurora client to new
-  environments.
-* __Script-friendly command output__: every command should at least include
-  an option that generates output that's script-friendly. Scripts should be
-  able to work with command-output without needing to do screen scraping.
-* __Scalability__: the tools should be usable for any foreseeable size
-  of Aurora datacenters and machine clusters.
-
-Design Overview
------------------
-
-The Aurora client will be reimplemented using a noun-verb model,
-similar to the cmdlet model used by Monad/Windows Powershell. Users
-will work by providing a noun for the type of object being operated
-on, and a verb for the specific operation being performed on the
-object, followed by parameters. For example, to create a job, the user
-would execute: "`aurora job create smfd/mchucarroll/devel/jobname
-job.aurora`". The noun is `job` and the verb is `create`.
-
-The client will be implemented following that noun-verb
-convention. Each noun will be a separate component, which can be
-registered into the command-line framework. Each verb will be
-implemented by a class that registers with the appropriate noun. Nouns
-and verbs will each provide methods that add their command line
-options and parameters to the options parser, using the Python
-argparse library.
-
-Detailed Design
------------------
-
-### Interface
-
-In this section, we'll walk through the types of objects that the
-client can manipulate, and the operations that need to be provided for
-each object. These form the primary interface that engineers will use
-to interact with Aurora.
-
-In the command-line, each of the object types will have an Aurora
-subcommand. The commands to manipulate the object type will follow the
-type. For example, here are several commands in the old syntax
-contrasted against the new noun/verb syntax.
-
-* Get quota for a role:
-   * Noun/Verb syntax:  `aurora quota get west/www-data`
-   * Old syntax: `aurora get_quota --cluster=smf1 www-data`
-* Create job:
-   * Noun/Verb syntax: `aurora job create west/www-data/test/job job.aurora`
-   * Old syntax: `aurora create west/www-data/test/job job.aurora`
-* Schedule a job to run at a specific interval:
-   * Noun/verb: `aurora cron schedule east/www-data/test/job job.aurora`
-   * Old: `aurora create east/www-data/test/job job.aurora`
-
-As you can see in these examples, the new syntax is more consistent:
-you always specify the cluster where a command executes as part of an
-identifier, whereas in the old syntax it was sometimes part of the
-jobkey and sometimes specified with a "--cluster" option.
-
-The new syntax is also more clear and explicit: even without knowing
-much about Aurora, it's clear what objects each command is acting on,
-whereas in the old syntax, commands like "create" are unclear.
-
-### The Job Noun
-
-A job is a configured program ready to run in Aurora. A job is,
-conceptually, a task factory: when a job is submitted to the Aurora
-scheduler, it creates a collection of tasks. The job contains a
-complete description of everything it needs to create a collection of
-tasks. (Note that this subsumes "service" commands. A service is just
-a task whose configuration sets the is_service flag, so we don't have
-separate commands for working with services.) Jobs are specified using
-`cluster/role/env/name` jobkey syntax.
-
-* `aurora job create *jobkey* *config*`:  submits a job to a cluster, launching the task(s) specified by the job config.
-* `aurora job status *jobkey*`: query job status. Prints information about the job,
-  whether it's running, etc., to standard out. If jobkey includes
-  globs, it should list all jobs that match the glob
-* `aurora job kill *jobkey*/*instanceids*`: kill/stop some of a job's instances. This stops the job's tasks; if the job
-  has service tasks, they'll be disabled so that they won't restart.
-* `aurora job killall *jobkey*`: kill all of the instances of a job. This
-  is distinct from the *kill* command as a safety measure: omitting the
-  instances from a kill command shouldn't result in destroying the entire job.
-* `aurora job restart *jobkey*`: conceptually, this will kill a job, and then
-  launch it again. If the job does not exist, then fail with an error
-  message.  In fact, the underlying implementation does the
-  kill/relaunch on a rolling basis - so it's not an immediate kill of
-  all shards/instances, followed by a delay as all instances relaunch,
-  but rather a controlled gradual process.
-* `aurora job list *jobkey*`: list all jobs that match the jobkey spec that are
-  registered with the scheduler. This will include both jobs that are
-  currently running, and jobs that are scheduled to run at a later
-  time. The job key can be partial: if it specifies cluster, all jobs
-  on the cluster will be listed; cluster/role, all jobs running on the cluster under the role will be listed, etc.
-
-The Schedule Noun (Cron)
---------------------------
-
-Note (3/21/2014): The "cron" noun is _not_ implemented yet.
-
-Cron is a scheduler adjunct that periodically runs a job on a
-schedule. The cron commands all manipulate cron schedule entries. The
-schedules are specified as a part of the job configuration.
-
-* `aurora cron schedule jobkey config`: schedule a job to run by cron. If a cron job already exists,
-this replaces its template with the new one.
-* `aurora cron deschedule jobkey`: removes a job's entry from the cron schedule.
-* `aurora cron status jobkey`: query for a scheduled job's status.
-
-The Quota Noun
----------------
-
-A quota is a data object maintained by the scheduler that specifies the maximum
-resources that may be consumed by jobs owned by a particular role. In the future,
-we may add new quota types. At some point, we'll also probably add an administrator
-command to set quotas.
-
-* `aurora quota get *cluster/role*`
-
-
-Implementation
----------------
-
-The current command line is monolithic. Every command on an Aurora
-object is a top-level command in the Aurora client. In the
-restructured command line, each of the primary object types
-manipulated by Aurora should have its own sub-command.
-
-* Advantages of this approach:
-   * Easier to detangle the command-line processing. The top-level
-     command-processing will be a small set of subcommand
-     processors. Option processing for each subcommand can be offloaded
-     to a separate module.
-   * The aurora top-level help command will be much more
-     comprehensible. Instead of giving a huge list of every possible
-     command, it will present the list of top-level object types, and
-     then users can request help on the commands for a specific type
-     of object.
-   * The sub-commands can be separated into distinct command-line
-     tools when appropriate.
-
-### Command Structure and Options Processing
-
-The implementation will follow closely on Pants goals. Pants goals use
-a static registration system to add new subcommands. In pants, each
-goal command is an implementation of a command interface, and provides
-implementations of methods to register options and parameters, and to
-actually execute the command. In this design, commands are modular and
-easy to implement, debug, and combine in different ways.
-
-For the Aurora client, we plan to use a two-level variation of the
-basic concept from pants. At the top-level we will have nouns. A noun
-will define some common command-line parameters required by all of its
-verbs, and will provide a registration hook for attaching verbs. Nouns
-will be implemented as a subclass of a basic Noun type.
-
-Each verb will, similarly, be implemented as a subclass of Verb. Verbs
-will be able to specify command-line options and parameters.
-
-Both `Noun` and `Verb` will be subclasses of a common base-class `AuroraCommand`:
-
-    class AuroraCommand(object):
-      def get_options(self):
-      """Gets the set of command-line options objects for this command.
-      The result is a list of CommandOption objects.
-       """
-        pass
-
-      @property
-      def help(self):
-        """Returns the help message for this command"""
-
-      @property
-      def usage(self):
-        """Returns a short usage description of the command"""
-
-      @property
-      def name(self):
-        """Returns the command name"""
-
-
-A command-line tool will be implemented as an instance of a `CommandLine`:
-
-    class CommandLine(object):
-      """The top-level object implementing a command-line application."""
-
-      @property
-      def name(self):
-        """Returns the name of this command-line tool"""
-
-      def print_out(self, str):
-        print(str)
-
-      def print_err(self, str):
-        print(str, file=sys.stderr)
-
-      def register_noun(self, noun):
-        """Adds a noun to the application"""
-
-      def register_plugin(self, plugin):
-        """Adds a configuration plugin to the system"""
-
-
-Nouns are registered into a command-line using the `register_noun`
-method. They are weakly coupled to the application, making it easy to
-use a single noun in several different command-line tools. Nouns allow
-the registration of verbs using the `register_verb` method.
-
-When commands execute, they're given an instance of a *context object*.
-The context object must be an instance of a subclass of `AuroraCommandContext`.
-Options, parameters, and IO are all accessed using the context object. The context
-is created dynamically by the noun object owning the verb being executed. Developers
-are strongly encouraged to implement custom contexts for their nouns, and move functionality
-shared by the noun's verbs into the context object. The context interface is:
-
-    class Context(object):
-      class Error(Exception): pass
-
-      class ArgumentException(Error): pass
-
-      class CommandError(Error): pass
-
-      @classmethod
-      def exit(cls, code, msg):
-        """Exit the application with an error message"""
-        raise cls.CommandError(code, msg)
-
-      def print_out(self, msg, indent=0):
-        """Prints a message to standard out, with an indent"""
-
-      def print_err(self, msg, indent=0):
-        """Prints a message to standard err, with an indent"""
-
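-To illustrate how these pieces are intended to fit together, a hypothetical
-verb might look roughly like the following sketch. `Verb`, `get_options`,
-`execute`, and `CommandOption` come from the design above; the specific
-option, the `jobspec` attribute, and the `EXIT_OK` constant are assumptions
-made purely for the example:
-
-    class StatusVerb(Verb):
-      """Sketch of a verb that prints the status of a job (illustrative only)."""
-
-      @property
-      def name(self):
-        return 'status'
-
-      def get_options(self):
-        # Each verb contributes its own CommandOption instances to the parser.
-        return [CommandOption('--json', default=False, action='store_true',
-                              help='Print status in JSON format')]
-
-      def execute(self, context):
-        # Parsed options and IO all flow through the context object.
-        job_key = context.options.jobspec  # assumed attribute name
-        context.print_out('Status for %s' % job_key)
-        return EXIT_OK  # assumed standard exit code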
-
-In addition to nouns and verbs, there's one more kind of registerable
-component, called a *configuration plugin*. These objects add a set of
-command-line options that can be passed to *all* of the commands
-implemented in the tool. Before the command is executed, the
-configuration plugin will be invoked, and will process its
-command-line arguments. This is useful for general configuration
-changes, like establishing a secure tunnel to talk to machines in a
-datacenter. (A useful way to think of a plugin is as something like an
-aspect that can be woven in to aurora to provide environment-specific
-configuration.) A configuration plugin is implemented as an instance
-of class `ConfigurationPlugin`, and registered with the
-`register_plugin` method of the `CommandLine` object. The interface of
-a plugin is:
-
-    class ConfigurationPlugin(object):
-      """A component that can be plugged in to a command-line."""
-
-      @abstractmethod
-      def get_options(self):
-        """Return the set of options processed by this plugin"""
-
-      @abstractmethod
-      def execute(self, context):
-        """Run the context/command line initialization code for this plugin."""
-
-
-### Command Execution
-
-Options processing and command execution are built as a facade over Python's
-standard argparse library. All of the actual argument processing is done by
-argparse.
-
-Once the options are processed, the framework will start to execute the command. Command execution consists of:
-
-1. Create a context object. The framework will use the argparse options to identify
-   which noun is being invoked, and will call that noun's `create_context` method.
-   The argparse options object will be stored in the context.
-2. Execute any configuration plugins. Before any command is invoked, the framework
-   will first iterate over all of the registered configuration plugins. For each
-   plugin, it will invoke the `execute` method.
-3. The noun will use the context to find out what verb is being invoked, and it will
-   then call that verb's `execute` method.
-4. The command will exit. Its return code will be whatever was returned by the verb's
-   `execute` method.
-
-Commands are expected to return a code from a list of standard exit codes,
-which can be found in `src/main/python/apache/aurora/client/cli/__init__.py`.
diff --git a/source/documentation/latest/committers.md b/source/documentation/latest/committers.md
deleted file mode 100644
index 23a25c1..0000000
--- a/source/documentation/latest/committers.md
+++ /dev/null
@@ -1,79 +0,0 @@
-Setting up your email account
------------------------------
-Once your Apache ID has been set up, you can configure your account, add ssh keys, and set up an
-email forwarding address at
-
-  http://id.apache.org
-
-Additional instructions for setting up your new committer email can be found at
-
-  http://www.apache.org/dev/user-email.html
-
-The recommended setup is to configure all services (mailing lists, JIRA, ReviewBoard) to send
-emails to your @apache.org email address.
-
-
-Creating a gpg key for releases
--------------------------------
-In order to create a release candidate, you will need a gpg key published to an external key server,
-and that key will need to be added to our KEYS file as well.
-
-1. Create a key:
-
-               gpg --gen-key
-
-2. Add your gpg key to the Apache Aurora KEYS file:
-
-               git clone https://git-wip-us.apache.org/repos/asf/incubator-aurora.git
-               (gpg --list-sigs <KEY ID> && gpg --armor --export <KEY ID>) >> KEYS
-               git add KEYS && git commit -m "Adding gpg key for <APACHE ID>"
-               ./rbt post -o -g
-
-3. Publish the key to an external key server:
-
-               gpg --keyserver pgp.mit.edu --send-keys <KEY ID>
-
-4. Propagate the updated KEYS file to the Apache Aurora svn dist locations listed below:
-
-               https://dist.apache.org/repos/dist/dev/incubator/aurora/KEYS
-               https://dist.apache.org/repos/dist/release/incubator/aurora/KEYS
-
-5. Add your key to git config for use with the release scripts:
-
-               git config --global user.signingkey <KEY ID>
-
-
-Creating a release
-------------------
-The following will guide you through the steps to create a release candidate, vote, and finally an
-official Apache Aurora release. Before starting, your gpg key should be in the KEYS file and you
-must have access to commit to the dist.a.o repositories.
-
-1. Ensure that all issues resolved for this release candidate are tagged with the correct Fix
-Version in Jira; the changelog script will use this to generate the CHANGELOG in step #2.
-
-2. Create a release candidate. This will automatically update the CHANGELOG and commit it, create a
-branch, and update the current version within the trunk. To create a minor version update and publish
-it, run
-
-               ./build-support/release/release-candidate -l m -p
-
-3. Update, if necessary, the draft email created from the `release-candidate` script in step #2 and
-send the [VOTE] email to the dev@ and private@ mailing lists. You can verify the release signature
-and checksums by running
-
-				./build-support/release/verify-release-candidate
-
-4. Wait for the vote to complete. If the vote fails, address any issues, go back to step #1, and
-run again; this time use the -r flag to increment the release candidate version. This will
-automatically clean up the release candidate rc0 branch and source distribution.
-
-               ./build-support/release/release-candidate -l m -r 1 -p
-
-5. Once the vote has successfully passed, create the release
-
-               ./build-support/release/release
-
-6. Update the draft email created from the `release` script in step #5 to include the Apache IDs for
-all binding votes and send the [RESULT][VOTE] email to the dev@ and private@ mailing lists.
-
diff --git a/source/documentation/latest/configuration-reference.md b/source/documentation/latest/configuration-reference.md
deleted file mode 100644
index 91aaf24..0000000
--- a/source/documentation/latest/configuration-reference.md
+++ /dev/null
@@ -1,538 +0,0 @@
-Aurora + Thermos Configuration Reference
-========================================
-
-- [Aurora + Thermos Configuration Reference](#aurora--thermos-configuration-reference)
-- [Introduction](#introduction)
-- [Process Schema](#process-schema)
-    - [Process Objects](#process-objects)
-      - [name](#name)
-      - [cmdline](#cmdline)
-      - [max_failures](#max_failures)
-      - [daemon](#daemon)
-      - [ephemeral](#ephemeral)
-      - [min_duration](#min_duration)
-      - [final](#final)
-- [Task Schema](#task-schema)
-    - [Task Object](#task-object)
-      - [name](#name-1)
-      - [processes](#processes)
-        - [constraints](#constraints)
-      - [resources](#resources)
-      - [max_failures](#max_failures-1)
-      - [max_concurrency](#max_concurrency)
-      - [finalization_wait](#finalization_wait)
-    - [Constraint Object](#constraint-object)
-    - [Resource Object](#resource-object)
-- [Job Schema](#job-schema)
-    - [Job Objects](#job-objects)
-    - [Services](#services)
-    - [UpdateConfig Objects](#updateconfig-objects)
-    - [HealthCheckConfig Objects](#healthcheckconfig-objects)
-    - [Announcer Objects](#announcer-objects)
-- [Specifying Scheduling Constraints](#specifying-scheduling-constraints)
-- [Template Namespaces](#template-namespaces)
-    - [mesos Namespace](#mesos-namespace)
-    - [thermos Namespace](#thermos-namespace)
-- [Basic Examples](#basic-examples)
-    - [hello_world.aurora](#hello_worldaurora)
-    - [Environment Tailoring](#environment-tailoring)
-      - [hello_world_productionized.aurora](#hello_world_productionizedaurora)
-
-Introduction
-============
-
-Don't know where to start? The Aurora configuration schema is very
-powerful, and configurations can become quite complex for advanced use
-cases.
-
-For examples of simple configurations to get something up and running
-quickly, check out the [Tutorial](/documentation/latest/tutorial/). When you feel comfortable with the basics, move
-on to the [Configuration Tutorial](/documentation/latest/configuration-tutorial/) for more in-depth coverage of
-configuration design.
-
-For additional basic configuration examples, see [the end of this document](#basic-examples).
-
-Process Schema
-==============
-
-Process objects consist of required `name` and `cmdline` attributes. You can customize Process
-behavior with its optional attributes. Remember, Processes are handled by Thermos.
-
-### Process Objects
-
-  **Attribute Name**  | **Type**    | **Description**
-  ------------------- | :---------: | ---------------------------------
-   **name**           | String      | Process name (Required)
-   **cmdline**        | String      | Command line (Required)
-   **max_failures**   | Integer     | Maximum process failures (Default: 1)
-   **daemon**         | Boolean     | When True, this is a daemon process. (Default: False)
-   **ephemeral**      | Boolean     | When True, this is an ephemeral process. (Default: False)
-   **min_duration**   | Integer     | Minimum duration between process restarts in seconds. (Default: 15)
-   **final**          | Boolean     | When True, this process is a finalizing one that should run last. (Default: False)
-
-#### name
-
-The name is any valid UNIX filename string (specifically no
-slashes, NULLs or leading periods). Within a Task object, each Process name
-must be unique.
-
-#### cmdline
-
-The command line run by the process. The command line is invoked in a bash
-subshell, so it can involve full-blown bash scripts. However, nothing is
-supplied for command-line arguments, so `$*` is unspecified.
-
-#### max_failures
-
-The maximum number of failures (non-zero exit statuses) this process can
-have before being marked permanently failed and not retried. If a
-process permanently fails, Thermos looks at the failure limit of the task
-containing the process (usually 1) to determine if the task has
-failed as well.
-
-Setting `max_failures` to 0 makes the process retry
-indefinitely until it achieves a successful (zero) exit status.
-It retries at most once every `min_duration` seconds to prevent
-an effective denial of service attack on the coordinating Thermos scheduler.
-
-#### daemon
-
-By default, Thermos processes are non-daemon. If `daemon` is set to True, a
-successful (zero) exit status does not prevent future process runs.
-Instead, the process reinvokes after `min_duration` seconds.
-However, the maximum failure limit still applies. A combination of
-`daemon=True` and `max_failures=0` causes a process to retry
-indefinitely regardless of exit status. This should be avoided
-for very short-lived processes because of the accumulation of
-checkpointed state for each process run. When running in Mesos
-specifically, `max_failures` is capped at 100.
-
-#### ephemeral
-
-By default, Thermos processes are non-ephemeral. If `ephemeral` is set to
-True, the process' status is not used to determine if its containing task
-has completed. For example, consider a task with a non-ephemeral
-webserver process and an ephemeral logsaver process
-that periodically checkpoints its log files to a centralized data store.
-The task is considered finished once the webserver process has
-completed, regardless of the logsaver's current status.
-
-#### min_duration
-
-Processes may succeed or fail multiple times during a single task's
-duration. Each of these is called a *process run*. `min_duration` is
-the minimum number of seconds the scheduler waits before running the
-same process.
-
-#### final
-
-Processes can be grouped into two classes: ordinary processes and
-finalizing processes. By default, Thermos processes are ordinary. They
-run as long as the task is considered healthy (i.e., no failure
-limits have been reached.) But once all regular Thermos processes
-finish or the task reaches a certain failure threshold, it
-moves into a "finalization" stage and runs all finalizing
-processes. These are typically processes necessary for cleaning up the
-task, such as log checkpointers, or perhaps e-mail notifications that
-the task completed.
-
-Finalizing processes may not depend upon ordinary processes or
-vice versa; however, finalizing processes may depend upon other
-finalizing processes and otherwise follow a typical process
-schedule.
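-
-As a sketch, a task could archive its logs after its ordinary processes
-exit by adding a finalizing process such as the following (the process name,
-command, and paths are illustrative only):
-
-        checkpoint_logs = Process(
-          name = 'checkpoint_logs',
-          cmdline = 'tar czf /var/archive/logs.tar.gz .logs',
-          final = True)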
-
-Task Schema
-===========
-
-Tasks fundamentally consist of a `name` and a list of Process objects stored as the
-value of the `processes` attribute. Processes can be further constrained with
-`constraints`. By default, `name`'s value inherits from the first Process in the
-`processes` list, so for simple `Task` objects with one Process, `name`
-can be omitted. In Mesos, `resources` is also required.
-
-### Task Object
-
-   **param**               | **type**                         | **description**
-   ---------               | :---------:                      | ---------------
-   ```name```              | String                           | Process name (Required) (Default: ```processes0.name```)
-   ```processes```         | List of ```Process``` objects    | List of ```Process``` objects bound to this task. (Required)
-   ```constraints```       | List of ```Constraint``` objects | List of ```Constraint``` objects constraining processes.
-   ```resources```         | ```Resource``` object            | Resource footprint. (Required)
-   ```max_failures```      | Integer                          | Maximum process failures before being considered failed (Default: 1)
-   ```max_concurrency```   | Integer                          | Maximum number of concurrent processes (Default: 0, unlimited concurrency.)
-   ```finalization_wait``` | Integer                          | Amount of time allocated for finalizing processes, in seconds. (Default: 30)
-
-#### name
-`name` is a string denoting the name of this task. It defaults to the name of the first Process in
-the list of Processes associated with the `processes` attribute.
-
-#### processes
-
-`processes` is an unordered list of `Process` objects. To constrain the order
-in which they run, use `constraints`.
-
-##### constraints
-
-A list of `Constraint` objects. Currently it supports only one type,
-the `order` constraint. `order` is a list of process names
-that should run in the order given. For example,
-
-        process = Process(cmdline = "echo hello {{name}}")
-        task = Task(name = "echoes",
-                    processes = [process(name = "jim"), process(name = "bob")],
-                    constraints = [Constraint(order = ["jim", "bob"])])
-
-Constraints can be supplied ad-hoc and in duplicate. Not all
-Processes need be constrained, however Tasks with cycles are
-rejected by the Thermos scheduler.
-
-Use the `order` function as shorthand to generate `Constraint` lists.
-The following:
-
-        order(process1, process2)
-
-is shorthand for
-
-        [Constraint(order = [process1.name(), process2.name()])]
-
-#### resources
-
-Takes a `Resource` object, which specifies the amounts of CPU, memory, and disk space resources
-to allocate to the Task.
-
-#### max_failures
-
-`max_failures` is the number of times processes that are part of this
-Task can fail before the entire Task is marked for failure.
-
-For example:
-
-        template = Process(max_failures=10)
-        task = Task(
-          name = "fail",
-          processes = [
-             template(name = "failing", cmdline = "exit 1"),
-             template(name = "succeeding", cmdline = "exit 0")
-          ],
-          max_failures=2)
-
-The `failing` Process could fail 10 times before being marked as
-permanently failed, and the `succeeding` Process would succeed on the
-first run. The task would succeed despite only allowing for two failed
-processes. To be more specific, there would be 10 failed process runs
-yet 1 failed process.
-
-#### max_concurrency
-
-For Tasks with a number of expensive but otherwise independent
-processes, you may want to limit the amount of concurrency
-the Thermos scheduler provides rather than artificially constraining
-it via `order` constraints. For example, a test framework may
-generate a task with 100 test run processes, but wants to run it on
-a machine with only 4 cores. You can limit the amount of parallelism to
-4 by setting `max_concurrency=4` in your task configuration.
-
-For example, the following task spawns 180 Processes ("mappers")
-to compute individual elements of a 180 degree sine table, all dependent
-upon one final Process ("reducer") to tabulate the results:
-
-    def make_mapper(id):
-      return Process(
-        name = "mapper%03d" % id,
-        cmdline = "echo 'scale=50;s(%d\*4\*a(1)/180)' | bc -l >
-                   temp.sine_table.%03d" % (id, id))
-
-    def make_reducer():
-      return Process(name = "reducer", cmdline = "cat temp.\* | nl \> sine\_table.txt
-                     && rm -f temp.\*")
-
-    processes = map(make_mapper, range(180))
-
-    task = Task(
-      name = "mapreduce",
-      processes = processes + [make_reducer()],
-      constraints = [Constraint(order = [mapper.name(), 'reducer']) for mapper
-                     in processes],
-      max_concurrency = 8)
-
-#### finalization_wait
-
-Tasks have three active stages: `ACTIVE`, `CLEANING`, and `FINALIZING`. The
-`ACTIVE` stage is when ordinary processes run. This stage lasts as
-long as Processes are running and the Task is healthy. The moment either
-all Processes have finished successfully or the Task has reached a
-maximum Process failure limit, it goes into the `CLEANING` stage and sends
-SIGTERMs to all currently running Processes and their process trees.
-Once all Processes have terminated, the Task goes into `FINALIZING` stage
-and invokes the schedule of all Processes with the "final" attribute set to True.
-
-This whole process, from the end of the `ACTIVE` stage to the end of the
-`FINALIZING` stage, must happen within `finalization_wait` seconds. If it does not
-finish during that time, all remaining Processes are sent SIGKILLs
-(or if they depend upon uncompleted Processes, are
-never invoked.)
-
-Client applications with higher priority may force a shorter
-finalization wait (e.g. through parameters to `thermos kill`), so this
-is mostly a best-effort signal.
-
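-As a rough sketch (the process names and commands here are purely illustrative),
-a Task that gives a finalizing log-archiving Process extra time to finish might
-look like:
-
-    archive_logs = Process(
-      name = 'archive_logs',
-      cmdline = 'tar czf logs.tar.gz .logs',
-      final = True)
-
-    task = Task(
-      name = 'server',
-      processes = [Process(name = 'server', cmdline = './run-server.sh'), archive_logs],
-      resources = Resources(cpu = 1.0, ram = 1*GB, disk = 1*GB),
-      finalization_wait = 120)
-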
-### Constraint Object
-
-Constraint objects currently support only a single ordering constraint, `order`,
-which specifies that its processes run sequentially in the order given. By
-default, all processes run in parallel when bound to a `Task` without
-ordering constraints.
-
-   param | type           | description
-   ----- | :----:         | -----------
-   order | List of String | List of processes by name (String) that should be run serially.
-
-### Resource Object
-
-Specifies the amount of CPU, RAM, and disk resources the task needs. See the
-[Resource Isolation document](/documentation/latest/resource-isolation/) for suggested values and to understand how
-resources are allocated.
-
-  param      | type    | description
-  -----      | :----:  | -----------
-  ```cpu```  | Float   | Fractional number of cores required by the task.
-  ```ram```  | Integer | Bytes of RAM required by the task.
-  ```disk``` | Integer | Bytes of disk required by the task.
-
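-For example, a modest task footprint might be declared as follows (the values
-are illustrative; the `MB`/`GB` multipliers are the same ones used in the
-examples elsewhere in this document):
-
-    resources = Resources(cpu = 2.0, ram = 2 * GB, disk = 8 * GB)
-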
-Job Schema
-==========
-
-### Job Objects
-
-   name | type | description
-   ------ | :-------: | -------
-  ```task``` | Task | The Task object to bind to this job. Required.
-  ```name``` | String | Job name. (Default: inherited from the task attribute's name)
-  ```role``` | String | Job role account. Required.
-  ```cluster``` | String | Cluster in which this job is scheduled. Required.
-   ```environment``` | String | Job environment, default ```devel```. Must be one of ```prod```, ```devel```, ```test``` or ```staging<number>```.
-  ```contact``` | String | Best email address to reach the owner of the job. For production jobs, this is usually a team mailing list.
-  ```instances```| Integer | Number of instances (sometimes referred to as replicas or shards) of the task to create. (Default: 1)
-   ```cron_schedule``` | String | Cron schedule in cron format. May only be used with non-service jobs. See [Cron Jobs](/documentation/latest/cron-jobs/) for more information. Default: None (not a cron job.)
-  ```cron_collision_policy``` | String | Policy to use when a cron job is triggered while a previous run is still active. ```KILL_EXISTING```: kill the previous run and schedule the new run. ```CANCEL_NEW```: let the previous run continue and cancel the new run. (Default: KILL_EXISTING)
-  ```update_config``` | ```update_config``` object | Parameters for controlling the rate and policy of rolling updates.
-  ```constraints``` | dict | Scheduling constraints for the tasks. See the section on the [constraint specification language](#Specifying-Scheduling-Constraints)
-  ```service``` | Boolean | If True, restart tasks regardless of success or failure. (Default: False)
-  ```max_task_failures``` | Integer | Maximum number of failures after which the task is considered to have failed (Default: 1) Set to -1 to allow for infinite failures
-  ```priority``` | Integer | Preemption priority to give the task (Default 0). Tasks with higher priorities may preempt tasks at lower priorities.
-  ```production``` | Boolean |  Whether or not this is a production task backed by quota (Default: False). Production jobs may preempt any non-production job, and may only be preempted by production jobs in the same role and of higher priority. To run jobs at this level, the job role must have the appropriate quota.
-  ```health_check_config``` | ```health_check_config``` object | Parameters for controlling a task's health checks via HTTP. Only used if a health port was assigned with a command line wildcard.
-
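-To make the table concrete, a minimal cron job sketch might look like the
-following (the cluster, role, and command are illustrative; `SimpleTask` is
-described in the configuration tutorial):
-
-    nightly_cleanup = Job(
-      cluster = 'cluster1',
-      role = 'www-data',
-      environment = 'prod',
-      name = 'nightly_cleanup',
-      contact = 'myteam-team@foocorp.com',
-      cron_schedule = '0 2 * * *',
-      cron_collision_policy = 'KILL_EXISTING',
-      task = SimpleTask(name = 'nightly_cleanup', command = 'rm -rf /tmp/scratch'))
-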
-### Services
-
-Jobs with the `service` flag set to True are called Services. The `Service`
-alias can be used as shorthand for `Job` with `service=True`.
-Services are differentiated from non-service Jobs in that tasks
-always restart on completion, whether successful or unsuccessful.
-Jobs without the service bit set only restart up to
-`max_task_failures` times and only if they terminated unsuccessfully
-either due to human error or machine failure.
-
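-For example, the following two declarations are equivalent (the `web_task`
-object is an assumed, previously defined Task):
-
-    web = Job(cluster = 'cluster1', role = 'www-data', environment = 'prod',
-              name = 'web', task = web_task, service = True)
-
-    web = Service(cluster = 'cluster1', role = 'www-data', environment = 'prod',
-                  name = 'web', task = web_task)
-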
-### UpdateConfig Objects
-
-Parameters for controlling the rate and policy of rolling updates.
-
-| object                       | type     | description
-| ---------------------------- | :------: | ------------
-| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
-| ```restart_threshold```      | Integer  | Maximum number of seconds a shard may take to move into the ```RUNNING``` state before it is considered a failure (Default: 60)
-| ```watch_secs```             | Integer  | Minimum number of seconds a shard must remain in ```RUNNING``` state before considered a success (Default: 45)
-| ```max_per_shard_failures``` | Integer  | Maximum number of restarts per shard during update. Increments total failure count when this limit is exceeded. (Default: 0)
-| ```max_total_failures```     | Integer  | Maximum number of shard failures to be tolerated in total during an update. Cannot be greater than or equal to the total number of tasks in a job. (Default: 0)
-
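-A sketch of an `UpdateConfig`, to be passed to a Job's `update_config` field
-(the specific values are illustrative):
-
-    update_config = UpdateConfig(
-      batch_size = 5,
-      restart_threshold = 60,
-      watch_secs = 45,
-      max_per_shard_failures = 1)
-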
-### HealthCheckConfig Objects
-
-Parameters for controlling a task's health checks via HTTP.
-
-| object                         | type      | description
-| -------                        | :-------: | --------
-| ```initial_interval_secs```    | Integer   | Initial delay for performing an HTTP health check. (Default: 15)
-| ```interval_secs```            | Integer   | Interval on which to check the task's health via HTTP. (Default: 10)
-| ```timeout_secs```             | Integer   | HTTP request timeout. (Default: 1)
-| ```max_consecutive_failures``` | Integer   | Maximum number of consecutive failures tolerated before considering a task unhealthy (Default: 0)
-
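-A sketch of a `HealthCheckConfig`, to be passed to a Job's
-`health_check_config` field (the values are illustrative):
-
-    health_check_config = HealthCheckConfig(
-      initial_interval_secs = 30,
-      interval_secs = 15,
-      timeout_secs = 2,
-      max_consecutive_failures = 3)
-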
-### Announcer Objects
-
-If the `announce` field in the Job configuration is set, each task will be
-registered in the ServerSet `/aurora/role/environment/jobname` in the
-zookeeper ensemble configured by the executor.  If no Announcer object is specified,
-no announcement will take place.  For more information about ServerSets, see the [User Guide](/documentation/latest/user-guide/).
-
-| object                         | type      | description
-| -------                        | :-------: | --------
-| ```primary_port```             | String    | Which named port to register as the primary endpoint in the ServerSet (Default: `http`)
-| ```portmap```                  | dict      | A mapping of additional endpoints to be announced in the ServerSet (Default: `{ 'aurora': '{{primary_port}}' }`)
-
-### Port aliasing with the Announcer `portmap`
-
-The primary endpoint registered in the ServerSet is the one allocated to the port
-specified by the `primary_port` in the `Announcer` object, by default
-the `http` port.  This port can be referenced from anywhere within a configuration
-as `{{thermos.ports[http]}}`.
-
-Without the port map, each named port would be allocated a unique port number.
-The `portmap` allows two different named ports to be aliased together.  The default
-`portmap` aliases the `aurora` port (i.e. `{{thermos.ports[aurora]}}`) to
-the `http` port.  Even though the two ports can be referenced independently,
-only one port is allocated by Mesos.  Any port referenced in a `Process` object
-but which is not in the portmap will be allocated dynamically by Mesos and announced as well.
-
-It is possible to use the portmap to alias names to static port numbers, e.g.
-`{'http': 80, 'https': 443, 'aurora': 'http'}`.  In this case, referencing
-`{{thermos.ports[aurora]}}` would look up `{{thermos.ports[http]}}` then
-find a static port 80.  No port would be requested of or allocated by Mesos.
-
-Static ports should be used cautiously as Aurora does nothing to prevent two
-tasks with the same static port allocations from being co-scheduled.
-External constraints such as slave attributes should be used to enforce such
-guarantees should they be needed.
-
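-As a sketch of the static-port case above, the `announce` field of a Job could
-be set to:
-
-    announce = Announcer(
-      primary_port = 'http',
-      portmap = {'http': 80, 'https': 443, 'aurora': 'http'})
-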
-Specifying Scheduling Constraints
-=================================
-
-Most users will not need to specify constraints explicitly, as the
-scheduler automatically inserts reasonable defaults that attempt to
-ensure reliability without impacting schedulability. For example, the
-scheduler inserts a `host: limit:1` constraint, ensuring
-that your shards run on different physical machines. Please do not
-set this field unless you are sure of what you are doing.
-
-In the `Job` object there is a map `constraints` from String to String
-allowing the user to tailor the schedulability of tasks within the job.
-
-Each slave in the cluster is assigned a set of string-valued
-key/value pairs called attributes. For example, consider the host
-`cluster1-aaa-03-sr2` and its following attributes (given in key:value
-format): `host:cluster1-aaa-03-sr2` and `rack:aaa`.
-
-The constraint map's key is the name of the attribute on which we
-constrain Tasks within our Job. The value is how we constrain them.
-There are two types of constraints: *limit constraints* and *value
-constraints*.
-
-| constraint    | description
-| ------------- | --------------
-| Limit         | A string that specifies a limit for a constraint. Starts with <code>'limit:</code> followed by an Integer and closing single quote, such as ```'limit:1'```.
-| Value         | A string that specifies a value for a constraint. To include a list of values, separate the values using commas. To negate the values of a constraint, start with a ```!```.
-
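-For example, a value constraint that pins tasks to a particular rack, and its
-negated form (the `rack` attribute is the one from the example host above):
-
-    constraints = {
-      'rack': 'aaa',
-    }
-
-    constraints = {
-      'rack': '!aaa',
-    }
-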
-You can also control machine diversity using constraints. The below
-constraint ensures that no more than two instances of your job may run
-on a single host. Think of this as a "group by" limit.
-
-    constraints = {
-      'host': 'limit:2',
-    }
-
-Likewise, you can use constraints to control rack diversity, e.g. at
-most one task per rack:
-
-    constraints = {
-      'rack': 'limit:1',
-    }
-
-Use these constraints sparingly as they can dramatically reduce Tasks' schedulability.
-
-Template Namespaces
-===================
-
-Currently, a few Pystachio namespaces have special semantics. Using them
-in your configuration allows you to tailor application behavior
-through environment introspection or interact in special ways with the
-Aurora client or Aurora-provided services.
-
-### mesos Namespace
-
-The `mesos` namespace contains the `instance` variable that can be used
-to distinguish between Task replicas.
-
-| variable name     | type       | description
-| --------------- | :--------: | -------------
-| ```instance```    | Integer    | The instance number of the created task. A job with 5 replicas has instance numbers 0, 1, 2, 3, and 4.
-
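-For instance, a sketch of a Process that writes per-replica output (the command
-is illustrative):
-
-    process = Process(
-      name = 'backup',
-      cmdline = 'mysqldump mydb > backup-shard-{{mesos.instance}}.sql')
-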
-### thermos Namespace
-
-The `thermos` namespace contains variables that work directly on the
-Thermos platform in addition to Aurora. This namespace is fully
-compatible with Tasks invoked via the `thermos` CLI.
-
-| variable      | type                     | description                        |
-| :----------:  | ---------                | ------------                       |
-| ```ports```   | map of string to Integer | A map of names to port numbers     |
-| ```task_id``` | string                   | The task ID assigned to this task. |
-
-The `thermos.ports` namespace is automatically populated by Aurora when
-invoking tasks on Mesos. When running the `thermos` command directly,
-these ports must be explicitly mapped with the `-P` option.
-
-For example, if '{{`thermos.ports[http]`}}' is specified in a `Process`
-configuration, it is automatically extracted and auto-populated by
-Aurora, but must be specified with, for example, `thermos -P http:12345`
-to map `http` to port 12345 when running via the CLI.
-
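-A short sketch of a server Process that binds to its assigned port via the
-`thermos` namespace (the binary and flag are illustrative):
-
-    server = Process(
-      name = 'server',
-      cmdline = './run-server.sh --port {{thermos.ports[http]}}')
-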
-Basic Examples
-==============
-
-These are provided to give a basic understanding of simple Aurora jobs.
-
-### hello_world.aurora
-
-Put the following in a file named `hello_world.aurora`, substituting your own values
-for values such as `cluster`s.
-
-    import os
-    hello_world_process = Process(name = 'hello_world', cmdline = 'echo hello world')
-
-    hello_world_task = Task(
-      resources = Resources(cpu = 0.1, ram = 16 * MB, disk = 16 * MB),
-      processes = [hello_world_process])
-
-    hello_world_job = Job(
-      cluster = 'cluster1',
-      role = os.getenv('USER'),
-      task = hello_world_task)
-
-    jobs = [hello_world_job]
-
-Then issue the following commands to create and kill the job, using your own values for the job key.
-
-    aurora create cluster1/$USER/test/hello_world hello_world.aurora
-
-    aurora kill cluster1/$USER/test/hello_world
-
-### Environment Tailoring
-
-#### hello_world_productionized.aurora
-
-Put the following in a file named `hello_world_productionized.aurora`, substituting your own values
-for values such as `cluster`s.
-
-    include('hello_world.aurora')
-
-    production_resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 2 * GB)
-    staging_resources = Resources(cpu = 0.1, ram = 32 * MB, disk = 512 * MB)
-    hello_world_template = hello_world_job(
-        name = "hello_world-{{cluster}}",
-        task = hello_world_task(resources=production_resources))
-
-    jobs = [
-      # production jobs
-      hello_world_template(cluster = 'cluster1', instances = 25),
-      hello_world_template(cluster = 'cluster2', instances = 15),
-
-      # staging jobs
-      hello_world_template(
-        cluster = 'local',
-        instances = 1,
-        task = hello_world_task(resources=staging_resources)),
-    ]
-
-Then issue the following commands to create and kill the job, using your own values for the job key.
-
-    aurora create cluster1/$USER/test/hello_world-cluster1 hello_world_productionized.aurora
-
-    aurora kill cluster1/$USER/test/hello_world-cluster1
diff --git a/source/documentation/latest/configuration-tutorial.md b/source/documentation/latest/configuration-tutorial.md
deleted file mode 100644
index c9f5a87..0000000
--- a/source/documentation/latest/configuration-tutorial.md
+++ /dev/null
@@ -1,1147 +0,0 @@
-Aurora Configuration Tutorial
-=============================
-
-How to write Aurora configuration files, including feature descriptions
-and best practices. When writing a configuration file, make use of
-`aurora inspect`. It takes the same job key and configuration file
-arguments as `aurora create` or `aurora update`. It first ensures the
-configuration parses, then outputs it in human-readable form.
-
-You should read this after going through the general [Aurora Tutorial](/documentation/latest/tutorial/).
-
-- [Aurora Configuration Tutorial](#user-content-aurora-configuration-tutorial)
-	- [The Basics](#user-content-the-basics)
-		- [Use Bottom-To-Top Object Ordering](#user-content-use-bottom-to-top-object-ordering)
-	- [An Example Configuration File](#user-content-an-example-configuration-file)
-	- [Defining Process Objects](#user-content-defining-process-objects)
-	- [Getting Your Code Into The Sandbox](#user-content-getting-your-code-into-the-sandbox)
-	- [Defining Task Objects](#user-content-defining-task-objects)
-		- [SequentialTask: Running Processes in Parallel or Sequentially](#user-content-sequentialtask-running-processes-in-parallel-or-sequentially)
-		- [SimpleTask](#user-content-simpletask)
-		- [Combining tasks](#user-content-combining-tasks)
-	- [Defining Job Objects](#user-content-defining-job-objects)
-	- [The jobs List](#user-content-the-jobs-list)
-	- [Templating](#user-content-templating)
-		- [Templating 1: Binding in Pystachio](#user-content-templating-1-binding-in-pystachio)
-		- [Structurals in Pystachio / Aurora](#user-content-structurals-in-pystachio--aurora)
-			- [Mustaches Within Structurals](#user-content-mustaches-within-structurals)
-		- [Templating 2: Structurals Are Factories](#user-content-templating-2-structurals-are-factories)
-			- [A Second Way of Templating](#user-content-a-second-way-of-templating)
-		- [Advanced Binding](#user-content-advanced-binding)
-			- [Bind Syntax](#user-content-bind-syntax)
-			- [Binding Complex Objects](#user-content-binding-complex-objects)
-				- [Lists](#user-content-lists)
-				- [Maps](#user-content-maps)
-				- [Structurals](#user-content-structurals)
-		- [Structural Binding](#user-content-structural-binding)
-	- [Configuration File Writing Tips And Best Practices](#user-content-configuration-file-writing-tips-and-best-practices)
-		- [Use As Few .aurora Files As Possible](#user-content-use-as-few-aurora-files-as-possible)
-		- [Avoid Boilerplate](#user-content-avoid-boilerplate)
-		- [Thermos Uses bash, But Thermos Is Not bash](#user-content-thermos-uses-bash-but-thermos-is-not-bash)
-			- [Bad](#user-content-bad)
-			- [Good](#user-content-good)
-		- [Rarely Use Functions In Your Configurations](#user-content-rarely-use-functions-in-your-configurations)
-			- [Bad](#user-content-bad-1)
-			- [Good](#user-content-good-1)
-
-The Basics
-----------
-
-To run a job on Aurora, you must specify a configuration file that tells
-Aurora what it needs to know to schedule the job, what Mesos needs to
-run the tasks the job is made up of, and what Thermos needs to run the
-processes that make up the tasks. This file must have
-a `.aurora` suffix.
-
-A configuration file defines a collection of objects, along with parameter
-values for their attributes. An Aurora configuration file contains the
-following three types of objects:
-
-- Job
-- Task
-- Process
-
-A configuration also specifies a list of `Job` objects assigned
-to the variable `jobs`.
-
-- jobs (list of defined Jobs to run)
-
-The `.aurora` file format is just Python. However, `Job`, `Task`,
-`Process`, and other classes are defined by a type-checked dictionary
-templating library called *Pystachio*, a powerful tool for
-configuration specification and reuse. Pystachio objects are tailored
-via {{}} surrounded templates.
-
-When writing your `.aurora` file, you may use any Pystachio datatypes, as
-well as any objects shown in the [*Aurora+Thermos Configuration
-Reference*](/documentation/latest/configuration-reference/), without `import` statements - the
-Aurora config loader injects them automatically. Other than that, an `.aurora`
-file works like any other Python script.
-
-[*Aurora+Thermos Configuration Reference*](/documentation/latest/configuration-reference/)
-has a full reference of all Aurora/Thermos defined Pystachio objects.
-
-### Use Bottom-To-Top Object Ordering
-
-A well-structured configuration starts with structural templates (if
-any). Structural templates encapsulate in their attributes all the
-differences between Jobs in the configuration that are not directly
-manipulated at the `Job` level, but typically at the `Process` or `Task`
-level. For example, if certain processes are invoked with slightly
-different settings or input.
-
-After structural templates, define, in order, `Process`es, `Task`s, and
-`Job`s.
-
-Structural template names should be *UpperCamelCased* and their
-instantiations are typically *UPPER\_SNAKE\_CASED*. `Process`, `Task`,
-and `Job` names are typically *lower\_snake\_cased*. Indentation is typically 2
-spaces.
-
-An Example Configuration File
------------------------------
-
-The following is a typical configuration file. Don't worry if there are
-parts you don't understand yet, but you may want to refer back to this
-as you read about its individual parts. Note that names surrounded by
-curly braces {{}} are template variables, which the system replaces with
-bound values for the variables.
-
-    # --- templates here ---
-	class Profile(Struct):
-	  package_version = Default(String, 'live')
-	  java_binary = Default(String, '/usr/lib/jvm/java-1.7.0-openjdk/bin/java')
-	  extra_jvm_options = Default(String, '')
-	  parent_environment = Default(String, 'prod')
-	  parent_serverset = Default(String,
-                                 '/foocorp/service/bird/{{parent_environment}}/bird')
-
-	# --- processes here ---
-	main = Process(
-	  name = 'application',
-	  cmdline = '{{profile.java_binary}} -server -Xmx1792m '
-	            '{{profile.extra_jvm_options}} '
-	            '-jar application.jar '
-	            '-upstreamService {{profile.parent_serverset}}'
-	)
-
-	# --- tasks ---
-	base_task = SequentialTask(
-	  name = 'application',
-	  processes = [
-	    Process(
-	      name = 'fetch',
-	      cmdline = 'curl -O '
-	                'https://packages.foocorp.com/{{profile.package_version}}/application.jar'),
-	  ]
-	)
-
-        # not always necessary but often useful to have separate task
-        # resource classes
-        staging_task = base_task(resources =
-                         Resources(cpu = 1.0,
-                                   ram = 2048*MB,
-                                   disk = 1*GB))
-	production_task = base_task(resources =
-                            Resources(cpu = 4.0,
-                                      ram = 2560*MB,
-                                      disk = 10*GB))
-
-	# --- job template ---
-	job_template = Job(
-	  name = 'application',
-	  role = 'myteam',
-	  contact = 'myteam-team@foocorp.com',
-	  instances = 20,
-	  service = True,
-	  task = production_task
-	)
-
-	# -- profile instantiations (if any) ---
-	PRODUCTION = Profile()
-	STAGING = Profile(
-	  extra_jvm_options = '-Xloggc:gc.log',
-	  parent_environment = 'staging'
-	)
-
-	# -- job instantiations --
-	jobs = [
-          job_template(cluster = 'cluster1', environment = 'prod')
-	               .bind(profile = PRODUCTION),
-
-          job_template(cluster = 'cluster2', environment = 'prod')
-	                .bind(profile = PRODUCTION),
-
-          job_template(cluster = 'cluster1',
-                        environment = 'staging',
-			service = False,
-			task = staging_task,
-			instances = 2)
-			.bind(profile = STAGING),
-	]
-
-## Defining Process Objects
-
-Processes are handled by the Thermos system. A process is a single
-executable step run as a part of an Aurora task, which consists of a
-bash-executable statement.
-
-The key (and required) `Process` attributes are:
-
--   `name`: Any string which is a valid Unix filename (no slashes,
-    NULLs, or leading periods). The `name` value must be unique relative
-    to other Processes in a `Task`.
--   `cmdline`: A command line run in a bash subshell, so you can use
-    bash scripts. Nothing is supplied for command-line arguments,
-    so `$*` is unspecified.
-
-Many tiny processes make managing configurations more difficult. For
-example, the following is a bad way to define processes.
-
-    copy = Process(
-      name = 'copy',
-      cmdline = 'curl -O https://packages.foocorp.com/app.zip'
-    )
-    unpack = Process(
-      name = 'unpack',
-      cmdline = 'unzip app.zip'
-    )
-    remove = Process(
-      name = 'remove',
-      cmdline = 'rm -f app.zip'
-    )
-    run = Process(
-      name = 'app',
-      cmdline = 'java -jar app.jar'
-    )
-    run_task = Task(
-      processes = [copy, unpack, remove, run],
-      constraints = order(copy, unpack, remove, run)
-    )
-
-Since `cmdline` runs in a bash subshell, you can chain commands
-with `&&` or `||`.
-
-When defining a `Task` that is just a list of Processes run in a
-particular order, use `SequentialTask`, as described in the [*Defining*
-`Task` *Objects*](#Task) section. The following simplifies and combines the
-above multiple `Process` definitions into just two.
-
-    stage = Process(
-      name = 'stage',
-      cmdline = 'curl -O https://packages.foocorp.com/app.zip && '
-                'unzip app.zip && rm -f app.zip')
-
-    run = Process(name = 'app', cmdline = 'java -jar app.jar')
-
-    run_task = SequentialTask(processes = [stage, run])
-
-`Process` also has five optional attributes, each with a default value
-if one isn't specified in the configuration:
-
--   `max_failures`: Defaulting to `1`, the maximum number of failures
-    (non-zero exit statuses) before this `Process` is marked permanently
-    failed and not retried. If a `Process` permanently fails, Thermos
-    checks the `Process` object's containing `Task` for the task's
-    failure limit (usually 1) to determine whether or not the `Task`
-    should be failed. Setting `max_failures` to `0` means that this
-    process will keep retrying until a successful (zero) exit status is
-    achieved. Retries happen at most once every `min_duration` seconds
-    to prevent effectively mounting a denial of service attack against
-    the coordinating scheduler.
-
--   `daemon`: Defaulting to `False`, if `daemon` is set to `True`, a
-    successful (zero) exit status does not prevent future process runs.
-    Instead, the `Process` reinvokes after `min_duration` seconds.
-    However, the maximum failure limit (`max_failures`) still
-    applies. A combination of `daemon=True` and `max_failures=0` retries
-    a `Process` indefinitely regardless of exit status. This should
-    generally be avoided for very short-lived processes because of the
-    accumulation of checkpointed state for each process run. When
-    running in Aurora, `max_failures` is capped at
-    100.
-
--   `ephemeral`: Defaulting to `False`, if `ephemeral` is `True`, the
-    `Process`' status is not used to determine if its bound `Task` has
-    completed. For example, consider a `Task` with a
-    non-ephemeral webserver process and an ephemeral logsaver process
-    that periodically checkpoints its log files to a centralized data
-    store. The `Task` is considered finished once the webserver process
-    finishes, regardless of the logsaver's current status.
-
--   `min_duration`: Defaults to `15`. Processes may succeed or fail
-    multiple times during a single Task. Each result is called a
-    *process run* and this value is the minimum number of seconds the
-    scheduler waits before re-running the same process.
-
--   `final`: Defaulting to `False`, this is a finalizing `Process` that
-    should run last. Processes can be grouped into two classes:
-    *ordinary* and *finalizing*. By default, Thermos Processes are
-    ordinary. They run as long as the `Task` is considered
-    healthy (i.e. hasn't reached a failure limit). But once all regular
-    Thermos Processes have either finished or the `Task` has reached a
-    certain failure threshold, Thermos moves into a *finalization* stage
-    and runs all finalizing Processes. These are typically necessary for
-    cleaning up after the `Task`, such as log checkpointers, or perhaps
-    e-mail notifications of a completed Task. Finalizing processes may
-    not depend upon ordinary processes or vice-versa, however finalizing
-    processes may depend upon other finalizing processes and will
-    otherwise run as a typical process schedule.
-
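-Putting a few of these optional attributes together, here is a sketch of a
-daemonized, ephemeral log rotator and a finalizing cleanup step (the names and
-commands are illustrative):
-
-    logrotate = Process(
-      name = 'logrotate',
-      cmdline = './logrotate conf/logrotate.conf',
-      daemon = True,
-      ephemeral = True)
-
-    cleanup = Process(
-      name = 'cleanup',
-      cmdline = 'rm -rf /tmp/scratch',
-      final = True)
-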
-## Getting Your Code Into The Sandbox
-
-When using Aurora, you need to get your executable code into its "sandbox", specifically
-the Task sandbox where the code executes for the Processes that make up that Task.
-
-Each Task has a sandbox created when the Task starts and garbage
-collected when it finishes. All of a Task's processes run in its
-sandbox, so processes can share state by using a shared current
-working directory.
-
-Typically, you save this code somewhere. You then need to define a Process
-in your `.aurora` configuration file that fetches the code from that somewhere
-to where the slave can see it. For a public cloud, that can be anywhere public on
-the Internet, such as S3. For a private cloud, you need to put it on
-internal storage, such as an accessible HDFS cluster or similar.
-
-The template for this Process is:
-
-    <name> = Process(
-      name = '<name>',
-      cmdline = '<command to copy and extract code archive into current working directory>'
-    )
-
-Note: Be sure the extracted code archive has an executable.
-
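-A concrete sketch of such a fetch-and-unpack Process (the URL and archive name
-are illustrative):
-
-    fetch = Process(
-      name = 'fetch',
-      cmdline = 'curl -O https://packages.foocorp.com/myapp.tar.gz && tar xzf myapp.tar.gz')
-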
-## Defining Task Objects
-
-Tasks are handled by Mesos. A task is a collection of processes that
-runs in a shared sandbox. It's the fundamental unit Aurora uses to
-schedule the datacenter; essentially what Aurora does is find places
-in the cluster to run tasks.
-
-The key (and required) parts of a Task are:
-
--   `name`: A string giving the Task's name. By default, if a Task is
-    not given a name, it inherits the first name in its Process list.
-
--   `processes`: An unordered list of Process objects bound to the Task.
-    The value of the optional `constraints` attribute affects the
-    contents as a whole. Currently, the only constraint, `order`, determines if
-    the processes run in parallel or sequentially.
-
--   `resources`: A `Resource` object defining the Task's resource
-    footprint. A `Resource` object has three attributes:
-    -   `cpu`: A Float, the fractional number of cores the Task
-        requires.
-    -   `ram`: An Integer, RAM bytes the Task requires.
-    -   `disk`: An Integer, disk bytes the Task requires.
-
-A basic Task definition looks like:
-
-    Task(
-        name="hello_world",
-        processes=[Process(name = "hello_world", cmdline = "echo hello world")],
-        resources=Resources(cpu = 1.0,
-                            ram = 1*GB,
-                            disk = 1*GB))
-
-There are four optional Task attributes:
-
--   `constraints`: A list of `Constraint` objects that constrain the
-    Task's processes. Currently there is only one type, the `order`
-    constraint. For example the following requires that the processes
-    run in the order `foo`, then `bar`.
-
-        constraints = [Constraint(order=['foo', 'bar'])]
-
-    There is an `order()` function that takes `order('foo', 'bar', 'baz')`
-    and converts it into `[Constraint(order=['foo', 'bar', 'baz'])]`.
-    `order()` accepts Process name strings `('foo', 'bar')` or the processes
-    themselves, e.g. `foo=Process(name='foo', ...)`, `bar=Process(name='bar', ...)`,
-    `constraints=order(foo, bar)`
-
-    Note that Thermos rejects tasks with process cycles.
-
--   `max_failures`: Defaulting to `1`, the number of failed processes
-    needed for the `Task` to be marked as failed. Note how this
-    interacts with individual Processes' `max_failures` values. Assume a
-    Task has two Processes and a `max_failures` value of `2`. So both
-    Processes must fail for the Task to fail. Now, assume each of the
-    Task's Processes has its own `max_failures` value of `10`. If
-    Process "A" fails 5 times before succeeding, and Process "B" fails
-    10 times and is then marked as failing, their parent Task succeeds.
-    Even though there were 15 individual failures by its Processes, only
-    1 of its Processes was finally marked as failing. Since 1 is less
-    than the 2 that is the Task's `max_failures` value, the Task does
-    not fail.
-
--   `max_concurrency`: Defaulting to `0`, the maximum number of
-    concurrent processes in the Task. `0` specifies unlimited
-    concurrency. For Tasks with many expensive but otherwise independent
-    processes, you can limit the amount of concurrency Thermos schedules
-    instead of artificially constraining them through `order`
-    constraints. For example, a test framework may generate a Task with
-    100 test run processes, but runs it in a Task with
-    `resources.cpus=4`. Limit the amount of parallelism to 4 by setting
-    `max_concurrency=4`.
-
--   `finalization_wait`: Defaulting to `30`, the number of seconds
-    allocated for finalizing the Task's processes. A Task starts in
-    `ACTIVE` state when Processes run and stays there as long as the Task
-    is healthy and Processes run. When all Processes finish successfully
-    or the Task reaches its maximum process failure limit, it goes into
-    `CLEANING` state. In `CLEANING`, it sends `SIGTERM`s to any still running
-    Processes. When all Processes terminate, the Task goes into
-    `FINALIZING` state and invokes the schedule of all processes whose
-    final attribute has a True value. Everything from the end of `ACTIVE`
-    to the end of `FINALIZING` must happen within `finalization_wait`
-    number of seconds. If not, all still running Processes are sent
-    `SIGKILL`s (or if dependent on yet to be completed Processes, are
-    never invoked).
-
-### SequentialTask: Running Processes in Parallel or Sequentially
-
-By default, a Task with several Processes runs them in parallel. There
-are two ways to run Processes sequentially:
-
--   Include an `order` constraint in the Task definition's `constraints`
-    attribute whose arguments specify the processes' run order:
-
-        Task( ... processes=[process1, process2, process3],
-	          constraints = order(process1, process2, process3), ...)
-
--   Use `SequentialTask` instead of `Task`; it automatically runs
-    processes in the order specified in the `processes` attribute. No
-    `constraint` parameter is needed:
-
-        SequentialTask( ... processes=[process1, process2, process3] ...)
-
-### SimpleTask
-
-For quickly creating simple tasks, use the `SimpleTask` helper. It
-creates a basic task from a provided name and command line using a
-default set of resources. For example, in a `.aurora` configuration
-file:
-
-    SimpleTask(name="hello_world", command="echo hello world")
-
-is equivalent to
-
-    Task(name="hello_world",
-         processes=[Process(name = "hello_world", cmdline = "echo hello world")],
-         resources=Resources(cpu = 1.0,
-                             ram = 1*GB,
-                             disk = 1*GB))
-
-The simplest idiomatic Job configuration thus becomes:
-
-    import os
-    hello_world_job = Job(
-      task=SimpleTask(name="hello_world", command="echo hello world"),
-      role=os.getenv('USER'),
-      cluster="cluster1")
-
-When written to `hello_world.aurora`, you invoke it with a simple
-`aurora create cluster1/$USER/test/hello_world hello_world.aurora`.
-
-### Combining tasks
-
-`Tasks.concat` (synonym: `concat_tasks`) and
-`Tasks.combine` (synonym: `combine_tasks`) merge multiple Task definitions
-into a single Task. It may be easier to define complex Jobs
-as smaller constituent Tasks. But since a Job only includes a single
-Task, the subtasks must be combined before using them in a Job.
-Smaller Tasks can also be reused between Jobs, instead of having to
-repeat their definition for multiple Jobs.
-
-With both methods, the merged Task takes the first Task's name. The
-difference between the two is the result Task's process ordering.
-
--   `Tasks.combine` runs its subtasks' processes in no particular order.
-    The new Task's resource consumption is the sum of all its subtasks'
-    consumption.
-
--   `Tasks.concat` runs its subtasks in the order supplied, with each
-    subtask's processes run serially between tasks. It is analogous to
-    the `order` constraint helper, except at the Task level instead of
-    the Process level. The new Task's resource consumption is the
-    maximum value specified by any subtask for each Resource attribute
-    (cpu, ram and disk).
-
-For example, given the following:
-
-    setup_task = Task(
-      ...
-      processes=[download_interpreter, update_zookeeper],
-      # It is important to note that {{Tasks.concat}} has
-      # no effect on the ordering of the processes within a task;
-      # hence the necessity of the {{order}} statement below
-      # (otherwise, the order in which {{download_interpreter}}
-      # and {{update_zookeeper}} run will be non-deterministic)
-      constraints=order(download_interpreter, update_zookeeper),
-      ...
-    )
-
-    run_task = SequentialTask(
-      ...
-      processes=[download_application, start_application],
-      ...
-    )
-
-    combined_task = Tasks.concat(setup_task, run_task)
-
-The `Tasks.concat` command merges the two Tasks into a single Task and
-ensures all processes in `setup_task` run before the processes
-in `run_task`. Conceptually, the task is reduced to:
-
-    task = Task(
-      ...
-      processes=[download_interpreter, update_zookeeper,
-                 download_application, start_application],
-      constraints=order(download_interpreter, update_zookeeper,
-                        download_application, start_application),
-      ...
-    )
-
-In the case of `Tasks.combine`, the two schedules run in parallel:
-
-    task = Task(
-      ...
-      processes=[download_interpreter, update_zookeeper,
-                 download_application, start_application],
-      constraints=order(download_interpreter, update_zookeeper) +
-                        order(download_application, start_application),
-      ...
-    )
-
-In the latter case, each of the two sequences may operate in parallel.
-Of course, this may not be the intended behavior (for example, if
-the `start_application` Process implicitly relies
-upon `download_interpreter`). Make sure you understand the difference
-between using one or the other.
-
-## Defining Job Objects
-
-A job is a group of identical tasks that Aurora can run in a Mesos cluster.
-
-A `Job` object is defined by the values of several attributes, some
-required and some optional. The required attributes are:
-
--   `task`: Task object to bind to this job. Note that a Job can
-    only take a single Task.
-
--   `role`: Job's role account; in other words, the user account to run
-    the job as on a Mesos cluster machine. A common value is
-    `os.getenv('USER')`; using a Python command to get the user who
-    submits the job request. The other common value is the service
-    account that runs the job, e.g. `www-data`.
-
--   `environment`: Job's environment, typical values
-    are `devel`, `test`, or `prod`.
-
--   `cluster`: Aurora cluster to schedule the job in, defined in
-    `/etc/aurora/clusters.json` or `~/.clusters.json`. You can specify
-    jobs where the only difference is the `cluster`, then at run time
-    only run the Job whose job key includes your desired cluster's name.
-
-You usually see a `name` parameter. By default, `name` inherits its
-value from the Job's associated Task object, but you can override this
-default. For these four parameters, a Job definition might look like:
-
-    foo_job = Job( name = 'foo', cluster = 'cluster1',
-              role = os.getenv('USER'), environment = 'prod',
-              task = foo_task)
-
-In addition to the required attributes, there are several optional
-attributes. The first (strongly recommended) optional attribute is:
-
--   `contact`: An email address for the Job's owner. For production
-    jobs, it is usually a team mailing list.
-
-Two more attributes deal with how to handle failure of the Job's Task:
-
--   `max_task_failures`: An integer, defaulting to `1`, of the maximum
-    number of Task failures after which the Job is considered failed.
-    `-1` allows for infinite failures.
-
--   `service`: A boolean, defaulting to `False`, which if `True`
-    restarts tasks regardless of whether they succeeded or failed. In
-    other words, if `True`, after the Job's Task completes, it
-    automatically starts again. This is for Jobs you want to run
-    continuously, rather than doing a single run.
-
-Three attributes deal with configuring the Job's Task:
-
--   `instances`: Defaulting to `1`, the number of
-    instances/replicas/shards of the Job's Task to create.
-
--   `priority`: Defaulting to `0`, the Job's Task's preemption priority,
-    for which higher values may preempt Tasks from Jobs with lower
-    values.
-
--   `production`: a Boolean, defaulting to `False`, specifying that this
-    is a production job backed by quota. Tasks from production Jobs may
-    preempt tasks from any non-production job, and may only be preempted
-    by tasks from production jobs in the same role with higher
-    priority. **WARNING**: To run Jobs at this level, the Job role must
-    have the appropriate quota.
-
-The final three Job attributes each take an object as their value.
-
--   `update_config`: An `UpdateConfig`
-    object provides parameters for controlling the rate and policy of
-    rolling updates. The `UpdateConfig` parameters are:
-    -   `batch_size`: An integer, defaulting to `1`, specifying the
-        maximum number of shards to update in one iteration.
-    -   `restart_threshold`: An integer, defaulting to `60`, specifying
-        the maximum number of seconds a shard may take to move into the
-        `RUNNING` state before it is considered a failure.
-    -   `watch_secs`: An integer, defaulting to `45`, specifying the
-        minimum number of seconds a shard must remain in the `RUNNING`
-        state before considered a success.
-    -   `max_per_shard_failures`: An integer, defaulting to `0`,
-        specifying the maximum number of restarts per shard during an
-        update. When the limit is exceeded, it increments the total
-        failure count.
-    -   `max_total_failures`: An integer, defaulting to `0`, specifying
-        the maximum number of shard failures tolerated during an update.
-        Cannot be equal to or greater than the job's total number of
-        tasks.
--   `health_check_config`: A `HealthCheckConfig` object that provides
-    parameters for controlling a Task's health checks via HTTP. Only
-    used if a health port was assigned with a command line wildcard. The
-    `HealthCheckConfig` parameters are:
-    -   `initial_interval_secs`: An integer, defaulting to `15`,
-        specifying the initial delay for doing an HTTP health check.
-    -   `interval_secs`: An integer, defaulting to `10`, specifying the
-        number of seconds in the interval between checking the Task's
-        health.
-    -   `timeout_secs`: An integer, defaulting to `1`, specifying the
-        number of seconds within which the application must respond to an
-        HTTP health check with `OK` before the check is considered a failure.
-    -   `max_consecutive_failures`: An integer, defaulting to `0`,
-        specifying the maximum number of consecutive failures before a
-        task is unhealthy.
--   `constraints`: A `dict` Python object, specifying Task scheduling
-    constraints. Most users will not need to specify constraints, as the
-    scheduler automatically inserts reasonable defaults. Please do not
-    set this field unless you are sure of what you are doing. See the
-    section in the Aurora + Thermos Reference manual on [Specifying
-    Scheduling Constraints](/documentation/latest/configuration-reference/) for more information.
-
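-Pulling several of these optional attributes together, a production Job might
-look like the following sketch (it reuses the `foo_task` from the earlier
-example; the remaining values are illustrative):
-
-    foo_job = Job(
-      name = 'foo',
-      cluster = 'cluster1',
-      role = 'www-data',
-      environment = 'prod',
-      contact = 'myteam-team@foocorp.com',
-      instances = 20,
-      service = True,
-      production = True,
-      update_config = UpdateConfig(batch_size = 5, watch_secs = 60),
-      health_check_config = HealthCheckConfig(initial_interval_secs = 30),
-      task = foo_task)
-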
-## The jobs List
-
-At the end of your `.aurora` file, you need to specify a list of the
-file's defined Jobs to run in the order listed. For example, the
-following runs first `job1`, then `job2`, then `job3`.
-
-    jobs = [job1, job2, job3]
-
-Templating
-----------
-
-The `.aurora` file format is just Python. However, `Job`, `Task`,
-`Process`, and other classes are defined by a templating library called
-*Pystachio*, a powerful tool for configuration specification and reuse.
-
-[Aurora+Thermos Configuration Reference](/documentation/latest/configuration-reference/)
-has a full reference of all Aurora/Thermos defined Pystachio objects.
-
-When writing your `.aurora` file, you may use any Pystachio datatypes, as
-well as any objects shown in the *Aurora+Thermos Configuration
-Reference* without `import` statements - the Aurora config loader
-injects them automatically. Other than that the `.aurora` format
-works like any other Python script.
-
-### Templating 1: Binding in Pystachio
-
-Pystachio uses the visually distinctive {{}} to indicate template
-variables. These are often called "mustache variables" after the
-similarly appearing variables in the Mustache templating system and
-because the curly braces resemble mustaches.
-
-If you are familiar with the Mustache system, templates in Pystachio
-have significant differences. They have no nesting, joining, or
-inheritance semantics. On the other hand, when evaluated, templates
-are evaluated iteratively, so this affords some level of indirection.
-
-Let's start with the simplest template: text with one
-variable, in this case `name`:
-
-    Hello {{name}}
-
-If we evaluate this as is, we'd get back:
-
-    Hello
-
-If a template variable doesn't have a value, when evaluated it's
-replaced with nothing. If we add a binding to give it a value:
-
-    { "name" : "Tom" }
-
-We'd get back:
-
-    Hello Tom
-
-We can also use {{}} variables as sectional variables. Let's say we
-have:
-
-    {{#x}} Testing... {{/x}}
-
-If `x` evaluates to `True`, the text between the sectional tags is
-shown. If there is no value for `x` or it evaluates to `False`, the
-between tags text is not shown. So, at a basic level, a sectional
-variable acts as a conditional.
-
-However, if the sectional variable evaluates to a list, array, etc. it
-acts as a `foreach`. For example,
-
-    {{#x}} {{name}} {{/x}}
-
-with
-
-    { "x": [ { "name" : "tic" }, { "name" : "tac" }, { "name" : "toe" } ] }
-
-evaluates to
-
-    tic tac toe
-
-Every Pystachio object has an associated `.bind` method that can bind
-values to {{}} variables. Bindings are not immediately evaluated.
-Instead, they are evaluated only when the interpolated value of the
-object is necessary, e.g. for performing equality or serializing a
-message over the wire.
-
-Objects with and without mustache templated variables behave
-differently:
-
-    >>> Float(1.5)
-    Float(1.5)
-
-    >>> Float('{{x}}.5')
-    Float({{x}}.5)
-
-    >>> Float('{{x}}.5').bind(x = 1)
-    Float(1.5)
-
-    >>> Float('{{x}}.5').bind(x = 1) == Float(1.5)
-    True
-
-    >>> contextual_object = String('{{metavar{{number}}}}').bind(
-    ... metavar1 = "first", metavar2 = "second")
-
-    >>> contextual_object
-    String({{metavar{{number}}}})
-
-    >>> contextual_object.bind(number = 1)
-    String(first)
-
-    >>> contextual_object.bind(number = 2)
-    String(second)
-
-You usually bind simple key to value pairs, but you can also bind three
-other objects: lists, dictionaries, and structurals. These will be
-described in detail later.
-
-### Structurals in Pystachio / Aurora
-
-Most Aurora/Thermos users don't ever (knowingly) interact with `String`,
-`Float`, or `Integer` Pystachio objects directly. Instead they interact
-with derived structural (`Struct`) objects that are collections of
-fundamental and structural objects. The structural object components are
-called *attributes*. Aurora's most used structural objects are `Job`,
-`Task`, and `Process`:
-
-    class Process(Struct):
-      cmdline = Required(String)
-      name = Required(String)
-      max_failures = Default(Integer, 1)
-      daemon = Default(Boolean, False)
-      ephemeral = Default(Boolean, False)
-      min_duration = Default(Integer, 5)
-      final = Default(Boolean, False)
-
-Construct default objects by following the object's type with (). If you
-want an attribute to have a value different from its default, include
-the attribute name and value inside the parentheses.
-
-    >>> Process()
-    Process(daemon=False, max_failures=1, ephemeral=False,
-      min_duration=5, final=False)
-
-Attribute values can be template variables, which then receive specific
-values when creating the object.
-
-    >>> Process(cmdline = 'echo {{message}}')
-    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
-            cmdline=echo {{message}}, final=False)
-
-    >>> Process(cmdline = 'echo {{message}}').bind(message = 'hello world')
-    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
-            cmdline=echo hello world, final=False)
-
-A powerful binding property is that all of an object's children inherit its
-bindings:
-
-    >>> List(Process)([
-    ... Process(name = '{{prefix}}_one'),
-    ... Process(name = '{{prefix}}_two')
-    ... ]).bind(prefix = 'hello')
-    ProcessList(
-      Process(daemon=False, name=hello_one, max_failures=1, ephemeral=False, min_duration=5, final=False),
-      Process(daemon=False, name=hello_two, max_failures=1, ephemeral=False, min_duration=5, final=False)
-      )
-
-Remember that an Aurora Job contains Tasks which contain Processes. A
-Job level binding is inherited by its Tasks and all their Processes.
-Similarly a Task level binding is available to that Task and its
-Processes but is *not* visible at the Job level (inheritance is a
-one-way street.)
-
-#### Mustaches Within Structurals
-
-When you define a `Struct` schema, one powerful, but confusing, feature
-is that all of that structure's attributes are Mustache variables within
-the enclosing scope *once they have been populated*.
-
-For example, when `Process` is defined above, all its attributes such as
-{{`name`}}, {{`cmdline`}}, {{`max_failures`}} etc., are all immediately
-defined as Mustache variables, implicitly bound into the `Process`, and
-inherited by all child objects once they are defined.
-
-Thus, you can do the following:
-
-    >>> Process(name = "installer", cmdline = "echo {{name}} is running")
-    Process(daemon=False, name=installer, max_failures=1, ephemeral=False, min_duration=5,
-            cmdline=echo installer is running, final=False)
-
-WARNING: This binding only takes place in one direction. For example,
-the following does NOT work and does not set the `Process` `name`
-attribute's value.
-
-    >>> Process().bind(name = "installer")
-    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5, final=False)
-
-The following is also not possible and results in an infinite loop that
-attempts to resolve `Process.name`.
-
-    >>> Process(name = '{{name}}').bind(name = 'installer')
-
-Do not confuse Structural attributes with bound Mustache variables.
-Attributes are implicitly converted to Mustache variables but not vice
-versa.
-
-### Templating 2: Structurals Are Factories
-
-#### A Second Way of Templating
-
-A second templating method is both as powerful as the aforementioned and
-often confused with it. This method is due to automatic conversion of
-Struct attributes to Mustache variables as described above.
-
-Suppose you create a Process object:
-
-    >>> p = Process(name = "process_one", cmdline = "echo hello world")
-
-    >>> p
-    Process(daemon=False, name=process_one, max_failures=1, ephemeral=False, min_duration=5,
-            cmdline=echo hello world, final=False)
-
-This `Process` object, "`p`", can be used wherever a `Process` object is
-needed. It can also be reused by changing the value(s) of its
-attribute(s). Here we change its `name` attribute from `process_one` to
-`process_two`.
-
-    >>> p(name = "process_two")
-    Process(daemon=False, name=process_two, max_failures=1, ephemeral=False, min_duration=5,
-            cmdline=echo hello world, final=False)
-
-Template creation is a common use for this technique:
-
-    >>> Daemon = Process(daemon = True)
-    >>> logrotate = Daemon(name = 'logrotate', cmdline = './logrotate conf/logrotate.conf')
-    >>> mysql = Daemon(name = 'mysql', cmdline = 'bin/mysqld --safe-mode')
-
-### Advanced Binding
-
-As described above, `.bind()` binds simple strings or numbers to
-Mustache variables. In addition to Structural types formed by combining
-atomic types, Pystachio has two container types; `List` and `Map` which
-can also be bound via `.bind()`.
-
-#### Bind Syntax
-
-The `bind()` function can take Python dictionaries or `kwargs`
-interchangeably (when "`kwargs`" is in a function definition, `kwargs`
-receives a Python dictionary containing all keyword arguments after the
-formal parameter list).
-
-    >>> String('{{foo}}').bind(foo = 'bar') == String('{{foo}}').bind({'foo': 'bar'})
-    True
-
-Bindings done "closer" to the object in question take precedence:
-
-    >>> p = Process(name = '{{context}}_process')
-    >>> t = Task().bind(context = 'global')
-    >>> t(processes = [p, p.bind(context = 'local')])
-    Task(processes=ProcessList(
-      Process(daemon=False, name=global_process, max_failures=1, ephemeral=False, final=False,
-              min_duration=5),
-      Process(daemon=False, name=local_process, max_failures=1, ephemeral=False, final=False,
-              min_duration=5)
-    ))
-
-#### Binding Complex Objects
-
-##### Lists
-
-    >>> fibonacci = List(Integer)([1, 1, 2, 3, 5, 8, 13])
-    >>> String('{{fib[4]}}').bind(fib = fibonacci)
-    String(5)
-
-##### Maps
-
-    >>> first_names = Map(String, String)({'Kent': 'Clark', 'Wayne': 'Bruce', 'Prince': 'Diana'})
-    >>> String('{{first[Kent]}}').bind(first = first_names)
-    String(Clark)
-
-##### Structurals
-
-    >>> String('{{p.cmdline}}').bind(p = Process(cmdline = "echo hello world"))
-    String(echo hello world)
-
-### Structural Binding
-
-Use structural templates when binding more than two or three individual
-values at the Job or Task level. For fewer than two or three, standard
-key to string binding is sufficient.
-
-Structural binding is a very powerful pattern and is most useful in
-Aurora/Thermos for doing Structural configuration. For example, you can
-define a job profile. The following profile uses `HDFS`, the Hadoop
-Distributed File System, to designate a file's location. `HDFS` does
-not come with Aurora, so you'll need to either install it separately
-or change the way the dataset is designated.
-
-    class Profile(Struct):
-      version = Required(String)
-      environment = Required(String)
-      dataset = Default(String, 'hdfs://home/aurora/data/{{environment}}')
-
-    PRODUCTION = Profile(version = 'live', environment = 'prod')
-    DEVEL = Profile(version = 'latest',
-                    environment = 'devel',
-                    dataset = 'hdfs://home/aurora/data/test')
-    TEST = Profile(version = 'latest', environment = 'test')
-
-    JOB_TEMPLATE = Job(
-      name = 'application',
-      role = 'myteam',
-      cluster = 'cluster1',
-      environment = '{{profile.environment}}',
-      task = SequentialTask(
-        name = 'task',
-        resources = Resources(cpu = 2, ram = 4*GB, disk = 8*GB),
-        processes = [
-	  Process(name = 'main', cmdline = 'java -jar application.jar -hdfsPath '
-                  '{{profile.dataset}}')
-        ]
-       )
-     )
-
-    jobs = [
-      JOB_TEMPLATE(instances = 100).bind(profile = PRODUCTION),
-      JOB_TEMPLATE.bind(profile = DEVEL),
-      JOB_TEMPLATE.bind(profile = TEST),
-     ]
-
-In this case, a custom structural "Profile" is created to self-document
-the configuration to some degree. This also allows some schema
-"type-checking", and for default self-substitution, e.g. in
-`Profile.dataset` above.
-
-So rather than a `.bind()` with a half-dozen substituted variables, you
-can bind a single object that has sensible defaults stored in a single
-place.
-
-Configuration File Writing Tips And Best Practices
---------------------------------------------------
-
-### Use As Few .aurora Files As Possible
-
-When creating your `.aurora` configuration, try to keep all versions of
-a particular job within the same `.aurora` file. For example, if you
-have separate jobs for `cluster1`, `cluster1` staging, `cluster1`
-testing, and `cluster2`, keep them as close together as possible.
-
-Constructs shared across multiple jobs owned by your team (e.g.
-team-level defaults or structural templates) can be split into separate
-`.aurora` files and included via the `include` directive.
-
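-For example (the file name is illustrative), shared team-level templates can be
-pulled in at the top of a job's configuration with:
-
-    include('team_templates.aurora')
-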
-### Avoid Boilerplate
-
-If you see repetition or find yourself copy and pasting any parts of
-your configuration, it's likely an opportunity for templating. Take the
-example below:
-
-`redundant.aurora` contains:
-
-    download = Process(
-      name = 'download',
-      cmdline = 'wget http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2',
-      max_failures = 5,
-      min_duration = 1)
-
-    unpack = Process(
-      name = 'unpack',
-      cmdline = 'rm -rf Python-2.7.3 && tar xjf Python-2.7.3.tar.bz2',
-      max_failures = 5,
-      min_duration = 1)
-
-    build = Process(
-      name = 'build',
-      cmdline = 'pushd Python-2.7.3 && ./configure && make && popd',
-      max_failures = 1)
-
-    email = Process(
-      name = 'email',
-      cmdline = 'echo Success | mail feynman@tmc.com',
-      max_failures = 5,
-      min_duration = 1)
-
-    build_python = Task(
-      name = 'build_python',
-      processes = [download, unpack, build, email],
-      constraints = [Constraint(order = ['download', 'unpack', 'build', 'email'])])
-
-As you'll notice, there's a lot of repetition in the `Process`
-definitions. For example, almost every process sets a `max_failures`
-limit to 5 and a `min_duration` to 1. This is an opportunity for factoring
-into a common process template.
-
-Furthermore, the Python version is repeated everywhere. This can be
-bound via structural templating as described in the [Advanced Binding](#AdvancedBinding)
-section.
-
-`less_redundant.aurora` contains:
-
-    class Python(Struct):
-      version = Required(String)
-      base = Default(String, 'Python-{{version}}')
-      package = Default(String, '{{base}}.tar.bz2')
-
-    ReliableProcess = Process(
-      max_failures = 5,
-      min_duration = 1)
-
-    download = ReliableProcess(
-      name = 'download',
-      cmdline = 'wget http://www.python.org/ftp/python/{{python.version}}/{{python.package}}')
-
-    unpack = ReliableProcess(
-      name = 'unpack',
-      cmdline = 'rm -rf {{python.base}} && tar xjf {{python.package}}')
-
-    build = ReliableProcess(
-      name = 'build',
-      cmdline = 'pushd {{python.base}} && ./configure && make && popd',
-      max_failures = 1)
-
-    email = ReliableProcess(
-      name = 'email',
-      cmdline = 'echo Success | mail {{role}}@foocorp.com')
-
-    build_python = SequentialTask(
-      name = 'build_python',
-      processes = [download, unpack, build, email]).bind(python = Python(version = "2.7.3"))
-
-### Thermos Uses bash, But Thermos Is Not bash
-
-#### Bad
-
-Many tiny Processes make for harder-to-manage configurations.
-
-    copy = Process(
-      name = 'copy',
-      cmdline = 'rcp user@my_machine:my_application .'
-    )
-
-    unpack = Process(
-      name = 'unpack',
-      cmdline = 'unzip app.zip'
-    )
-
-    remove = Process(
-      name = 'remove',
-      cmdline = 'rm -f app.zip'
-    )
-
-    run = Process(
-      name = 'app',
-      cmdline = 'java -jar app.jar'
-    )
-
-    run_task = Task(
-      processes = [copy, unpack, remove, run],
-      constraints = order(copy, unpack, remove, run)
-    )
-
-#### Good
-
-Each `cmdline` runs in a bash subshell, so you have the full power of
-bash. Chaining commands with `&&` or `||` is almost always the right
-thing to do.
-
-Also for Tasks that are simply a list of processes that run one after
-another, consider using the `SequentialTask` helper which applies a
-linear ordering constraint for you.
-
-    stage = Process(
-      name = 'stage',
-      cmdline = 'rcp user@my_machine:my_application . && unzip app.zip && rm -f app.zip')
-
-    run = Process(name = 'app', cmdline = 'java -jar app.jar')
-
-    run_task = SequentialTask(processes = [stage, run])
-
-### Rarely Use Functions In Your Configurations
-
-90% of the time you define a function in a `.aurora` file, you're
-probably Doing It Wrong(TM).
-
-#### Bad
-
-    def get_my_task(name, user, cpu, ram, disk):
-      return Task(
-        name = name,
-        user = user,
-        processes = [STAGE_PROCESS, RUN_PROCESS],
-        constraints = order(STAGE_PROCESS, RUN_PROCESS),
-        resources = Resources(cpu = cpu, ram = ram, disk = disk)
-      )
-
-    task_one = get_my_task('task_one', 'feynman', 1.0, 32*MB, 1*GB)
-    task_two = get_my_task('task_two', 'feynman', 2.0, 64*MB, 1*GB)
-
-#### Good
-
-This one is more idiomatic. Forced keyword arguments prevent accidents,
-e.g. passing "32*MB" as disk when you mean 32MB of RAM. Less
-proliferation of task-construction techniques means an easier-to-read,
-quicker-to-understand, and more composable configuration.
-
-    TASK_TEMPLATE = SequentialTask(
-      user = 'wickman',
-      processes = [STAGE_PROCESS, RUN_PROCESS],
-    )
-
-    task_one = TASK_TEMPLATE(
-      name = 'task_one',
-      resources = Resources(cpu = 1.0, ram = 32*MB, disk = 1*GB)
-    )
-
-    task_two = TASK_TEMPLATE(
-      name = 'task_two',
-      resources = Resources(cpu = 2.0, ram = 64*MB, disk = 1*GB)
-    )
diff --git a/source/documentation/latest/contributing.md b/source/documentation/latest/contributing.md
index 0ac8954..76d4cef 100644
--- a/source/documentation/latest/contributing.md
+++ b/source/documentation/latest/contributing.md
@@ -1,42 +1,60 @@
-Get the Source Code
--------------------
+## Get the Source Code
+
 First things first, you'll need the source! The Aurora source is available from Apache git:
 
-    git clone https://git-wip-us.apache.org/repos/asf/incubator-aurora
+    git clone https://gitbox.apache.org/repos/asf/aurora
 
-Find Something to Do
---------------------
+Read the Style Guides
+---------------------
+Aurora's codebase is primarily Java and Python and conforms to the Twitter Commons styleguides for
+both languages.
+
+- [Java Style Guide](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/styleguide.md)
+- [Python Style Guide](https://github.com/twitter/commons/blob/master/src/python/twitter/common/styleguide.md)
+
+## Find Something to Do
+
 There are issues in [Jira](https://issues.apache.org/jira/browse/AURORA) with the
-["newbie" tag](https://issues.apache.org/jira/browse/AURORA-189?jql=project%20%3D%20AURORA%20AND%20resolution%20%3D%20Unresolved%20AND%20labels%20%3D%20newbie%20ORDER%20BY%20priority%20DESC)
-that are good starting places for new Aurora contributors; pick one of these and dive in! Once
-you've got a patch, the next step is to post a review.
+["newbie" label](https://issues.apache.org/jira/issues/?jql=project%20%3D%20AURORA%20AND%20labels%20%3D%20newbie%20and%20resolution%3Dunresolved)
+that are good starting places for new Aurora contributors; pick one of these and dive in! To assign
+a task to yourself, first ask for your JIRA id to be whitelisted, either by asking in IRC/Slack or by
+emailing dev@aurora.apache.org. Once your JIRA account has been whitelisted, you can assign tickets
+to yourself. The next step is to prepare your patch and finally post it for review.
 
-Getting your ReviewBoard Account
---------------------------------
+## Getting your ReviewBoard Account
+
 Go to https://reviews.apache.org and create an account.
 
-Setting up your ReviewBoard Environment
----------------------------------------
+## Setting up your ReviewBoard Environment
+
 Run `./rbt status`. The first time this runs it will bootstrap and you will be asked to login.
 Subsequent runs will cache your login credentials.
 
-Submitting a Patch for Review
------------------------------
+## Submitting a Patch for Review
+
 Post a review with `rbt`, fill out the fields in your browser and hit Publish.
 
     ./rbt post -o
 
+If you're unsure about who to add as a reviewer, you can default to adding Stephan Erb (StephanErb) and
+Renan DelValle (rdelvalle). They will take care of finding an appropriate reviewer for the patch.
+
 Once you've done this, you probably want to mark the associated Jira issue as Reviewable.
 
-Updating an Existing Review
----------------------------
+## Updating an Existing Review
+
 Incorporate review feedback, make some more commits, update your existing review, fill out the
 fields in your browser and hit Publish.
 
     ./rbt post -o -r <RB_ID>
 
-Merging Your Own Review (Committers)
-------------------------------------
+## Getting Your Review Merged
+
+If you're not an Aurora committer, one of the committers will merge your change in as described
+below. Generally, the last reviewer to give the review a 'Ship It!' will be responsible.
+
+### Merging Your Own Review (Committers)
+
 Once you have shipits from the right committers, merge your changes in a single commit and mark
 the review as submitted. The typical workflow is:
 
@@ -51,8 +69,8 @@
 Note that even if you're developing using feature branches you will not use `git merge` - each
 commit will be an atomic change accompanied by a ReviewBoard entry.
 
-Merging Someone Else's Review
------------------------------
+### Merging Someone Else's Review
+
 Sometimes you'll need to merge someone else's RB. The typical workflow for this is
 
     git checkout master
@@ -61,8 +79,15 @@
     git show master  # Verify everything looks sane, author is correct
     git push origin master
 
-Cleaning Up
------------
+Note for committers: while we generally use the commit message generated by `./rbt patch`, some
+changes are often required:
+
+1. Ensure the commit message does not exceed 100 characters per line.
+2. Remove the "Testing Done" section. It's generally redundant (can be seen by checking the linked
+  review) or entirely irrelevant to the commit itself.
+
+## Cleaning Up
+
 Your patch has landed, congratulations! The last thing you'll want to do before moving on to your
 next fix is to clean up your Jira and Reviewboard. The former of which should be marked as
 "Resolved" while the latter should be marked as "Submitted".
diff --git a/source/documentation/latest/cron-jobs.md b/source/documentation/latest/cron-jobs.md
deleted file mode 100644
index 14e6895..0000000
--- a/source/documentation/latest/cron-jobs.md
+++ /dev/null
@@ -1,131 +0,0 @@
-# Cron Jobs
-
-Aurora supports execution of scheduled jobs on a Mesos cluster using cron-style syntax.
-
-- [Overview](#overview)
-- [Collision Policies](#collision-policies)
-	- [KILL_EXISTING](#kill_existing)
-	- [CANCEL_NEW](#cancel_new)
-- [Failure recovery](#failure-recovery)
-- [Interacting with cron jobs via the Aurora CLI](#interacting-with-cron-jobs-via-the-aurora-cli)
-	- [cron schedule](#cron-schedule)
-	- [cron deschedule](#cron-deschedule)
-	- [cron start](#cron-start)
-	- [job killall, job restart, job kill](#job-killall-job-restart-job-kill)
-- [Technical Note About Syntax](#technical-note-about-syntax)
-- [Caveats](#caveats)
-	- [Failovers](#failovers)
-	- [Collision policy is best-effort](#collision-policy-is-best-effort)
-	- [Timezone Configuration](#timezone-configuration)
-
-## Overview
-
-A job is identified as a cron job by the presence of a
-`cron_schedule` attribute containing a cron-style schedule in the
-[`Job`](configuration-reference.md#job-objects) object. Examples of cron schedules
-include "every 5 minutes" (`*/5 * * * *`), "Fridays at 17:00" (`* 17 * * FRI`), and
-"the 1st and 15th day of the month at 03:00" (`0 3 1,15 *`).
-
-Example (available in the [Vagrant environment](/documentation/latest/vagrant/)):
-
-    $ cat /vagrant/examples/job/cron_hello_world.aurora
-    # cron_hello_world.aurora
-    # A cron job that runs every 5 minutes.
-    jobs = [
-      Job(
-        cluster = 'devcluster',
-        role = 'www-data',
-        environment = 'test',
-        name = 'cron_hello_world',
-        cron_schedule = '*/5 * * * *',
-        task = SimpleTask(
-          'cron_hello_world',
-          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
-      ),
-    ]
-
-## Collision Policies
-
-The `cron_collision_policy` field specifies the scheduler's behavior when a new cron job is
-triggered while an older run hasn't finished. The scheduler has two policies available,
-[KILL_EXISTING](#kill_existing) and [CANCEL_NEW](#cancel_new).
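-
-The policy is set directly on the `Job`, e.g. (a sketch with most fields elided):
-
-    Job(
-      # ... name, role, environment, task, etc. ...
-      cron_schedule = '*/5 * * * *',
-      cron_collision_policy = 'CANCEL_NEW',
-    )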
-
-### KILL_EXISTING
-
-The default policy - on a collision the old instances are killed and instances with the current
-configuration are started.
-
-### CANCEL_NEW
-
-On a collision the new run is cancelled.
-
-Note that the use of this flag is likely a code smell - interrupted cron jobs should be able
-to recover their progress on a subsequent invocation, otherwise they risk having their work queue
-grow faster than they can process it.
-
-## Failure recovery
-
-Unlike services, which Aurora will always re-execute regardless of exit status, instances of
-cron jobs retry according to the `max_task_failures` attribute of the
-[Task](configuration-reference.md#task-objects) object. To get "run-until-success" semantics,
-set `max_task_failures` to `-1`.
-
-## Interacting with cron jobs via the Aurora CLI
-
-Most interaction with cron jobs takes place using the `cron` subcommand. See `aurora2 help cron`
-for up-to-date usage instructions.
-
-### cron schedule
-Schedules a new cron job on the Aurora cluster for later runs or replaces the existing cron template
-with a new one. Only future runs will be affected; any existing active tasks are left intact.
-
-    $ aurora2 cron schedule devcluster/www-data/test/cron_hello_world /vagrant/examples/jobs/cron_hello_world.aurora
-
-### cron deschedule
-Deschedules a cron job, preventing future runs but allowing current runs to complete.
-
-    $ aurora2 cron deschedule devcluster/www-data/test/cron_hello_world
-
-### cron start
-Start a cron job immediately, outside of its normal cron schedule.
-
-    $ aurora2 cron start devcluster/www-data/test/cron_hello_world
-
-### job killall, job restart, job kill
-Cron jobs create instances running on the cluster that you can interact with like normal Aurora
-tasks with `job kill` and `job restart`.
-
-## Technical Note About Syntax
-
-`cron_schedule` uses a restricted subset of BSD crontab syntax. While the
-execution engine currently uses Quartz, the schedule parsing is custom and supports only a subset
-of FreeBSD [crontab(5)](http://www.freebsd.org/cgi/man.cgi?crontab(5)) syntax. See
-[the source](https://github.com/apache/incubator-aurora/blob/master/src/main/java/org/apache/aurora/scheduler/cron/CrontabEntry.java#L106-L124)
-for details.
-
-## Caveats
-
-### Failovers
-No failover recovery. Aurora does not record the latest minute it fired
-triggers for across failovers. Therefore it's possible to miss triggers
-on failover. Note that this behavior may change in the future.
-
-It's necessary to sync time between schedulers with something like `ntpd`.
-Clock skew could cause double or missed triggers in the case of a failover.
-
-### Collision policy is best-effort
-Aurora aims to always have *at least one copy* of a given instance running at a time - it's
-an AP system, meaning it chooses Availability and Partition Tolerance at the expense of
-Consistency.
-
-If your collision policy is `CANCEL_NEW` and a task has terminated but
-Aurora has not yet noticed this, Aurora will go ahead and create your new
-task.
-
-If your collision policy is `KILL_EXISTING` and a task was marked `LOST`
-but not yet garbage collected, Aurora will go ahead and create your new task without
-attempting to kill the old one (outside the GC interval).
-
-### Timezone Configuration
-Cron timezone is configured independently of the JVM timezone with the `-cron_timezone` flag and
-defaults to UTC.
diff --git a/source/documentation/latest/deploying-aurora-scheduler.md b/source/documentation/latest/deploying-aurora-scheduler.md
deleted file mode 100644
index 5b6052e..0000000
--- a/source/documentation/latest/deploying-aurora-scheduler.md
+++ /dev/null
@@ -1,259 +0,0 @@
-# Deploying the Aurora Scheduler
-
-When setting up your cluster, you will install the scheduler on a small number (usually 3 or 5) of
-machines.  This guide helps you get the scheduler set up and troubleshoot some common hurdles.
-
-- [Installing Aurora](#installing-aurora)
-  - [Creating the Distribution .zip File (Optional)](#creating-the-distribution-zip-file-optional)
-  - [Installing Aurora](#installing-aurora-1)
-- [Configuring Aurora](#configuring-aurora)
-  - [A Note on Configuration](#a-note-on-configuration)
-  - [Replicated Log Configuration](#replicated-log-configuration)
-  - [Initializing the Replicated Log](#initializing-the-replicated-log)
-  - [Storage Performance Considerations](#storage-performance-considerations)
-  - [Network considerations](#network-considerations)
-- [Running Aurora](#running-aurora)
-  - [Maintaining an Aurora Installation](#maintaining-an-aurora-installation)
-  - [Monitoring](#monitoring)
-  - [Running stateful services](#running-stateful-services)
-    - [Dedicated attribute](#dedicated-attribute)
-      - [Syntax](#syntax)
-      - [Example](#example)
-- [Common problems](#common-problems)
-  - [Replicated log not initialized](#replicated-log-not-initialized)
-    - [Symptoms](#symptoms)
-    - [Solution](#solution)
-  - [Scheduler not registered](#scheduler-not-registered)
-    - [Symptoms](#symptoms-1)
-    - [Solution](#solution-1)
-  - [Tasks are stuck in PENDING forever](#tasks-are-stuck-in-pending-forever)
-    - [Symptoms](#symptoms-2)
-    - [Solution](#solution-2)
-
-## Installing Aurora
-The Aurora scheduler is a standalone Java server. As part of the build process it creates a bundle
-of all its dependencies, with the notable exceptions of the JVM and libmesos. Each target server
-should have a JVM (Java 7 or higher) and libmesos (0.20.1) installed.
-
-### Creating the Distribution .zip File (Optional)
-To create a distribution for installation you will need build tools installed. On Ubuntu this can be
-done with `sudo apt-get install build-essential default-jdk`.
-
-    git clone http://git-wip-us.apache.org/repos/asf/incubator-aurora.git
-    cd incubator-aurora
-    ./gradlew distZip
-
-Copy the generated `dist/distributions/aurora-scheduler-*.zip` to each node that will run a scheduler.
-
-### Installing Aurora
-Extract the aurora-scheduler zip file. The example configurations assume it is extracted to
-`/usr/local/aurora-scheduler`.
-
-    sudo unzip dist/distributions/aurora-scheduler-*.zip -d /usr/local
-    sudo ln -nfs "$(ls -dt /usr/local/aurora-scheduler-* | head -1)" /usr/local/aurora-scheduler
-
-## Configuring Aurora
-
-### A Note on Configuration
-Like Mesos, Aurora uses command-line flags for runtime configuration. As such, the Aurora
-"configuration file" is typically a `scheduler.sh` shell script of the following form:
-
-    #!/bin/bash
-    AURORA_HOME=/usr/local/aurora-scheduler
-
-    # Flags controlling the JVM.
-    JAVA_OPTS=(
-      -Xmx2g
-      -Xms2g
-      # GC tuning, etc.
-    )
-
-    # Flags controlling the scheduler.
-    AURORA_FLAGS=(
-      -http_port=8081
-      # Log configuration, etc.
-    )
-
-    # Environment variables controlling libmesos
-    export JAVA_HOME=...
-    export GLOG_v=1
-    export LIBPROCESS_PORT=8083
-
-    JAVA_OPTS="${JAVA_OPTS[*]}" exec "$AURORA_HOME/bin/aurora-scheduler" "${AURORA_FLAGS[@]}"
-
-That way Aurora's current flags are visible in `ps` and in the `/vars` admin endpoint.
-
-Examples are available under `examples/scheduler/`. For a list of available Aurora flags and their
-documentation run
-
-    /usr/local/aurora-scheduler/bin/aurora-scheduler -help
-
-### Replicated Log Configuration
-All Aurora state is persisted to a replicated log. This includes all jobs Aurora is running
-including where in the cluster they are being run and the configuration for running them, as
-well as other information such as metadata needed to reconnect to the Mesos master, resource
-quotas, and any other locks in place.
-
-Aurora schedulers use ZooKeeper to discover log replicas and elect a leader. Only one scheduler is
-leader at a given time - the other schedulers follow log writes and prepare to take over as leader
-but do not communicate with the Mesos master. Either 3 or 5 schedulers are recommended in a
-production deployment depending on failure tolerance and they must have persistent storage.
-
-In a cluster with `N` schedulers, the flag `-native_log_quorum_size` should be set to
-`floor(N/2) + 1`. So in a cluster with 1 scheduler it should be set to `1`, in a cluster with 3 it
-should be set to `2`, and in a cluster of 5 it should be set to `3`.
-
-  Number of schedulers (N) | ```-native_log_quorum_size``` setting (```floor(N/2) + 1```)
-  ------------------------ | -------------------------------------------------------------
-  1                        | 1
-  3                        | 2
-  5                        | 3
-  7                        | 4
-
-*Incorrectly setting this flag will cause data corruption to occur!*
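-
-For example, following the `scheduler.sh` sketch above, a three-scheduler cluster would include:
-
-    AURORA_FLAGS=(
-      # ... other flags ...
-      -native_log_quorum_size=2
-    )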
-
-See [this document](storage-config.md#scheduler-storage-configuration-flags) for more replicated
-log and storage configuration options.
-
-### Initializing the Replicated Log
-Before you start Aurora you will also need to initialize the log on the first master.
-
-    mesos-log initialize --path="$AURORA_HOME/scheduler/db"
-
-Failing to do this will result in the following message when you try to start the scheduler.
-
-    Replica in EMPTY status received a broadcasted recover request
-
-### Storage Performance Considerations
-
-See [this document](/documentation/latest/scheduler-storage/) for scheduler storage performance considerations.
-
-### Network considerations
-The Aurora scheduler listens on 2 ports - an HTTP port used for client RPCs and a web UI,
-and a libprocess (HTTP+Protobuf) port used to communicate with the Mesos master and for the log
-replication protocol. These can be left unconfigured (the scheduler publishes all selected ports
-to ZooKeeper) or explicitly set in the startup script as follows:
-
-    # ...
-    AURORA_FLAGS=(
-      # ...
-      -http_port=8081
-      # ...
-    )
-    # ...
-    export LIBPROCESS_PORT=8083
-    # ...
-
-## Running Aurora
-Configure a supervisor like [Monit](http://mmonit.com/monit/) or
-[supervisord](http://supervisord.org/) to run the created `scheduler.sh` file and restart it
-whenever it fails. Aurora expects to be restarted by an external process when it fails. Aurora
-supports an active health checking protocol on its admin HTTP interface - if a `GET /health` times
-out or returns anything other than `200 OK` the scheduler process is unhealthy and should be
-restarted.
-
-For example, monit can be configured with
-
-    if failed port 8081 send "GET /health HTTP/1.0\r\n" expect "OK\n" with timeout 2 seconds for 10 cycles then restart
-
-assuming you set `-http_port=8081`.
-
-### Maintaining an Aurora Installation
-
-### Monitoring
-Please see our dedicated [monitoring guide](/documentation/latest/monitoring/) for in-depth discussion on monitoring.
-
-### Running stateful services
-Aurora is best suited to run stateless applications, but it also accommodates stateful services
-like databases, or services that otherwise need to always run on the same machines.
-
-#### Dedicated attribute
-The Mesos slave has the `--attributes` command line argument which can be used to mark a slave with
-static attributes (not to be confused with `--resources`, which are dynamic and accounted).
-
-Aurora makes these attributes available for matching with scheduling
-[constraints](configuration-reference.md#specifying-scheduling-constraints).  Most of these
-constraints are arbitrary and available for custom use.  There is one exception, though: the
-`dedicated` attribute.  Aurora treats this specially: only jobs with a matching constraint may run
-on these machines, and jobs carrying the constraint will only be scheduled onto them.
-
-##### Syntax
-The dedicated attribute has semantic meaning. The format is `$role(/.*)?`. When a job is created,
-the scheduler requires that the `$role` component matches the `role` field in the job
-configuration, and will reject the job creation otherwise.  The remainder of the attribute is
-free-form. We've developed the idiom of formatting this attribute as `$role/$job`, but do not
-enforce this.
-
-##### Example
-Consider the following slave command line:
-
-    mesos-slave --attributes="host:$HOST;rack:$RACK;dedicated:db_team/redis" ...
-
-And this job configuration:
-
-    Service(
-      name = 'redis',
-      role = 'db_team',
-      constraints = {
-        'dedicated': 'db_team/redis'
-      }
-      ...
-    )
-
-The job configuration indicates that it should only be scheduled on slaves with the attribute
-`dedicated:db_team/redis`.  Additionally, Aurora will prevent any tasks that do _not_ have that
-constraint from running on those slaves.
-
-
-## Common problems
-So you've started your first cluster and are running into some issues? We've collected some common
-stumbling blocks and solutions here to help get you moving.
-
-### Replicated log not initialized
-
-#### Symptoms
-- Scheduler RPCs and web interface claim `Storage is not READY`
-- Scheduler log repeatedly prints messages like
-
-  ```
-  I1016 16:12:27.234133 26081 replica.cpp:638] Replica in EMPTY status
-  received a broadcasted recover request
-  I1016 16:12:27.234256 26084 recover.cpp:188] Received a recover response
-  from a replica in EMPTY status
-  ```
-
-#### Solution
-When you create a new cluster, you need to inform a quorum of schedulers that they are safe to
-consider their database to be empty by [initializing](#initializing-the-replicated-log) the
-replicated log. This is done to prevent the scheduler from modifying the cluster state in the event
-of multiple simultaneous disk failures or, more likely, misconfiguration of the replicated log path.
-
-### Scheduler not registered
-
-#### Symptoms
-Scheduler log contains
-
-    Framework has not been registered within the tolerated delay.
-
-#### Solution
-Double-check that the scheduler is configured correctly to reach the master. If you are registering
-the master in ZooKeeper, make sure the command line argument to the master:
-
-    --zk=zk://$ZK_HOST:2181/mesos/master
-
-is the same as the one on the scheduler:
-
-    -mesos_master_address=zk://$ZK_HOST:2181/mesos/master
-
-### Tasks are stuck in `PENDING` forever
-
-#### Symptoms
-The scheduler is registered and [receiving offers](docs/monitoring.md#scheduler_resource_offers),
-but tasks are perpetually shown as `PENDING - Constraint not satisfied: host`.
-
-#### Solution
-Check that your slaves are configured with `host` and `rack` attributes.  Aurora requires that
-slaves are tagged with these two common failure domains to ensure that it can safely place tasks
-such that jobs are resilient to failure.
-
-See our [vagrant example](examples/vagrant/upstart/mesos-slave.conf) for details.
diff --git a/source/documentation/latest/developing-aurora-client.md b/source/documentation/latest/developing-aurora-client.md
deleted file mode 100644
index 923866c..0000000
--- a/source/documentation/latest/developing-aurora-client.md
+++ /dev/null
@@ -1,122 +0,0 @@
-Getting Started
-===============
-
-Aurora consists of four main pieces: the scheduler (which finds resources in the cluster that can be used to run a job), the executor (which uses the resources assigned by the scheduler to run a job), the command-line client, and the web-ui. For information about working on the scheduler or the webUI, see the file "developing-aurora-scheduler.md" in this directory.
-
-If you want to work on the command-line client, this is the place for you!
-
-The client is written in Python, and unlike the server side of things, we build the client using the Pants build tool instead of Gradle. Pants is a tool that was built by Twitter for handling builds of large collaborative systems. You can see a detailed explanation of
-Pants [here](http://pantsbuild.github.io/python-readme.html).
-
-To build the client executable, run the following in a command-shell:
-
-    $ ./pants src/main/python/apache/aurora/client/cli:aurora2
-
-This will produce a python executable _pex_ file in `dist/aurora2.pex`. Pex files
-are fully self-contained executables: just copy the pex file into your path, and you'll be able to run it. For example, for a typical installation:
-
-    $ cp dist/aurora2.pex /usr/local/bin/aurora
-
-To run all of the client tests:
-
-    $ ./pants src/test/python/apache/aurora/client/:all
-
-
-Client Configuration
-====================
-
-The client uses a configuration file that specifies available clusters. More information about the
-contents of this file can be found in the
-[Client Cluster Configuration](/documentation/latest/client-cluster-configuration/) documentation. Information about
-how the client locates this file can be found in the
-[Client Commands](client-commands.md#cluster-configuration) documentation.
-
-Client Versions
-===============
-
-There are currently two versions of the Aurora client, imaginatively known as v1 and v2. All new development is done entirely in v2, but we continue to support and fix bugs in v1, until we get to the point where v2 is feature-complete and tested, and Aurora users have had some time to adapt and switch their processes to use v2.
-
-Both versions are built on the same underlying API code.
-
-Client v1 was implemented using twitter.common.app. The command-line processing code for v1 can be found in `src/main/python/apache/aurora/client/commands` and
-`src/main/python/apache/aurora/client/bin`.
-
-Client v2 was implemented using its own noun/verb framework. The client v2 code can be found in `src/main/python/apache/aurora/client/cli`, and the noun/verb framework can be
-found in the `__init__.py` file in that directory.
-
-
-Building and Testing the Client
-===============================
-
-Building and testing the client code are both done using Pants. The relevant targets to know about are:
-
-   * Build a client v2 executable: `./pants src/main/python/apache/aurora/client/cli:aurora2`
-   * Test client v2 code: `./pants src/test/python/apache/aurora/client/cli:all`
-   * Build a client v1 executable: `./pants src/main/python/apache/aurora/client/bin:aurora_client`
-   * Test client v1 code: `./pants src/main/python/apache/aurora/client/commands:all`
-   * Test all client code: `./pants src/main/python/apache/aurora/client:all`
-
-
-Overview of the Client Architecture
-===================================
-
-The client is built on a stacked architecture:
-
-   1. At the lowest level, we have a thrift RPC API interface
-    to the aurora scheduler. The interface is declared in thrift, in the file
-    `src/main/thrift/org/apache/aurora/gen/api.thrift`.
-
-  2. On top of the primitive API, we have a client API. The client API
-    takes the primitive operations provided by the scheduler, and uses them
-    to implement client-side behaviors. For example, when you update a job,
-    on the scheduler, that's done by a sequence of operations.  The sequence is implemented
-    by the client API `update` method, which does the following using the thrift API:
-     * fetching the state of task instances in the mesos cluster, and figuring out which need
-       to be updated;
-     * For each task to be updated:
-         - killing the old version;
-         - starting the new version;
-         - monitoring the new version to ensure that the update succeeded.
-  3. On top of the API, we have the command-line client itself. The core client, at this level,
-    consists of the interface to the command-line which the user will use to interact with aurora.
-    The client v2 code is found in `src/python/apache/aurora/client/cli`. In the `cli` directory,
-    the rough structure is as follows:
-       * `__init__.py` contains the noun/verb command-line processing framework used by client v2.
-       * `jobs.py` contains the implementation of the core `job` noun, and all of its operations.
-       * `bridge.py` contains the implementation of a component that allows us to ship a
-         combined client that runs both v1 and v2 client commands during the transition period.
-       * `client.py` contains the code that binds the client v2 nouns and verbs into an executable.
-
-Running/Debugging the Client
-============================
-
-For manually testing client changes against a cluster, we use vagrant. To start a virtual cluster,
-you need to install a working vagrant environment, and then run `vagrant up` from the root of
-the aurora workspace. This will create a vagrant host named "devcluster", with a mesos master,
-a set of mesos slaves, and an aurora scheduler.
-
-To use the devcluster, you need to bring it up by running `vagrant up`, and then connect to the vagrant host using `vagrant ssh`. This will open a bash session on the virtual machine hosting the devcluster. In the home directory, there are two key paths to know about:
-
-   * `~/aurora`: this is a copy of the git workspace in which you launched the vagrant cluster.
-     To test client changes, you'll use this copy.
-   * `/vagrant`: this is a mounted filesystem that's a direct image of your git workspace.
-     This isn't a copy - it is your git workspace. Editing files on your host machine will
-     be immediately visible here, because they are the same files.
-
-Whenever the scheduler is modified, to update your vagrant environment to use the new scheduler,
-you'll need to re-initialize your vagrant images. To do this, you need to run two commands:
-
-   * `vagrant destroy`: this will delete the old devcluster image.
-   * `vagrant up`: this creates a fresh devcluster image based on the current state of your workspace.
-
-You should try to minimize rebuilding vagrant images; it's not horribly slow, but it does take a while.
-
-To test client changes:
-
-   * Make a change in your local workspace, and commit it.
-   * `vagrant ssh` into the devcluster.
-   * `cd aurora`
-   * Pull your changes into the vagrant copy: `git pull /vagrant *branchname*`.
-   * Build the modified client using pants.
-   * Run your command using `aurora2`. (You don't need to do any install; the aurora2 command
-     is a symbolic link to the executable generated by pants.)
diff --git a/source/documentation/latest/development/client.md b/source/documentation/latest/development/client.md
new file mode 100644
index 0000000..c6bec5b
--- /dev/null
+++ b/source/documentation/latest/development/client.md
@@ -0,0 +1,148 @@
+Developing the Aurora Client
+============================
+
+The client is written in Python, and uses the
+[Pants](http://pantsbuild.github.io/python-readme.html) build tool.
+
+
+Building and Testing
+--------------------
+
+Building and testing the client code are both done using Pants. The relevant targets to know about
+are:
+
+   * Build a client executable: `./pants binary src/main/python/apache/aurora/client:aurora`
+   * Test client code: `./pants test src/test/python/apache/aurora/client/cli:cli`
+
+If you want to build a source distribution of the client, you need to run `./build-support/release/make-python-sdists`.
+
+
+Creating Custom Builds
+----------------------
+
+There are situations where you may want to plug in custom logic to the Client that may not be
+applicable to the open source codebase. Rather than create a whole CLI from scratch, you can
+easily create your own custom, drop-in replacement aurora.pex using the pants build tool.
+
+First, create an AuroraCommandLine implementation as an entry-point for registering customizations:
+
+    from apache.aurora.client.cli.client import AuroraCommandLine
+
+    class CustomAuroraCommandLine(AuroraCommandLine):
+      """Custom AuroraCommandLine for your needs"""
+
+      @property
+      def name(self):
+        return "your-company-aurora"
+
+      @classmethod
+      def get_description(cls):
+        return 'Your Company internal Aurora client command line'
+
+      def __init__(self):
+        super(CustomAuroraCommandLine, self).__init__()
+        # Add custom plugins.
+        self.register_plugin(YourCustomPlugin())
+
+      def register_nouns(self):
+        super(CustomAuroraCommandLine, self).register_nouns()
+        # You can even add new commands / sub-commands!
+        self.register_noun(YourStartUpdateProxy())
+        self.register_noun(YourDeployWorkflowCommand())
+
+Secondly, create a main entry point:
+
+    import sys
+
+    def proxy_main():
+      client = CustomAuroraCommandLine()
+      if len(sys.argv) == 1:
+        sys.argv.append("-h")
+      sys.exit(client.execute(sys.argv[1:]))
+
+Finally, you can wire everything up with a pants BUILD file in your project directory:
+
+    python_binary(
+      name='aurora',
+      entry_point='your_company.aurora.client:proxy_main',
+      dependencies=[
+        ':client_lib'
+      ]
+    )
+
+    python_library(
+      name='client_lib',
+      sources = [
+        'client.py',
+        'custom_plugin.py',
+        'custom_command.py',
+      ],
+      dependencies = [
+        # The Apache Aurora client
+        # Any other dependencies for your custom code
+      ],
+    )
+
+Using the same commands to build the client as above (but obviously pointing to this BUILD file
+instead), you will have a drop-in replacement aurora.pex file with your customizations.
+
+Running/Debugging
+------------------
+
+For manually testing client changes against a cluster, we use [Vagrant](https://www.vagrantup.com/).
+To start a virtual cluster, you need to install Vagrant, and then run `vagrant up` from the root of
+the aurora workspace. This will create a vagrant host named "devcluster", with a Mesos master, a set
+of Mesos agents, and an Aurora scheduler.
+
+If you have a change you would like to test in your local cluster, you'll rebuild the client:
+
+    vagrant ssh -c 'aurorabuild client'
+
+Once this completes, the `aurora` command will reflect your changes.
+
+
+Running/Debugging in PyCharm
+-----------------------------
+
+It's possible to use PyCharm to run and debug both the client and client tests in an IDE. In order
+to do this, first run:
+
+    build-support/python/make-pycharm-virtualenv
+
+This script will configure a virtualenv with all of our Python requirements. Once the script
+completes it will emit instructions for configuring PyCharm:
+
+    Your PyCharm environment is now set up.  You can open the project root
+    directory with PyCharm.
+
+    Once the project is loaded:
+      - open project settings
+      - click 'Project Interpreter'
+      - click the cog in the upper-right corner
+      - click 'Add Local'
+      - select 'build-support/python/pycharm.venv/bin/python'
+      - click 'OK'
+
+### Running/Debugging Tests
+After following these instructions, you should now be able to run/debug tests directly from the IDE
+by right-clicking on a test (or test class) and choosing to run or debug:
+
+[![Debug Client Test](../images/debug-client-test.png)](../images/debug-client-test.png)
+
+If you've set a breakpoint, you can see the run will now stop and let you debug:
+
+[![Debugging Client Test](../images/debugging-client-test.png)](../images/debugging-client-test.png)
+
+### Running/Debugging the Client
+Actually running and debugging the client is unfortunately a bit more complex. You'll need to create
+a Run configuration:
+
+* Go to Run → Edit Configurations
+* Click the + icon to add a new configuration.
+* Choose python and name the configuration 'client'.
+* Set the script path to `/your/path/to/aurora/src/main/python/apache/aurora/client/cli/client.py`
+* Set the script parameters to the command you want to run (e.g. `job status <job key>`)
+* Expand the Environment section and click the ellipsis to add a new environment variable
+* Click the + at the bottom to add a new variable named AURORA_CONFIG_ROOT whose value is the
+  path where your cluster configuration can be found. For example, to talk to the scheduler
+  running in the vagrant image, it would be set to `/your/path/to/aurora/examples/vagrant` (this
+  is the directory where our example clusters.json is found).
+* You should now be able to run and debug this configuration!
diff --git a/source/documentation/latest/development/committers-guide.md b/source/documentation/latest/development/committers-guide.md
new file mode 100644
index 0000000..aadf254
--- /dev/null
+++ b/source/documentation/latest/development/committers-guide.md
@@ -0,0 +1,105 @@
+Committer's Guide
+=================
+
+Information for official Apache Aurora committers.
+
+Setting up your email account
+-----------------------------
+Once your Apache ID has been set up, you can configure your account, add ssh keys, and set up an
+email forwarding address at
+
+    http://id.apache.org
+
+Additional instructions for setting up your new committer email can be found at
+
+    http://www.apache.org/dev/user-email.html
+
+The recommended setup is to configure all services (mailing lists, JIRA, ReviewBoard) to send
+emails to your @apache.org email address.
+
+
+Creating a gpg key for releases
+-------------------------------
+In order to create a release candidate you will need a gpg key published to an external key server
+and that key will need to be added to our KEYS file as well.
+
+1. Create a key:
+
+               gpg --gen-key
+
+2. Add your gpg key to the Apache Aurora KEYS file:
+
+               git clone https://gitbox.apache.org/repos/asf/aurora
+               (gpg --list-sigs <KEY ID> && gpg --armor --export <KEY ID>) >> KEYS
+               git add KEYS && git commit -m "Adding gpg key for <APACHE ID>"
+               ./rbt post -o -g
+
+3. Publish the key to an external key server:
+
+               gpg --keyserver pgp.mit.edu --send-keys <KEY ID>
+
+4. Push the updated KEYS file to the Apache Aurora svn dist locations listed below:
+
+               https://dist.apache.org/repos/dist/dev/aurora/KEYS
+               https://dist.apache.org/repos/dist/release/aurora/KEYS
+
+5. Add your key to git config for use with the release scripts:
+
+               git config --global user.signingkey <KEY ID>
+
+
+Creating a release
+------------------
+The following will guide you through the steps to create a release candidate, vote, and finally an
+official Apache Aurora release. Before starting, your gpg key should be in the KEYS file and you
+must have access to commit to the dist.a.o repositories.
+
+1. Ensure that all issues resolved for this release candidate are tagged with the correct Fix
+Version in JIRA; the changelog script will use this to generate the CHANGELOG in step #3.
+To assign the fix version:
+
+    * Look up the [previous release date](https://issues.apache.org/jira/browse/aurora/?selectedTab=com.atlassian.jira.jira-projects-plugin:versions-panel).
+    * Query all issues resolved after that release date: `project = AURORA AND status in (resolved, Closed) and fixVersion is empty and resolutiondate >= "YYYY/MM/DD"`
+    * In the upper right corner of the query result, select Tools > Bulk Edit.
+    * Select all issues > edit issue > set 'Change Fix Version/s' to the release version.
+    * Make sure to uncheck 'Send mail for this update' at the bottom.
+
+2. Prepare RELEASE-NOTES.md for the release. This just boils down to removing the "(Not yet
+released)" suffix from the impending release.
+
+3. Create a release candidate. This will automatically update the CHANGELOG and commit it, create a
+branch and update the current version within the trunk. To create a minor version update and publish
+it, run
+
+               ./build-support/release/release-candidate -l m -p
+
+4. Update, if necessary, the draft email created from the `release-candidate` script in step #3 and
+send the [VOTE] email to the dev@ mailing list. You can verify the release signature and checksums
+by running
+
+               ./build-support/release/verify-release-candidate
+
+5. Wait for the vote to complete. If the vote fails, close the vote by replying to the initial [VOTE]
+email sent in step #4, editing the subject to [RESULT][VOTE] ... and noting the failure reason
+(example [here](http://markmail.org/message/d4d6xtvj7vgwi76f)). You'll also need to manually revert
+the commits generated by the release candidate script that incremented the snapshot version and
+updated the changelog. Once that is done, address any issues, go back to step #1, and run again;
+this time use the `-r` flag to increment the release candidate version. This will automatically
+clean up the release candidate rc0 branch and source distribution.
+
+               ./build-support/release/release-candidate -l m -r 1 -p
+
+6. Once the vote has successfully passed, create the release.
+
+**IMPORTANT: make sure to use the correct release at this final step (e.g.: `-r 1` if rc1 candidate
+has been voted for). Once the release tag is pushed it will be very hard to undo due to remote
+git pre-receive hook explicitly forbidding release tag manipulations.**
+
+               ./build-support/release/release
+
+7. Update the draft email created from the `release` script in step #6 to include the Apache IDs for
+all binding votes and send the [RESULT][VOTE] email to the dev@ mailing list.
+
+8. Update the [Aurora Website](http://aurora.apache.org/) by following the
+[instructions](https://svn.apache.org/repos/asf/aurora/site/README.md) on the ASF Aurora SVN repo.
+Remember to add a blog post under source/blog and regenerate the site before committing.
diff --git a/source/documentation/latest/development/db-migration.md b/source/documentation/latest/development/db-migration.md
new file mode 100644
index 0000000..eb70400
--- /dev/null
+++ b/source/documentation/latest/development/db-migration.md
@@ -0,0 +1,34 @@
+DB Migrations
+=============
+
+Changes to the DB schema should be made in the form of migrations. This ensures that all changes
+are applied correctly after a DB dump from a previous version is restored.
+
+DB migrations are managed through a system built on top of
+[MyBatis Migrations](http://www.mybatis.org/migrations/). The migrations are run automatically when
+a snapshot is restored, no manual interaction is required by cluster operators.
+
+Upgrades
+--------
+When adding or altering tables or changing data, in addition to making the change in
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql), a new
+migration class should be created under the org.apache.aurora.scheduler.storage.db.migration
+package. The class should implement the [MigrationScript](https://github.com/mybatis/migrations/blob/master/src/main/java/org/apache/ibatis/migration/MigrationScript.java)
+interface (see [V001_TestMigration](https://github.com/apache/aurora/blob/rel/0.22.0/src/test/java/org/apache/aurora/scheduler/storage/db/testmigration/V001_TestMigration.java)
+as an example). The upgrade and downgrade scripts are defined in this class. When restoring a
+snapshot the list of migrations on the classpath is compared to the list of applied changes in the
+DB. Any changes that have not yet been applied are executed and their downgrade script is stored
+alongside the changelog entry in the database to facilitate downgrades in the event of a rollback.
+
+Downgrades
+----------
+If, while running migrations, a rollback is detected, i.e. a change exists in the DB changelog that
+does not exist on the classpath, the downgrade script associated with each affected change is
+applied.
+
+Baselines
+---------
+After enough time has passed (at least 1 official release), it should be safe to baseline migrations
+if desired. This can be accomplished by ensuring the changes from migrations have been applied to
+[schema.sql](../../src/main/resources/org/apache/aurora/scheduler/storage/db/schema.sql) and then
+removing the corresponding migration classes and adding a migration to remove the changelog entries.
\ No newline at end of file
diff --git a/source/documentation/latest/development/design-documents.md b/source/documentation/latest/development/design-documents.md
new file mode 100644
index 0000000..ee92a60
--- /dev/null
+++ b/source/documentation/latest/development/design-documents.md
@@ -0,0 +1,23 @@
+Design Documents
+================
+
+Since its inception as an Apache project, larger feature additions to the
+Aurora code base are discussed in form of design documents. Design documents
+are living documents until a consensus has been reached to implement a feature
+in the proposed form.
+
+Current and past documents:
+
+* [Command Hooks for the Aurora Client](../design/command-hooks/)
+* [Dynamic Reservations](https://docs.google.com/document/d/19gV8Po6DIHO14tOC7Qouk8RnboY8UCfRTninwn_5-7c/edit)
+* [GPU Resources in Aurora](https://docs.google.com/document/d/1J9SIswRMpVKQpnlvJAMAJtKfPP7ZARFknuyXl-2aZ-M/edit)
+* [Health Checks for Updates](https://docs.google.com/document/d/1KOO0LC046k75TqQqJ4c0FQcVGbxvrn71E10wAjMorVY/edit)
+* [JobUpdateDiff thrift API](https://docs.google.com/document/d/1Fc_YhhV7fc4D9Xv6gJzpfooxbK4YWZcvzw6Bd3qVTL8/edit)
+* [REST API RFC](https://docs.google.com/document/d/11_lAsYIRlD5ETRzF2eSd3oa8LXAHYFD8rSetspYXaf4/edit)
+* [Revocable Mesos offers in Aurora](https://docs.google.com/document/d/1r1WCHgmPJp5wbrqSZLsgtxPNj3sULfHrSFmxp2GyPTo/edit)
+* [Supporting the Mesos Universal Containerizer](https://docs.google.com/document/d/111T09NBF2zjjl7HE95xglsDpRdKoZqhCRM5hHmOfTLA/edit?usp=sharing)
+* [Tier Management In Apache Aurora](https://docs.google.com/document/d/1erszT-HsWf1zCIfhbqHlsotHxWUvDyI2xUwNQQQxLgs/edit?usp=sharing)
+* [Ubiquitous Jobs](https://docs.google.com/document/d/12hr6GnUZU3mc7xsWRzMi3nQILGB-3vyUxvbG-6YmvdE/edit)
+* [Pluggable Scheduling](https://docs.google.com/document/d/1fVHLt9AF-YbOCVCDMQmi5DATVusn-tqY8DldKbjVEm0/edit)
+
+Design documents can be found in the Aurora issue tracker via the query [`project = AURORA AND text ~ "docs.google.com" ORDER BY created`](https://issues.apache.org/jira/browse/AURORA-1528?jql=project%20%3D%20AURORA%20AND%20text%20~%20%22docs.google.com%22%20ORDER%20BY%20created).
diff --git a/source/documentation/latest/development/design/command-hooks.md b/source/documentation/latest/development/design/command-hooks.md
new file mode 100644
index 0000000..3f3f70f
--- /dev/null
+++ b/source/documentation/latest/development/design/command-hooks.md
@@ -0,0 +1,102 @@
+# Command Hooks for the Aurora Client
+
+## Introduction/Motivation
+
+We've got hooks in the client that surround API calls. These are
+pretty awkward, because they don't correlate with user actions. For
+example, suppose we wanted a policy that said users weren't allowed to
+kill all instances of a production job at once.
+
+Right now, all that we could hook would be the "killJob" api call. But
+kill (at least in newer versions of the client) normally runs in
+batches. If a user called killall, what we would see on the API level
+is a series of "killJob" calls, each of which specified a batch of
+instances. We woudn't be able to distinguish between really killing
+all instances of a job (which is forbidden under this policy), and
+carefully killing in batches (which is permitted.) In each case, the
+hook would just see a series of API calls, and couldn't find out what
+the actual command being executed was!
+
+For most policy enforcement, what we really want to be able to do is
+look at and vet the commands that a user is performing, not the API
+calls that the client uses to implement those commands.
+
+So I propose that we add a new kind of hook, which surrounds noun/verb
+commands. A hook will register itself to handle a collection of (noun,
+verb) pairs. Whenever any of those noun/verb commands are invoked, the
+hook's methods will be called around the execution of the verb. A
+pre-hook will have the ability to reject a command, preventing the
+verb from being executed.
+
+## Registering Hooks
+
+These hooks will be registered via configuration plugins. A configuration plugin
+can register hooks using an API. Hooks registered this way are, effectively,
+hardwired into the client executable.
+
+The order of execution of hooks is unspecified: they may be called in
+any order. There is no way to guarantee that one hook will execute
+before some other hook.
+
+
+### Global Hooks
+
+Hooks registered via the Python API call are called _global_ hooks,
+because they will run for all configurations, whether or not they
+specify any hooks in the configuration file.
+
+In the implementation, hooks are registered in the module
+`apache.aurora.client.cli.command_hooks`, using the class
+`GlobalCommandHookRegistry`. A global hook can be registered by calling
+`GlobalCommandHookRegistry.register_command_hook` in a configuration plugin.
+
+### The API
+
+    class CommandHook(object):
+      @property
+      def name(self):
+        """Returns a name for the hook."
+
+      def get_nouns(self):
+        """Return the nouns that have verbs that should invoke this hook."""
+
+      def get_verbs(self, noun):
+        """Return the verbs for a particular noun that should invoke his hook."""
+
+      @abstractmethod
+      def pre_command(self, noun, verb, context, commandline):
+        """Execute a hook before invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        Returns: True if the command should be allowed to proceed; False if the command
+        should be rejected.
+        """
+
+      def post_command(self, noun, verb, context, commandline, result):
+        """Execute a hook after invoking a verb.
+        * noun: the noun being invoked.
+        * verb: the verb being invoked.
+        * context: the context object that will be used to invoke the verb.
+          The options object will be initialized before calling the hook
+        * commandline: the original argv collection used to invoke the client.
+        * result: the result code returned by the verb.
+        Returns: nothing
+        """
+
+    class GlobalCommandHookRegistry(object):
+      @classmethod
+      def register_command_hook(cls, hook):
+        pass
+
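+As an illustration of the proposed API (the hook itself is hypothetical), a configuration plugin
+could register a pre-hook that vetoes `job killall`:
+
+    class BlockKillallHook(CommandHook):
+      @property
+      def name(self):
+        return 'block_killall'
+
+      def get_nouns(self):
+        return ['job']
+
+      def get_verbs(self, noun):
+        return ['killall'] if noun == 'job' else []
+
+      def pre_command(self, noun, verb, context, commandline):
+        # Returning False rejects the command; a real hook would inspect the context first.
+        return False
+
+      def post_command(self, noun, verb, context, commandline, result):
+        pass
+
+    GlobalCommandHookRegistry.register_command_hook(BlockKillallHook())
+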
+### Skipping Hooks
+
+To skip a hook, a user uses a command-line option, `--skip-hooks`. The option can either
+specify specific hooks to skip, or "all":
+
+* `aurora --skip-hooks=all job create east/bozo/devel/myjob` will create a job
+  without running any hooks.
+* `aurora --skip-hooks=test,iq create east/bozo/devel/myjob` will create a job,
+  and will skip only the hooks named "test" and "iq".
diff --git a/source/documentation/latest/development/scheduler.md b/source/documentation/latest/development/scheduler.md
new file mode 100644
index 0000000..fc0836a
--- /dev/null
+++ b/source/documentation/latest/development/scheduler.md
@@ -0,0 +1,118 @@
+Developing the Aurora Scheduler
+===============================
+
+The Aurora scheduler is written in Java and built with [Gradle](http://gradle.org).
+
+
+Prerequisite
+============
+
+When using Apache Aurora checked out from the source repository or the binary
+distribution, the Gradle wrapper and JavaScript dependencies are provided.
+However, you need to manually install them when using the source release
+downloads:
+
+1. Install Gradle following the instructions on the [Gradle web site](http://gradle.org)
+2. From the root directory of the Apache Aurora project generate the Gradle
+wrapper by running:
+
+    gradle wrapper
+
+
+Getting Started
+===============
+
+You will need Java 8 installed and on your `PATH` or unzipped somewhere with `JAVA_HOME` set. Then
+
+    ./gradlew tasks
+
+will bootstrap the build system and show available tasks. This can take a while the first time you
+run it but subsequent runs will be much faster due to cached artifacts.
+
+Running the Tests
+-----------------
+Aurora has a comprehensive unit test suite. To run the tests use
+
+    ./gradlew build
+
+Gradle will only re-run tests when dependencies of them have changed. To force a re-run of all
+tests use
+
+    ./gradlew clean build
+
+Running the build with code quality checks
+------------------------------------------
+To speed up development iteration, the plain gradle commands will not run static analysis tools.
+However, you should run these checks before posting a review diff, and **always** run them before
+pushing a commit to origin/master.
+
+    ./gradlew build -Pq
+
+Running integration tests
+-------------------------
+To run the same tests that are run in the Apache Aurora continuous integration
+environment:
+
+    ./build-support/jenkins/build.sh
+
+In addition, there is an end-to-end test that runs a suite of aurora commands
+using a virtual cluster:
+
+    ./src/test/sh/org/apache/aurora/e2e/test_end_to_end.sh
+
+Creating a bundle for deployment
+--------------------------------
+Gradle can create a zip file containing Aurora, all of its dependencies, and a launch script with
+
+    ./gradlew distZip
+
+or a tar file containing the same files with
+
+    ./gradlew distTar
+
+The output file will be written to `dist/distributions/aurora-scheduler.zip` or
+`dist/distributions/aurora-scheduler.tar`.
+
+
+
+Developing Aurora Java code
+===========================
+
+Setting up an IDE
+-----------------
+Gradle can generate project files for your IDE. To generate an IntelliJ IDEA project run
+
+    ./gradlew idea
+
+and import the generated `aurora.ipr` file.
+
+Adding or Upgrading a Dependency
+--------------------------------
+New dependencies can be added from Maven central by adding a `compile` dependency to `build.gradle`.
+For example, to add a dependency on `com.example`'s `example-lib` 1.0 add this block:
+
+    compile 'com.example:example-lib:1.0'
+
+NOTE: Anyone thinking about adding a new dependency should first familiarize themselves with the
+Apache Foundation's third-party licensing
+[policy](http://www.apache.org/legal/resolved.html#category-x).
+
+
+
+Developing the Aurora Build System
+==================================
+
+Bootstrapping Gradle
+--------------------
+The following files were autogenerated by `gradle wrapper` using Gradle's
+[Wrapper](http://www.gradle.org/docs/current/dsl/org.gradle.api.tasks.wrapper.Wrapper.html) plugin and
+should not be modified directly:
+
+    ./gradlew
+    ./gradlew.bat
+    ./gradle/wrapper/gradle-wrapper.jar
+    ./gradle/wrapper/gradle-wrapper.properties
+
+To upgrade Gradle, unpack the new version somewhere, run `/path/to/new/gradle wrapper` in the
+repository root and commit the changed files.
+
diff --git a/source/documentation/latest/development/thermos.md b/source/documentation/latest/development/thermos.md
new file mode 100644
index 0000000..0efe130
--- /dev/null
+++ b/source/documentation/latest/development/thermos.md
@@ -0,0 +1,126 @@
+The Python components of Aurora are built using [Pants](https://pantsbuild.github.io).
+
+
+Python Build Conventions
+========================
+The Python code is laid out according to the following conventions:
+
+1. 1 `BUILD` per 3rd level directory. For a list of current top-level packages run:
+
+        % find src/main/python -maxdepth 3 -mindepth 3 -type d |\
+        while read dname; do echo $dname |\
+            sed 's@src/main/python/\(.*\)/\(.*\)/\(.*\).*@\1.\2.\3@'; done
+
+2.  Each `BUILD` file exports 1
+    [`python_library`](https://pantsbuild.github.io/build_dictionary.html#bdict_python_library)
+    that provides a
+    [`setup_py`](https://pantsbuild.github.io/build_dictionary.html#setup_py)
+    containing each
+    [`python_binary`](https://pantsbuild.github.io/build_dictionary.html#python_binary)
+    in the `BUILD` file, named the same as the directory it's in so that it can be referenced
+    without a ':' character. The `sources` field in the `python_library` will almost always be
+    `rglobs('*.py')`.
+
+3.  Other BUILD files may only depend on this single public `python_library`
+    target. Any other target is considered a private implementation detail and
+    should be prefixed with an `_`.
+
+4.  `python_binary` targets are always named the same as the exported console script.
+
+5.  `python_binary` targets must have identical `dependencies` to the `python_library` exported
+    by the package and must use `entry_point`.
+
+    This means a PEX file generated by Pants will contain exactly the same files that will be
+    available on the `PYTHONPATH` in the case of `pip install` of the corresponding library
+    target. This will help our migration off of Pants in the future.
+
+Annotated example - apache.thermos.runner
+-----------------------------------------
+
+    % find src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner
+    src/main/python/apache/thermos/runner/__init__.py
+    src/main/python/apache/thermos/runner/thermos_runner.py
+    src/main/python/apache/thermos/runner/BUILD
+    % cat src/main/python/apache/thermos/runner/BUILD
+    # License boilerplate omitted
+    import os
+
+
+    # Private target so that a setup_py can exist without a circular dependency. Only targets within
+    # this file should depend on this.
+    python_library(
+      name = '_runner',
+      # The target covers every python file under this directory and subdirectories.
+      sources = rglobs('*.py'),
+      dependencies = [
+        '3rdparty/python:twitter.common.app',
+        '3rdparty/python:twitter.common.log',
+        # Source dependencies are always referenced without a ':'.
+        'src/main/python/apache/thermos/common',
+        'src/main/python/apache/thermos/config',
+        'src/main/python/apache/thermos/core',
+      ],
+    )
+
+    # Binary target for thermos_runner.pex. Nothing should depend on this - it's only used as an
+    # argument to ./pants binary.
+    python_binary(
+      name = 'thermos_runner',
+      # Use entry_point, not source so the files used here are the same ones tests see.
+      entry_point = 'apache.thermos.bin.thermos_runner',
+      dependencies = [
+        # Notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+    )
+
+    # The public library that everyone importing the runner symbols uses.
+    # The test targets and any other dependent source code should depend on this.
+    python_library(
+      name = 'runner',
+      dependencies = [
+        # Again, notice that we depend only on the single private target from this BUILD file here.
+        ':_runner',
+      ],
+      # We always provide a setup_py. This will cause any dependee libraries to automatically
+      # reference this library in their requirements.txt rather than copy the source files into their
+      # sdist.
+      provides = setup_py(
+        # Conventionally named and versioned.
+        name = 'apache.thermos.runner',
+        version = open(os.path.join(get_buildroot(), '.auroraversion')).read().strip().upper(),
+      ).with_binaries({
+        # Every binary in this file should also be repeated here.
+        # Always use the dict-form of .with_binaries so that commands with dashes in their names are
+        # supported.
+        # The console script name is always the same as the PEX with .pex stripped.
+        'thermos_runner': ':thermos_runner',
+      }),
+    )
+
+
+
+Thermos Test resources
+======================
+
+The Aurora source repository and distributions contain several
+[binary files](../../src/test/resources/org/apache/thermos/root/checkpoints) to
+qualify the backwards-compatibility of thermos with checkpoint data. Since
+thermos persists state to disk, to be read by the thermos observer, it is important that we have
+tests that prevent regressions affecting the ability to parse previously-written data.
+
+The files included represent persisted checkpoints that exercise different
+features of thermos. The existing files should not be modified unless
+we are accepting backwards incompatibility, such as with a major release.
+
+It is not practical to write source code to generate these files on the fly,
+as source would be vulnerable to drift (e.g. due to refactoring) in ways
+that would undermine the goal of ensuring backwards compatibility.
+
+The most common reason to add a new checkpoint file would be to provide
+coverage for new thermos features that alter the data format. This is
+accomplished by writing and running a
+[job configuration](../../reference/configuration/) that exercises the feature, and
+copying the checkpoint file from the sandbox directory; by default this is
+`/var/run/thermos/checkpoints/<aurora task id>`.
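+
+A hedged sketch of that final copy step (the task id and destination path are illustrative):
+
+    # after running the feature-exercising job in the Vagrant devcluster
+    TASK_ID='<aurora task id of that run>'   # placeholder
+    cp "/var/run/thermos/checkpoints/${TASK_ID}" \
+       src/test/resources/org/apache/thermos/root/checkpoints/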
diff --git a/source/documentation/latest/development/thrift.md b/source/documentation/latest/development/thrift.md
new file mode 100644
index 0000000..81b74b9
--- /dev/null
+++ b/source/documentation/latest/development/thrift.md
@@ -0,0 +1,54 @@
+Thrift
+======
+
+Aurora uses [Apache Thrift](https://thrift.apache.org/) for representing structured data in
+client/server RPC protocol as well as for internal data storage. While Thrift is capable of
+correctly handling additions and renames of existing members, field removals must be done
+carefully to ensure backwards compatibility and provide a predictable deprecation cycle. This
+document describes general guidelines for making Thrift schema changes to the existing fields in
+[api.thrift](https://github.com/apache/aurora/blob/rel/0.22.0/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+It is highly recommended to go through the
+[Thrift: The Missing Guide](http://diwakergupta.github.io/thrift-missing-guide/) first to refresh on
+basic Thrift schema concepts.
+
+Checklist
+---------
+Every existing Thrift schema modification is unique in its requirements and must be analyzed
+carefully to identify its scope and expected consequences. The following checklist may help in that
+analysis:
+* Is this a new field/struct? If yes, go ahead
+* Is this a pure field/struct rename without any type/structure change? If yes, go ahead and rename
+* Anything else, read further to make sure your change is properly planned
+
+Deprecation cycle
+-----------------
+Any time a breaking change (e.g.: field replacement or removal) is required, the following cycle
+must be followed:
+
+### vCurrent
+The change is applied in a way that does not prevent a scheduler/client at this version from
+communicating with a scheduler/client from vCurrent-1.
+* Do not remove or rename the old field
+* Add a new field as an eventual replacement of the old one and implement a dual read/write
+anywhere the old field is used (see the sketch after this list). If a thrift struct is mapped in
+the DB store, make sure both columns are marked as `NOT NULL`
+* Check [storage.thrift](https://github.com/apache/aurora/blob/rel/0.22.0/api/src/main/thrift/org/apache/aurora/gen/storage.thrift) to see if
+the affected struct is stored in Aurora scheduler storage. If so, it's almost certainly also
+necessary to perform a [DB migration](../db-migration/).
+* Add a deprecation jira ticket into the vCurrent+1 release candidate
+* Add a TODO for the deprecated field mentioning the jira ticket
+
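+For illustration, a hedged sketch of what such a dual-field stage can look like (struct name,
+field names, and the ticket reference are hypothetical):
+
+    struct ExampleConfig {
+      /** Deprecated, see AURORA-XXXX. TODO(maintainer): remove in vCurrent+1. */
+      1: optional i32 oldField
+      /** Replacement for oldField; read and written alongside it until vCurrent+1. */
+      2: optional i64 newField
+    }
+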
+### vCurrent+1
+Finalize the change by removing the deprecated fields from the Thrift schema.
+* Drop any dual read/write routines added in the previous version
+* Remove thrift backfilling in scheduler
+* Remove the deprecated Thrift field
+
+Testing
+-------
+It's always advisable to test your changes in the local vagrant environment to build more
+confidence that your change is backwards compatible. It's easy to simulate different
+client/scheduler versions by playing with `aurorabuild` command. See [this document](../../getting-started/vagrant/)
+for more.
+
diff --git a/source/documentation/latest/development/ui.md b/source/documentation/latest/development/ui.md
new file mode 100644
index 0000000..f41614e
--- /dev/null
+++ b/source/documentation/latest/development/ui.md
@@ -0,0 +1,46 @@
+Developing the Aurora Scheduler UI
+==================================
+
+Installing bower (optional)
+----------------------------
+Third party JS libraries used in Aurora (located at 3rdparty/javascript/bower_components) are
+managed by bower, a JS dependency manager. Bower is only required if you plan to add, remove or
+update JS libraries. Bower can be installed using the following command:
+
+    npm install -g bower
+
+Bower depends on node.js and npm. The easiest way to install node on a Mac is via Homebrew:
+
+    brew install node
+
+For more node.js installation options refer to https://github.com/joyent/node/wiki/Installation.
+
+More info on installing and using bower can be found at: http://bower.io/. Once installed, you can
+use the following commands to view and modify the bower repo at
+3rdparty/javascript/bower_components
+
+    bower list
+    bower install <library name>
+    bower remove <library name>
+    bower update <library name>
+    bower help
+
+
+Faster Iteration in Vagrant
+---------------------------
+The scheduler serves UI assets from the classpath. For production deployments this means the assets
+are served from within a jar. However, for faster development iteration, the vagrant image is
+configured to add the `scheduler` subtree of `/vagrant/dist/resources/main` to the head of
+`CLASSPATH`. This path is configured as a shared filesystem to the path on the host system where
+your Aurora repository lives. This means that any updates under `dist/resources/main/scheduler` in
+your checkout will be reflected immediately in the UI served from within the vagrant image.
+
+The one caveat to this is that this path is under `dist`, not `src`. This is because the assets must
+be processed by gradle before they can be served. So, unfortunately, you cannot just save your local
+changes and see them reflected in the UI; you must first run `./gradlew processResources`. This is
+less than ideal, but better than having to restart the scheduler after every change. Additionally,
+gradle makes this process somewhat easier with the `--continuous` flag. If you run
+`./gradlew processResources --continuous`, gradle will monitor the filesystem for changes and run the
+task automatically as necessary. This doesn't quite provide hot-reload capabilities, but it does
+allow for <5s from save to changes being visible in the UI with no further action required on the
+part of the developer.
diff --git a/source/documentation/latest/features/constraints.md b/source/documentation/latest/features/constraints.md
new file mode 100644
index 0000000..4ade848
--- /dev/null
+++ b/source/documentation/latest/features/constraints.md
@@ -0,0 +1,126 @@
+Scheduling Constraints
+======================
+
+By default, Aurora will pick any random agent with sufficient resources
+in order to schedule a task. This scheduling choice can be further
+restricted with the help of constraints.
+
+
+Mesos Attributes
+----------------
+
+Data centers are often organized with hierarchical failure domains.  Common failure domains
+include hosts, racks, rows, and PDUs.  If you have this information available, it is wise to tag
+the Mesos agent with them as
+[attributes](https://mesos.apache.org/documentation/attributes-resources/).
+
+The Mesos agent `--attributes` command line argument can be used to mark agents with
+static key/value pairs, so-called attributes (not to be confused with `--resources`, which are
+dynamic and accounted).
+
+For example, consider the host `cluster1-aaa-03-sr2` and its following attributes (given in
+key:value format): `host:cluster1-aaa-03-sr2` and `rack:aaa`.
+
+Aurora makes these attributes available for matching with scheduling constraints.
+
+
+Limit Constraints
+-----------------
+
+Limit constraints allow you to control machine diversity. The below
+constraint ensures that no more than two instances of your job may run on a single host.
+Think of this as a "group by" limit.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'host': 'limit:2',
+      }
+      ...
+    )
+
+
+Likewise, you can use constraints to control rack diversity, e.g. at
+most one task per rack:
+
+    constraints = {
+      'rack': 'limit:1',
+    }
+
+Use these constraints sparingly as they can dramatically reduce Tasks' schedulability.
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+
+Value Constraints
+-----------------
+
+Value constraints can be used to express that a certain attribute with a certain value
+should be present on a Mesos agent. For example, the following job would only be
+scheduled on nodes that claim to have an `SSD` as their disk.
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      constraints = {
+        'disk': 'SSD',
+      }
+      ...
+    )
+
+
+Further details are available in the reference documentation on
+[Scheduling Constraints](../../reference/configuration/#specifying-scheduling-constraints).
+
+
+Running stateful services
+-------------------------
+
+Aurora is best suited to run stateless applications, but it also accommodates for stateful services
+like databases, or services that otherwise need to always run on the same machines.
+
+### Dedicated attribute
+
+Most Mesos attributes are arbitrary and available for custom use.  There is one exception,
+though: the `dedicated` attribute.  Aurora treats this specially: only jobs with a matching
+constraint may run on these machines, and such jobs will be scheduled only on these machines.
+
+
+#### Syntax
+The dedicated attribute has semantic meaning. The format is `$role(/.*)?`. When a job is created,
+the scheduler requires that the `$role` component matches the `role` field in the job
+configuration, and will reject the job creation otherwise.  The remainder of the attribute is
+free-form. We've developed the idiom of formatting this attribute as `$role/$job`, but do not
+enforce this. For example: a job `devcluster/www-data/prod/hello` with a dedicated constraint set as
+`www-data/web.multi` will have its tasks scheduled only on Mesos agents configured with:
+`--attributes=dedicated:www-data/web.multi`.
+
+A wildcard (`*`) may be used for the role portion of the dedicated attribute, which will allow any
+owner to elect for a job to run on the host(s). For example: tasks from both
+`devcluster/www-data/prod/hello` and `devcluster/vagrant/test/hello` with a dedicated constraint
+formatted as `*/web.multi` will be scheduled only on Mesos agents configured with
+`--attributes=dedicated:*/web.multi`. This may be useful when assembling a virtual cluster of
+machines sharing the same set of traits or requirements.
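+
+A hedged sketch of a job opting into the wildcard form (names are illustrative):
+
+    Service(
+      name = 'hello',
+      role = 'vagrant',
+      constraints = {
+        'dedicated': '*/web.multi'
+      }
+      ...
+    )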
+
+##### Example
+Consider the following agent command line:
+
+    mesos-slave --attributes="dedicated:db_team/redis" ...
+
+And this job configuration:
+
+    Service(
+      name = 'redis',
+      role = 'db_team',
+      constraints = {
+        'dedicated': 'db_team/redis'
+      }
+      ...
+    )
+
+The job configuration indicates that it should only be scheduled on agents with the attribute
+`dedicated:db_team/redis`.  Additionally, Aurora will prevent any tasks that do _not_ have that
+constraint from running on those agents.
+
diff --git a/source/documentation/latest/features/containers.md b/source/documentation/latest/features/containers.md
new file mode 100644
index 0000000..5bf1601
--- /dev/null
+++ b/source/documentation/latest/features/containers.md
@@ -0,0 +1,130 @@
+Containers
+==========
+
+Aurora supports several containerizers, notably the Mesos containerizer and the Docker
+containerizer. The Mesos containerizer uses native OS features directly to provide isolation between
+containers, while the Docker containerizer delegates container management to the Docker engine.
+
+The support for launching container images via both containerizers has to be
+[enabled by a cluster operator](../../operations/configuration/#containers).
+
+Mesos Containerizer
+-------------------
+
+The Mesos containerizer is the native Mesos containerizer solution. It allows tasks to be
+run with an array of [pluggable isolators](../resource-isolation/) and can launch tasks using
+[Docker](https://github.com/docker/docker/blob/master/image/spec/v1.md) images,
+[AppC](https://github.com/appc/spec/blob/master/SPEC.md) images, or directly on the agent host
+filesystem.
+
+The following example (available in our [Vagrant environment](../../getting-started/vagrant/))
+launches a hello world example within a `debian/jessie` Docker image:
+
+    $ cat /vagrant/examples/jobs/hello_docker_image.aurora
+    hello_loop = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = Task(
+      processes = [hello_loop],
+      resources = Resources(cpu=1, ram=1*MB, disk=8*MB)
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker_image',
+        task = task,
+        container = Mesos(image=DockerImage(name='debian', tag='jessie'))
+      )
+    ]
+
+Docker and AppC images are designated using an appropriate `image` property of the `Mesos`
+configuration object. If either `container` or `image` is left unspecified, the host filesystem
+will be used. Further details of how to specify images can be found in the
+[Reference Documentation](../../reference/configuration/#mesos-object).
+
+By default, Aurora launches processes as the Linux user named after the job's role (e.g. `www-data`
+in the example above). This user has to exist on the host filesystem. If it does not exist within
+the container image, it will be created automatically. Otherwise, this user and its primary group
+have to exist in the image with a matching uid/gid.
+
+For more information on the Mesos containerizer filesystem, namespace, and isolator features, visit
+[Mesos Containerizer](http://mesos.apache.org/documentation/latest/mesos-containerizer/) and
+[Mesos Container Images](http://mesos.apache.org/documentation/latest/container-image/).
+
+
+Docker Containerizer
+--------------------
+
+The Docker containerizer launches container images using the Docker engine. It may often provide
+more advanced features than the native Mesos containerizer, but has to be installed separately from
+Mesos on each agent host.
+
+Starting with the 0.17.0 release, `image` can be specified with a `{{docker.image[name][tag]}}` binder so that
+the tag can be resolved to a concrete image digest. This ensures that the job always uses the same image
+across restarts, even if the version identified by the tag has been updated, guaranteeing that only job
+updates can mutate configuration.
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/hello_docker_engine.aurora
+    hello_loop = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = Task(
+      processes = [hello_loop],
+      resources = Resources(cpu=1, ram=1*MB, disk=8*MB)
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker',
+        task = task,
+        container = Docker(image = 'python:2.7')
+      ), Service(
+        cluster = 'devcluster',
+        environment = 'devel',
+        role = 'www-data',
+        name = 'hello_docker_engine_binding',
+        task = task,
+        container = Docker(image = '{{docker.image[library/python][2.7]}}')
+      )
+    ]
+
+Note that this feature requires a v2 Docker registry. If using a private Docker registry, its URL
+must be specified in the `clusters.json` configuration file under the key `docker_registry`.
+If not specified, `docker_registry` defaults to `https://registry-1.docker.io` (Docker Hub).
+
+Example:
+
+    # clusters.json
+    [{
+      "name": "devcluster",
+      ...
+      "docker_registry": "https://registry.example.com"
+    }]
+
+Details of how to use Docker via the Docker engine can be found in the
+[Reference Documentation](../../reference/configuration/#docker-object). Please note that in order to
+correctly execute processes inside a job, the Docker container must have Python 2.7 and potentially
+further Mesos dependencies installed. This limitation does not hold for Docker containers used via
+the Mesos containerizer.
+
+For more information on launching Docker containers through the Docker containerizer, visit
+[Docker Containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/)
diff --git a/source/documentation/latest/features/cron-jobs.md b/source/documentation/latest/features/cron-jobs.md
new file mode 100644
index 0000000..bcc94dc
--- /dev/null
+++ b/source/documentation/latest/features/cron-jobs.md
@@ -0,0 +1,124 @@
+# Cron Jobs
+
+Aurora supports execution of scheduled jobs on a Mesos cluster using cron-style syntax.
+
+- [Overview](#overview)
+- [Collision Policies](#collision-policies)
+- [Failure recovery](#failure-recovery)
+- [Interacting with cron jobs via the Aurora CLI](#interacting-with-cron-jobs-via-the-aurora-cli)
+	- [cron schedule](#cron-schedule)
+	- [cron deschedule](#cron-deschedule)
+	- [cron start](#cron-start)
+	- [job killall, job restart, job kill](#job-killall-job-restart-job-kill)
+- [Technical Note About Syntax](#technical-note-about-syntax)
+- [Caveats](#caveats)
+	- [Failovers](#failovers)
+	- [Collision policy is best-effort](#collision-policy-is-best-effort)
+	- [Timezone Configuration](#timezone-configuration)
+
+## Overview
+
+A job is identified as a cron job by the presence of a
+`cron_schedule` attribute containing a cron-style schedule in the
+[`Job`](../../reference/configuration/#job-objects) object. Examples of cron schedules
+include "every 5 minutes" (`*/5 * * * *`), "Fridays at 17:00" (`* 17 * * FRI`), and
+"the 1st and 15th day of the month at 03:00" (`0 3 1,15 *`).
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/cron_hello_world.aurora
+    # A cron job that runs every 5 minutes.
+    jobs = [
+      Job(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'test',
+        name = 'cron_hello_world',
+        cron_schedule = '*/5 * * * *',
+        task = SimpleTask(
+          'cron_hello_world',
+          'echo "Hello world from cron, the time is now $(date --rfc-822)"'),
+      ),
+    ]
+
+## Collision Policies
+
+The `cron_collision_policy` field specifies the scheduler's behavior when a new cron job is
+triggered while an older run hasn't finished. The scheduler has two policies available:
+
+* `KILL_EXISTING`: The default policy - on a collision the old instances are killed and instances with the current
+configuration are started.
+* `CANCEL_NEW`: On a collision the new run is cancelled.
+
+Note that the use of `CANCEL_NEW` is likely a code smell - interrupted cron jobs should be able
+to recover their progress on a subsequent invocation, otherwise they risk having their work queue
+grow faster than they can process it.
+
+## Failure recovery
+
+Unlike services, which Aurora will always re-execute regardless of exit status, instances of
+cron jobs retry according to the `max_task_failures` attribute of the
+[Task](../../reference/configuration/#task-object) object. To get "run-until-success" semantics,
+set `max_task_failures` to `-1`.
+
+## Interacting with cron jobs via the Aurora CLI
+
+Most interaction with cron jobs takes place using the `cron` subcommand. See `aurora cron -h`
+for up-to-date usage instructions.
+
+### cron schedule
+Schedules a new cron job on the Aurora cluster for later runs or replaces the existing cron template
+with a new one. Only future runs will be affected; any existing active tasks are left intact.
+
+    $ aurora cron schedule devcluster/www-data/test/cron_hello_world /vagrant/examples/jobs/cron_hello_world.aurora
+
+### cron deschedule
+Deschedules a cron job, preventing future runs but allowing current runs to complete.
+
+    $ aurora cron deschedule devcluster/www-data/test/cron_hello_world
+
+### cron start
+Start a cron job immediately, outside of its normal cron schedule.
+
+    $ aurora cron start devcluster/www-data/test/cron_hello_world
+
+### job killall, job restart, job kill
+Cron jobs create instances running on the cluster that you can interact with like normal Aurora
+tasks with `job kill` and `job restart`.
+
+
+## Technical Note About Syntax
+
+`cron_schedule` uses a restricted subset of BSD crontab syntax. While the
+execution engine currently uses Quartz, the schedule parsing is custom, a subset of FreeBSD
+[crontab(5)](http://www.freebsd.org/cgi/man.cgi?crontab(5)) syntax. See
+[the source](https://github.com/apache/aurora/blob/master/src/main/java/org/apache/aurora/scheduler/cron/CrontabEntry.java#L106-L124)
+for details.
+
+
+## Caveats
+
+### Failovers
+No failover recovery. Aurora does not record the latest minute it fired
+triggers for across failovers. Therefore it's possible to miss triggers
+on failover. Note that this behavior may change in the future.
+
+It's necessary to sync time between schedulers with something like `ntpd`.
+Clock skew could cause double or missed triggers in the case of a failover.
+
+### Collision policy is best-effort
+Aurora aims to always have *at least one copy* of a given instance running at a time - it's
+an AP system, meaning it chooses Availability and Partition Tolerance at the expense of
+Consistency.
+
+If your collision policy was `CANCEL_NEW` and a task has terminated but
+Aurora has not noticed this, Aurora will go ahead and create your new
+task.
+
+If your collision policy was `KILL_EXISTING` and a task was marked `LOST`
+but not yet GCed, Aurora will go ahead and create your new task without
+attempting to kill the old one (outside the GC interval).
+
+### Timezone Configuration
+The cron timezone is configured independently of the JVM timezone with the `-cron_timezone` flag and
+defaults to UTC.
diff --git a/source/documentation/latest/features/custom-executors.md b/source/documentation/latest/features/custom-executors.md
new file mode 100644
index 0000000..da3f2a2
--- /dev/null
+++ b/source/documentation/latest/features/custom-executors.md
@@ -0,0 +1,166 @@
+Custom Executors
+================
+
+If the need arises to use a Mesos executor other than the Thermos executor, the scheduler can be
+configured to utilize a custom executor by specifying the `-custom_executor_config` flag.
+The flag must be set to the path of a valid executor configuration file.
+
+The configuration file must be a valid **JSON array** and contain, at minimum,
+one executor configuration including the name, command, and resources fields, and
+must be pointed to by the `-custom_executor_config` flag when the scheduler is
+started.
+
+### Array Entry
+
+Property                 | Description
+-----------------------  | ---------------------------------
+executor (required)      | Description of executor.
+task_prefix (required)   | Prefix given to tasks launched with this executor's configuration.
+volume_mounts (optional) | Volumes to be mounted in container running executor.
+
+#### executor
+
+Property                 | Description
+-----------------------  | ---------------------------------
+name (required)          | Name of the executor.
+command (required)       | How to run the executor.
+resources (required)     | Overhead to use for each executor instance.
+
+#### command
+
+Property                 | Description
+-----------------------  | ---------------------------------
+value (required)         | The command to execute.
+arguments (optional)     | A list of arguments to pass to the command.
+uris (optional)          | List of resources to download into the task sandbox.
+shell (optional)         | Run executor via shell.
+
+A note on the command property (from [mesos.proto](https://github.com/apache/mesos/blob/master/include/mesos/mesos.proto)):
+
+```
+1) If 'shell == true', the command will be launched via shell
+   (i.e., /bin/sh -c 'value'). The 'value' specified will be
+   treated as the shell command. The 'arguments' will be ignored.
+2) If 'shell == false', the command will be launched by passing
+   arguments to an executable. The 'value' specified will be
+   treated as the filename of the executable. The 'arguments'
+   will be treated as the arguments to the executable. This is
+   similar to how POSIX exec families launch processes (i.e.,
+   execlp(value, arguments(0), arguments(1), ...)).
+```
+
+##### uris (list)
+* Follows the [Mesos Fetcher schema](http://mesos.apache.org/documentation/latest/fetcher/)
+
+Property                 | Description
+-----------------------  | ---------------------------------
+value (required)         | Path to the resource needed in the sandbox.
+executable (optional)    | Change resource to be executable via chmod.
+extract (optional)       | Extract files from packed or compressed archives into the sandbox.
+cache (optional)         | Use caching mechanism provided by Mesos for resources.
+
+#### resources (list)
+
+Property             | Description
+-------------------  | ---------------------------------
+name (required)      | Name of the resource: cpus or mem.
+type (required)      | Type of resource. Should always be SCALAR.
+scalar (required)    | Value in float for cpus or int for mem (in MBs)
+
+### volume_mounts (list)
+
+Property                     | Description
+---------------------------  | ---------------------------------
+host_path (required)         | Host path to mount inside the container.
+container_path (required)    | Path inside the container where `host_path` will be mounted.
+mode (required)              | Mode in which to mount the volume, Read-Write (RW) or Read-Only (RO).
+
+A sample configuration is as follows:
+
+```json
+[
+    {
+      "executor": {
+        "name": "myExecutor",
+        "command": {
+          "value": "myExecutor.a",
+          "shell": "false",
+          "arguments": [
+            "localhost:2181",
+            "-verbose",
+            "-config myConfiguration.config"
+          ],
+          "uris": [
+            {
+              "value": "/dist/myExecutor.a",
+              "executable": true,
+              "extract": false,
+              "cache": true
+            },
+            {
+              "value": "/home/user/myConfiguration.config",
+              "executable": false,
+              "extract": false,
+              "cache": false
+            }
+          ]
+        },
+        "resources": [
+          {
+            "name": "cpus",
+            "type": "SCALAR",
+            "scalar": {
+              "value": 1.00
+            }
+          },
+          {
+            "name": "mem",
+            "type": "SCALAR",
+            "scalar": {
+              "value": 512
+            }
+          }
+        ]
+      },
+      "volume_mounts": [
+        {
+          "mode": "RO",
+          "container_path": "/path/on/container",
+          "host_path": "/path/to/host/directory"
+        },
+        {
+          "mode": "RW",
+          "container_path": "/container",
+          "host_path": "/host"
+        }
+      ],
+      "task_prefix": "my-executor-"
+    }
+]
+```
+
+Note that if you do not use Thermos or a Thermos-based executor, links in the scheduler's
+Web UI for tasks will not work (at least for the time being).
+Some information about launched tasks can still be accessed via the Mesos Web UI or via the Aurora Client.
+
+### Using a custom executor
+
+To launch tasks using a custom executor,
+an [ExecutorConfig](../../reference/configuration/#executorconfig-objects) object must be added to
+the Job or Service object. The `name` parameter of ExecutorConfig must match the name of an executor
+defined in the JSON object provided to the scheduler at startup time.
+
+For example, if we desire to launch tasks using `myExecutor` (defined above), we may do so in
+the following manner:
+
+```
+jobs = [Service(
+  task = task,
+  cluster = 'devcluster',
+  role = 'www-data',
+  environment = 'prod',
+  name = 'hello',
+  executor_config = ExecutorConfig(name='myExecutor'))]
+```
+
+This will create a Service Job which will launch tasks using `myExecutor` instead of Thermos.
diff --git a/source/documentation/latest/features/job-updates.md b/source/documentation/latest/features/job-updates.md
new file mode 100644
index 0000000..a532e2e
--- /dev/null
+++ b/source/documentation/latest/features/job-updates.md
@@ -0,0 +1,123 @@
+Aurora Job Updates
+==================
+
+`Job` configurations can be updated at any point in their lifecycle.
+Usually updates are done incrementally using a process called a *rolling
+upgrade*, in which Tasks are upgraded in small groups, one group at a
+time.  Updates are done using various Aurora Client commands.
+
+
+Rolling Job Updates
+-------------------
+
+There are several sub-commands to manage job updates:
+
+    aurora update start <job key> <configuration file>
+    aurora update info <job key>
+    aurora update pause <job key>
+    aurora update resume <job key>
+    aurora update abort <job key>
+    aurora update list <cluster>
+
+When you `start` a job update, the command will return once it has sent the
+instructions to the scheduler.  At that point, you may view detailed
+progress for the update with the `info` subcommand, in addition to viewing
+graphical progress in the web browser.  You may also get a full listing of
+in-progress updates in a cluster with `list`.
+
+Once an update has been started, you can `pause` to keep the update but halt
+progress.  This can be useful for things like debugging a partially-updated
+job to determine whether you would like to proceed.  You can `resume` to
+proceed.
+
+You may `abort` a job update regardless of the state it is in. This will
+instruct the scheduler to completely abandon the job update and leave the job
+in the current (possibly partially-updated) state.
+
+For a configuration update, the Aurora Scheduler calculates required changes
+by examining the current job config state and the new desired job config.
+It then starts a *rolling batched update process* by going through every batch
+and performing these operations, in order:
+
+- If an instance is not present in the scheduler but is present in
+  the new config, then the instance is created.
+- If an instance is present in both the scheduler and the new config, then
+  the scheduler diffs both task configs. If it detects any changes, it
+  performs an instance update by killing the old config instance and adding
+  the new config instance.
+- If an instance is present in the scheduler but isn't in the new config,
+  then that instance is killed.
+
+The Aurora Scheduler continues through the instance list until all tasks are
+updated and in `RUNNING`. If the scheduler determines the update is not going
+well (based on the criteria specified in the UpdateConfig), it cancels the update.
+
+Update cancellation runs a procedure similar to the update sequence
+described above, but in reverse order. New instance configs are swapped
+with old instance configs and batch updates proceed backwards
+from the point where the update failed. E.g., (0,1,2) (3,4,5) (6,7,
+8-FAIL) results in a rollback in the order (8,7,6) (5,4,3) (2,1,0).
+
+For details on how to control a job update, please see the
+[UpdateConfig](../../reference/configuration/#updateconfig-objects) configuration object.
+
+
+Coordinated Job Updates
+------------------------
+
+Some Aurora services may benefit from having more control over updates by explicitly
+acknowledging ("heartbeating") job update progress. This may be helpful for mission-critical
+service updates where explicit job health monitoring is vital during the entire job update
+lifecycle. Such job updates would rely on an external service (or a custom client) periodically
+pulsing an active coordinated job update via a
+[pulseJobUpdate RPC](https://github.com/apache/aurora/blob/rel/0.22.0/api/src/main/thrift/org/apache/aurora/gen/api.thrift).
+
+A coordinated update is defined by setting a positive
+[pulse_interval_secs](../../reference/configuration/#updateconfig-objects) value in the job configuration
+file. If no pulses are received within the specified interval, the update will be blocked. A blocked
+update is unable to continue rolling forward (or rolling back) but retains its active status.
+It may only be unblocked by a fresh `pulseJobUpdate` call.
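+
+A minimal sketch of opting in (the interval value is illustrative; other `UpdateConfig` fields are
+omitted):
+
+    update_config = UpdateConfig(
+      # The update blocks unless pulseJobUpdate is called at least every 60 seconds.
+      pulse_interval_secs = 60,
+    )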
+
+NOTE: A coordinated update starts in `ROLL_FORWARD_AWAITING_PULSE` state and will not make any
+progress until the first pulse arrives. However, a paused update (`ROLL_FORWARD_PAUSED` or
+`ROLL_BACK_PAUSED`) is still considered active and upon resuming will immediately make progress
+provided the pulse interval has not expired.
+
+
+SLA-Aware Updates
+-----------------
+
+Updates can take advantage of [Custom SLA Requirements](../../features/sla-requirements/) and
+specify the `sla_aware=True` option within
+[UpdateConfig](../../reference/configuration/#updateconfig-objects) to only update instances if
+the action will maintain the task's SLA requirements. This feature allows updates to avoid killing
+too many instances in the face of unexpected failures outside of the update range.
+
+See [Using the `sla_aware` option](../../reference/configuration/#using-the-sla-aware-option)
+for more information on how to use this feature.
+
+
+Canary Deployments
+------------------
+
+Canary deployments are a pattern for rolling out updates to a subset of job instances,
+in order to test different code versions alongside the actual production job.
+It is a risk-mitigation strategy for job owners and commonly used in a form where
+job instance 0 runs with a different configuration than the instances 1-N.
+
+For example, consider a job with 4 instances that each
+request 1 core of cpu, 1 GB of RAM, and 1 GB of disk space as specified
+in the configuration file `hello_world.aurora`. If you want to
+update it so it requests 2 GB of RAM instead of 1, you can create a new
+configuration file called `new_hello_world.aurora` and
+issue
+
+    aurora update start <job_key_value>/0-1 new_hello_world.aurora
+
+This results in instances 0 and 1 having 1 cpu, 2 GB of RAM, and 1 GB of disk space,
+while instances 2 and 3 have 1 cpu, 1 GB of RAM, and 1 GB of disk space. If instance 3
+dies and restarts, it restarts with 1 cpu, 1 GB RAM, and 1 GB disk space.
+
+This means the same job has two task configurations active at the same time,
+each valid for a different range of instances. While this isn't a recommended
+pattern, it is valid and supported by the Aurora scheduler.
diff --git a/source/documentation/latest/features/mesos-fetcher.md b/source/documentation/latest/features/mesos-fetcher.md
new file mode 100644
index 0000000..967a905
--- /dev/null
+++ b/source/documentation/latest/features/mesos-fetcher.md
@@ -0,0 +1,46 @@
+Mesos Fetcher
+=============
+
+Mesos has support for downloading resources into the sandbox through the
+use of the [Mesos Fetcher](http://mesos.apache.org/documentation/latest/fetcher/).
+
+Aurora supports passing URIs to the Mesos Fetcher dynamically by including
+a list of URIs in job submissions.
+
+How to use
+----------
+The scheduler flag `-enable_mesos_fetcher` must be set to true.
+
+Currently only the scheduler side of this feature has been implemented,
+so a modification to the existing client, or a custom Thrift client, is required
+to make use of this feature.
+
+If using a custom Thrift client, the list of URIs must be included in TaskConfig
+as the `mesosFetcherUris` field.
+
+Each Mesos Fetcher URI has the following data members:
+
+|Property | Description|
+|---------|------|
+|value (required)  |Path to the resource needed in the sandbox.|
+|extract (optional)|Extract files from packed or compressed archives into the sandbox.|
+|cache (optional) | Use caching mechanism provided by Mesos for resources.|
+
+Note that this structure is very similar to the one provided for downloading
+resources needed for a [custom executor](../../operations/configuration/).
+
+This is because both features use the Mesos fetcher to retrieve resources into
+the sandbox. However, the custom executor feature uses a static set of URIs
+configured on the server side, while the Mesos Fetcher feature uses a dynamic set
+of URIs provided at the time of job submission.
+
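+A hedged sketch of the Thrift shapes a custom client would populate (field ids and layout are
+illustrative; consult api.thrift for the authoritative definition):
+
+    struct MesosFetcherURI {
+      /** Where to fetch the resource from. */
+      1: string value
+      /** Extract compressed archives into the sandbox. */
+      2: optional bool extract
+      /** Use the Mesos fetcher cache. */
+      3: optional bool cache
+    }
+
+    // TaskConfig then carries these as an optional set<MesosFetcherURI> mesosFetcherUris field.
+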
+Security Implications
+---------------------
+There are security implications that must be taken into account when enabling this feature.
+**Enabling this feature may potentially enable any job submitting user to perform a privilege escalation.**
+
+Until a more thorough solution is created, one step that has been taken to mitigate this issue
+is to statically mark every user submitted URI as non-executable. This is in contrast to the set of URIs
+set in the custom executor feature which may mark any URI as executable.
+
+If the need arises to mark a downloaded URI as executable, please consider using the custom executor feature.
\ No newline at end of file
diff --git a/source/documentation/latest/features/multitenancy.md b/source/documentation/latest/features/multitenancy.md
new file mode 100644
index 0000000..08aedd5
--- /dev/null
+++ b/source/documentation/latest/features/multitenancy.md
@@ -0,0 +1,82 @@
+Multitenancy
+============
+
+Aurora is a multi-tenant system that can run jobs of multiple clients/tenants.
+Going beyond the [resource isolation on an individual host](../resource-isolation/), it is
+crucial to prevent those jobs from stepping on each other's toes.
+
+
+Job Namespaces
+--------------
+
+The namespace for jobs in Aurora follows a hierarchical structure. This is meant to make it easier
+to differentiate between different jobs. A job key consists of four parts,
+`<cluster>/<role>/<environment>/<jobname>`, in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts.
+* Environment names are namespaces.
+* Jobname is the custom name of your job.
+
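+For example, `devcluster/www-data/prod/hello_world` refers to the job `hello_world`, run in the
+`prod` environment by the role `www-data` on the Aurora installation named `devcluster`.
+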
+Role names correspond to user accounts. They are used for
+[authentication](../../operations/security/), as the linux user used to run jobs, and for the
+assignment of [quota](#preemption). If you don't know what accounts are available, contact your
+sysadmin.
+
+The environment component in the job key serves as a namespace. The values for
+environment are validated in the scheduler. By default, any of `devel`, `test`,
+`production`, and any value matching the regular expression `staging[0-9]*` is allowed. This
+validation can be changed to allow any arbitrary regular expression by setting the scheduler option
+`allowed_job_environments`.
+
+None of the values imply any difference in the scheduling behavior. Conventionally, the
+"environment" is set so as to indicate a certain level of stability in the behavior of the job
+by ensuring that an appropriate level of testing has been performed on the application code. e.g.
+in the case of a typical Job, releases may progress through the following phases in order of
+increasing level of stability: `devel`, `test`, `staging`, `production`.
+
+
+Configuration Tiers
+-------------------
+
+Tier is a predefined bundle of task configuration options. Aurora schedules tasks and assigns them
+resources based on their tier assignment. The default scheduler tier configuration allows for
+3 tiers:
+
+ - `revocable`: The `revocable` tier requires the task to run with [revocable](../resource-isolation/#oversubscription)
+ resources.
+ - `preemptible`: Setting the task’s tier to `preemptible` allows for the possibility of that task
+ being [preempted](#preemption) by other tasks when cluster is running low on resources.
+ - `preferred`: The `preferred` tier prevents the task from using [revocable](../resource-isolation/#oversubscription)
+ resources and from being [preempted](#preemption).
+
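+A hedged sketch of selecting a tier in a job configuration (assuming the default tier names above
+are available in your cluster):
+
+    Service(
+      name = 'webservice',
+      role = 'www-data',
+      tier = 'preemptible',
+      ...
+    )
+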
+Since it is possible that a cluster is configured with a custom tier configuration, users should
+consult their cluster administrator to be informed of the tiers supported by the cluster. Attempts
+to schedule jobs with an unsupported tier will be rejected by the scheduler.
+
+
+Preemption
+----------
+
+In order to guarantee that important production jobs are always running, Aurora supports
+preemption.
+
+Consider a pending job that is a candidate for scheduling but cannot be scheduled due to a
+resource shortage. Active tasks can become victims of preemption if:
+
+ - both candidate and victim are owned by the same role and the
+   [priority](../../reference/configuration/#job-objects) of a victim is lower than the
+   [priority](../../reference/configuration/#job-objects) of the candidate.
+ - OR a victim is a `preemptible` or `revocable` [tier](#configuration-tiers) task and the candidate
+   is a `preferred` [tier](#configuration-tiers) task.
+
+In other words, tasks from `preferred` [tier](../../reference/configuration/#job-objects) jobs may
+preempt tasks from any `preemptible` or `revocable` job. However, a `preferred` task may only be
+preempted by tasks from `preferred` jobs in the same role with higher [priority](../../reference/configuration/#job-objects).
+
+Aurora requires resource quotas for [production non-dedicated jobs](../../reference/configuration/#job-objects).
+Quota is enforced at the job role level and when set, defines a non-preemptible pool of compute resources within
+that role. All job types (service, adhoc or cron) require role resource quota unless the job has a
+[dedicated constraint](../constraints/#dedicated-attribute) set.
+
+To grant quota to a particular role in production, an operator can use the command
+`aurora_admin set_quota`.
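+
+A hedged sketch of such a grant (role, values, and the argument format are assumptions; consult the
+command's built-in help for the exact syntax):
+
+    aurora_admin set_quota devcluster www-data 100.0 100GB 400GB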
diff --git a/source/documentation/latest/features/resource-isolation.md b/source/documentation/latest/features/resource-isolation.md
new file mode 100644
index 0000000..a5294f6
--- /dev/null
+++ b/source/documentation/latest/features/resource-isolation.md
@@ -0,0 +1,181 @@
+Resource Isolation and Sizing
+==============================
+
+This document assumes Aurora and Mesos have been configured
+using our [recommended resource isolation settings](../../operations/configuration/#resource-isolation).
+
+- [Isolation](#isolation)
+- [Sizing](#sizing)
+- [Oversubscription](#oversubscription)
+
+
+Isolation
+---------
+
+Aurora is a multi-tenant system; a single software instance runs on a
+server, serving multiple clients/tenants. To share resources among
+tenants, it leverages Mesos for isolation of:
+
+* CPU
+* GPU
+* memory
+* disk space
+* ports
+
+CPU is a soft limit, and handled differently from memory and disk space.
+Too low a CPU value results in throttling your application and
+slowing it down. Memory and disk space are both hard limits; when your
+application goes over these values, it's killed.
+
+### CPU Isolation
+
+Mesos can be configured to use a quota-based CPU scheduler (the *Completely
+Fair Scheduler*) to provide consistent and predictable performance.
+This is effectively a guarantee of resources -- you receive at least what
+you requested, but also no more than you've requested.
+
+The scheduler gives applications a CPU quota for every 100 ms interval.
+When an application uses its quota for an interval, it is throttled for
+the rest of the 100 ms. Usage resets for each interval and unused
+quota does not carry over.
+
+For example, an application specifying 4.0 CPU has access to 400 ms of
+CPU time every 100 ms. This CPU quota can be used in different ways,
+depending on the application and available resources. Consider the
+scenarios shown in this diagram.
+
+![CPU Availability](../images/CPUavailability.png)
+
+* *Scenario A*: the application can use up to 4 cores continuously for
+every 100 ms interval. It is never throttled and starts processing
+new requests immediately.
+
+* *Scenario B* : the application uses up to 8 cores (depending on
+availability) but is throttled after 50 ms. The CPU quota resets at the
+start of each new 100 ms interval.
+
+* *Scenario C* : is like Scenario A, but there is a garbage collection
+event in the second interval that consumes all CPU quota. The
+application throttles for the remaining 75 ms of that interval and
+cannot service requests until the next interval. In this example, the
+garbage collection finished in one interval but, depending on how much
+garbage needs collecting, it may take more than one interval and further
+delay service of requests.
+
+*Technical Note*: Mesos considers logical cores, also known as
+hyperthreading or SMT cores, as the unit of CPU.
+
+### Memory Isolation
+
+Mesos uses dedicated memory allocation. Your application always has
+access to the amount of memory specified in your configuration. The
+application's memory use is defined as the sum of the resident set size
+(RSS) of all processes in a shard. Each shard is considered
+independently.
+
+In other words, say you specified a memory size of 10GB. Each shard
+would receive 10GB of memory. If an individual shard's memory demands
+exceed 10GB, that shard is killed, but the other shards continue
+working.
+
+*Technical note*: Total memory size is not enforced at allocation time,
+so your application can request more than its allocation without getting
+an ENOMEM. However, it will be killed shortly after.
+
+### Disk Space
+
+Disk space used by your application is defined as the sum of the files'
+disk space in your application's directory, including the `stdout` and
+`stderr` logged from your application. Each shard is considered
+independently. You should use off-node storage for your application's
+data whenever possible.
+
+In other words, say you specified disk space size of 100MB. Each shard
+would receive 100MB of disk space. If an individual shard's disk space
+demands exceed 100MB, that shard is killed, but the other shards
+continue working.
+
+After your application finishes running, its allocated disk space is
+reclaimed. Thus, your job's final action should move any disk content
+that you want to keep, such as logs, to your home file system or other
+less transitory storage. Disk reclamation takes place an undefined
+period after the application finish time; until then, the disk contents
+are still available but you shouldn't count on them being so.
+
+*Technical note* : Disk space is not enforced at write so your
+application can write above its quota without getting an ENOSPC, but it
+will be killed shortly after. This is subject to change.
+
+### GPU Isolation
+
+GPU isolation will be supported for Nvidia devices starting from Mesos 1.0.
+Access to the allocated units will be exclusive with no sharing between tasks
+allowed (e.g. no fractional GPU allocation). For more details, see the
+[Mesos design document](https://docs.google.com/document/d/10GJ1A80x4nIEo8kfdeo9B11PIbS1xJrrB4Z373Ifkpo/edit#heading=h.w84lz7p4eexl)
+and the [Mesos agent configuration](http://mesos.apache.org/documentation/latest/configuration/).
+
+### Other Resources
+
+Other resources, such as network bandwidth, do not have any performance
+guarantees. For some resources, such as memory bandwidth, there are no
+practical sharing methods so some application combinations collocated on
+the same host may cause contention.
+
+
+Sizing
+-------
+
+### CPU Sizing
+
+To correctly size Aurora-run Mesos tasks, specify a per-shard CPU value
+that lets the task run at its desired performance when at peak load
+distributed across all shards. Include reserve capacity of at least 50%,
+possibly more, depending on how critical your service is (or how
+confident you are about your original estimate :-)), ideally by
+increasing the number of shards to also improve resiliency. When running
+your application, observe its CPU stats over time. If consistently at or
+near your quota during peak load, you should consider increasing either
+per-shard CPU or the number of shards.
+
+### Memory Sizing
+
+Size for your application's peak requirement. Observe the per-instance
+memory statistics over time, as memory requirements can vary over
+different periods. Remember that if your application exceeds its memory
+value, it will be killed, so you should also add a safety margin of
+around 10-20%. If you have the ability to do so, you may also want to
+put alerts on the per-instance memory.
+
+### Disk Space Sizing
+
+Size for your application's peak requirement. Rotate and discard log
+files as needed to stay within your quota. When running a Java process,
+add the maximum size of the Java heap to your disk space requirement, in
+order to account for an out of memory error dumping the heap
+into the application's sandbox space.
+
+### GPU Sizing
+
+GPU is highly dependent on your application requirements and is only limited
+by the number of physical GPU units available on a target box.
+
+
+Oversubscription
+----------------
+
+Mesos supports [oversubscription of machine resources](http://mesos.apache.org/documentation/latest/oversubscription/)
+via the concept of revocable tasks. In contrast to non-revocable tasks, revocable tasks are best-effort.
+Mesos reserves the right to throttle or even kill them if they might affect existing high-priority
+user-facing services.
+
+As of today, the only revocable resources supported by Aurora are CPU and RAM. A job can
+opt-in to use those by specifying the `revocable` [Configuration Tier](../../features/multitenancy/#configuration-tiers).
+A revocable job will only be scheduled using revocable resources, even if there are plenty of
+non-revocable resources available.
+
+The Aurora scheduler must be [configured to receive revocable offers](../../operations/configuration/#resource-isolation)
+from Mesos and accept revocable jobs. If not configured properly revocable tasks will never get
+assigned to hosts and will stay in `PENDING`.
+
+For details on how to mark a job as being revocable, see the
+[Configuration Reference](../../reference/configuration/).
diff --git a/source/documentation/latest/features/service-discovery.md b/source/documentation/latest/features/service-discovery.md
new file mode 100644
index 0000000..b35a569
--- /dev/null
+++ b/source/documentation/latest/features/service-discovery.md
@@ -0,0 +1,43 @@
+Service Discovery
+=================
+
+It is possible for the Aurora executor to announce tasks into ServerSets for
+the purpose of service discovery.  ServerSets use the Zookeeper [group membership pattern](http://zookeeper.apache.org/doc/trunk/recipes.html#sc_outOfTheBox)
+of which there are several reference implementations:
+
+  - [C++](https://github.com/apache/mesos/blob/master/src/zookeeper/group.cpp)
+  - [Java](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/zookeeper/ServerSetImpl.java#L221)
+  - [Python](https://github.com/twitter/commons/blob/master/src/python/twitter/common/zookeeper/serverset/serverset.py#L51)
+
+These can also be used natively in Finagle using the [ZookeeperServerSetCluster](https://github.com/twitter/finagle/blob/master/finagle-serversets/src/main/scala/com/twitter/finagle/zookeeper/ZookeeperServerSetCluster.scala).
+
+For more information about how to configure announcing, see the [Configuration Reference](../../reference/configuration/).
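+
+As a brief, hedged sketch (job details are hypothetical), announcing is enabled per job through the
+`announce` field:
+
+```python
+# Hypothetical web service that registers its dynamically assigned 'http' port
+# in a ZooKeeper-backed ServerSet via the announcer.
+web = Process(name = 'web', cmdline = './run_server.sh --port {{thermos.ports[http]}}')
+
+task = SequentialTask(
+  processes = [web],
+  resources = Resources(cpu = 1.0, ram = 512*MB, disk = 512*MB)
+)
+
+jobs = [
+  Service(
+    cluster = 'devcluster',
+    role = 'www-data',
+    environment = 'prod',
+    name = 'web',
+    task = task,
+    announce = Announcer(primary_port = 'http')
+  )
+]
+```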
+
+Using Mesos DiscoveryInfo
+-------------------------
+Aurora provides experimental support for populating DiscoveryInfo in Mesos. This can be used to build a
+custom service discovery system that does not rely on ZooKeeper. Please see the `Service Discovery` section in the
+[Mesos Framework Development guide](http://mesos.apache.org/documentation/latest/app-framework-development-guide/) for
+an explanation of the protobuf message in Mesos.
+
+To use this feature, enable the `--populate_discovery_info` flag on the scheduler. All jobs started by the scheduler
+afterwards will have their portmap populated to Mesos and be discoverable via the `/state` endpoint on the Mesos master and agent.
+
+### Using Mesos DNS
+An example is using [Mesos-DNS](https://github.com/mesosphere/mesos-dns), which is able to generate multiple DNS
+records. With the current implementation, the example job with key `devcluster/vagrant/test/http-example` generates at
+least the following:
+
+1. An A record for `http_example.test.vagrant.aurora.mesos` (which only includes the IP address);
+2. A [SRV record](https://en.wikipedia.org/wiki/SRV_record) for
+   `_http_example.test.vagrant._tcp.aurora.mesos`, which includes the IP address and every port. This should only
+   be used if the service has a single port.
+3. A SRV record `_{port-name}._http_example.test.vagrant._tcp.aurora.mesos` for each port name
+   defined. This should be used when the service has multiple ports. For this to work properly,
+   `--populate_discovery_info` must be added to the scheduler's configuration.
+
+Things to note:
+
+1. The domain part (".mesos" in the above example) can be configured in [Mesos DNS](http://mesosphere.github.io/mesos-dns/docs/configuration-parameters.html);
+2. Right now, the portmap and port aliases in the announcer object are not reflected in DiscoveryInfo, and are therefore not visible in
+   Mesos DNS records either. This is because they are only resolved in the Thermos executor.
diff --git a/source/documentation/latest/features/services.md b/source/documentation/latest/features/services.md
new file mode 100644
index 0000000..91889d2
--- /dev/null
+++ b/source/documentation/latest/features/services.md
@@ -0,0 +1,116 @@
+Long-running Services
+=====================
+
+Jobs that are always restarted on completion, whether successful or unsuccessful,
+are called services. This is useful for long-running processes
+such as web services that should always be running, unless stopped explicitly.
+
+
+Service Specification
+---------------------
+
+A job is identified as a service by the presence of the flag
+`service=True` in the [`Job`](../../reference/configuration/#job-objects) object.
+The `Service` alias can be used as shorthand for `Job` with `service=True`.
+
+Example (available in the [Vagrant environment](../../getting-started/vagrant/)):
+
+    $ cat /vagrant/examples/jobs/hello_world.aurora
+    hello = Process(
+      name = 'hello',
+      cmdline = """
+        while true; do
+          echo hello world
+          sleep 10
+        done
+      """)
+
+    task = SequentialTask(
+      processes = [hello],
+      resources = Resources(cpu = 1.0, ram = 128*MB, disk = 128*MB)
+    )
+
+    jobs = [
+      Service(
+        task = task,
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'prod',
+        name = 'hello'
+      )
+    ]
+
+
+Jobs without the service bit set only restart up to `max_task_failures` times and only if they
+terminated unsuccessfully either due to human error or machine failure (see the
+[`Job`](../../reference/configuration/#job-objects) object for details).
+
+
+Ports
+-----
+
+In order to be useful, most services have to bind to one or more ports. Aurora enables this
+use case via the [`thermos.ports` namespace](../../reference/configuration/#thermos-namespace), which
+allows processes to request arbitrarily named ports:
+
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[http]}}'
+    )
+
+
+When this process is included in a job, the job will be allocated a port, and the command line
+will be replaced with something like:
+
+    ./run_nginx.sh -port 42816
+
+Where 42816 happens to be the allocated port.
+
+For details on how to enable clients to discover this dynamically assigned port, see our
+[Service Discovery](../service-discovery/) documentation.
+
+
+Health Checking
+---------------
+
+Typically, the Thermos executor monitors processes within a task only by liveness of the forked
+process. In addition to that, Aurora has support for rudimentary health checking: either via HTTP
+or via custom shell scripts.
+
+For example, simply by requesting a `health` port, a process can request to be health checked
+via repeated calls to the `/health` endpoint:
+
+    nginx = Process(
+      name = 'nginx',
+      cmdline = './run_nginx.sh -port {{thermos.ports[health]}}'
+    )
+
+Please see the
+[configuration reference](../../reference/configuration/#healthcheckconfig-objects)
+for configuration options for this feature.
+
+Starting with the 0.17.0 release, job updates rely only on task health-checks by introducing
+a `min_consecutive_successes` parameter on the HealthCheckConfig object. This parameter represents
+the number of successful health checks needed before a task is moved into the `RUNNING` state. Tasks
+that do not have enough successful health checks within the first `n` attempts, are moved to the
+`FAILED` state, where `n = ceil(initial_interval_secs/interval_secs) + max_consecutive_failures +
+min_consecutive_successes`. In order to accommodate variability during task warm up, `initial_interval_secs`
+will act as a grace period. Any health-check failures during the first `m` attempts are ignored and
+do not count towards `max_consecutive_failures`, where `m = ceil(initial_interval_secs/interval_secs)`.
+
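+As a hedged sketch continuing the nginx example above (values are illustrative, and we assume the
+`health_check_config` field on the job): `initial_interval_secs = 30` and `interval_secs = 10` give a
+grace period of `m = ceil(30/10) = 3` attempts, and with `max_consecutive_failures = 2` and
+`min_consecutive_successes = 1` a task is failed after `n = 3 + 2 + 1 = 6` attempts without the
+required successes:
+
+    task = SequentialTask(
+      processes = [nginx],
+      resources = Resources(cpu = 1.0, ram = 128*MB, disk = 128*MB)
+    )
+
+    jobs = [
+      Service(
+        cluster = 'devcluster',
+        role = 'www-data',
+        environment = 'prod',
+        name = 'nginx',
+        task = task,
+        health_check_config = HealthCheckConfig(
+          initial_interval_secs = 30,
+          interval_secs = 10,
+          timeout_secs = 1,
+          max_consecutive_failures = 2,
+          min_consecutive_successes = 1
+        )
+      )
+    ]
+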
+As [job updates](../job-updates/) are based only on health-checks, it is not necessary to set
+`watch_secs` to the worst-case update time; it can instead be set to 0. The scheduler considers a
+task that is in the `RUNNING` state to be healthy and proceeds to update the next batch of instances.
+For details on how to control health checks, please see the
+[HealthCheckConfig](../../reference/configuration/#healthcheckconfig-objects) configuration object.
+Existing jobs that do not configure a health-check can fall back to using `watch_secs` to
+monitor a task before considering it healthy.
+
+You can pause health checking by touching a file inside of your sandbox, named `.healthchecksnooze`.
+As long as that file is present, health checks will be disabled, enabling users to gather core
+dumps or other performance measurements without worrying about Aurora's health check killing
+their process.
+
+WARNING: Remember to remove this when you are done, otherwise your instance will have permanently
+disabled health checks.
diff --git a/source/documentation/latest/features/sla-metrics.md b/source/documentation/latest/features/sla-metrics.md
new file mode 100644
index 0000000..3b40e02
--- /dev/null
+++ b/source/documentation/latest/features/sla-metrics.md
@@ -0,0 +1,215 @@
+Aurora SLA Measurement
+======================
+
+- [Overview](#overview)
+- [Metric Details](#metric-details)
+  - [Platform Uptime](#platform-uptime)
+  - [Job Uptime](#job-uptime)
+  - [Median Time To Assigned (MTTA)](#median-time-to-assigned-\(mtta\))
+  - [Median Time To Starting (MTTS)](#median-time-to-starting-\(mtts\))
+  - [Median Time To Running (MTTR)](#median-time-to-running-\(mttr\))
+- [Limitations](#limitations)
+
+## Overview
+
+The primary goal of this feature is the collection and monitoring of Aurora job SLA (Service Level
+Agreement) metrics that define a contractual relationship between the Aurora/Mesos platform
+and hosted services.
+
+The Aurora SLA feature is by default only enabled for service (non-cron)
+production jobs (`"production=True"` in your `.aurora` config). It can be enabled for
+non-production services by an operator via the scheduler command line flag `-sla_non_prod_metrics`.
+
+Counters that track SLA measurements are computed periodically within the scheduler.
+The individual instance metrics are refreshed every minute (configurable via
+`sla_stat_refresh_interval`). The instance counters are subsequently aggregated by
+relevant grouping types before being exported to the scheduler's `/vars` endpoint (when using `vagrant`,
+that would be `http://192.168.33.7:8081/vars`).
+
+
+## Metric Details
+
+### Platform Uptime
+
+*Aggregate amount of time a job spends in a non-runnable state due to platform unavailability
+or scheduling delays. This metric tracks Aurora/Mesos uptime performance and reflects on any
+system-caused downtime events (tasks LOST or DRAINED). Any user-initiated task kills/restarts
+will not degrade this metric.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_platform_uptime_percent`
+* Per cluster - `sla_cluster_platform_uptime_percent`
+
+**Units:** percent
+
+A fault in the task environment may cause Aurora/Mesos to have different views of the task state
+or to lose track of the task's existence. In such cases, the service task is marked as LOST and
+rescheduled by Aurora. For example, this may happen when the task stays in ASSIGNED or STARTING
+for too long or the Mesos agent becomes unhealthy (or disappears completely). The time between the
+task entering LOST and its replacement reaching the RUNNING state is counted towards platform downtime.
+
+Another example of a platform downtime event is the administrator-requested task rescheduling. This
+happens during planned Mesos agent maintenance when all agent tasks are marked as DRAINED and
+rescheduled elsewhere.
+
+To accurately calculate Platform Uptime, we must separate platform incurred downtime from user
+actions that put a service instance in a non-operational state. It is simpler to isolate
+user-incurred downtime and treat all other downtime as platform incurred.
+
+Currently, a user can cause a healthy service (task) downtime in only two ways: via `killTasks`
+or `restartShards` RPCs. For both, their affected tasks leave an audit state transition trail
+relevant to uptime calculations. By applying a special "SLA meaning" to exposed task state
+transition records, we can build a deterministic downtime trace for every given service instance.
+
+A task going through a state transition carries one of three possible SLA meanings
+(see [SlaAlgorithm.java](https://github.com/apache/aurora/blob/rel/0.22.0/src/main/java/org/apache/aurora/scheduler/sla/SlaAlgorithm.java) for
+sla-to-task-state mapping):
+
+* Task is UP: starts a period where the task is considered to be up and running from the Aurora
+  platform standpoint.
+
+* Task is DOWN: starts a period where the task cannot reach the UP state for some
+  non-user-related reason. Counts towards instance downtime.
+
+* Task is REMOVED from SLA: starts a period where the task is not expected to be UP due to
+  user initiated action or failure. We ignore this period for the uptime calculation purposes.
+
+This metric is recalculated over the last sampling period (last minute) to account for
+any UP/DOWN/REMOVED events. It ignores any UP/DOWN events not immediately adjacent to the
+sampling interval as well as adjacent REMOVED events.
+
+### Job Uptime
+
+*Percentage of the job instances considered to be in the RUNNING state for the specified duration
+relative to the request time. This is a purely application-side metric that considers the aggregate
+uptime of all RUNNING instances. Any user- or platform-initiated restarts directly affect
+this metric.*
+
+**Collection scope:** We currently expose job uptime values at 5 pre-defined
+percentiles (50th, 75th, 90th, 95th and 99th):
+
+* `sla_<job_key>_job_uptime_50_00_sec`
+* `sla_<job_key>_job_uptime_75_00_sec`
+* `sla_<job_key>_job_uptime_90_00_sec`
+* `sla_<job_key>_job_uptime_95_00_sec`
+* `sla_<job_key>_job_uptime_99_00_sec`
+
+**Units:** seconds
+
+You can also get customized real-time stats from the Aurora client. See `aurora sla -h` for
+more details.
+
+### Median Time To Assigned (MTTA)
+
+*Median time a job spends waiting for its tasks to be assigned to a host. This is a combined
+metric that helps track the dependency of scheduling performance on the requested resources
+(user scope) as well as the internal scheduler bin-packing algorithm efficiency (platform scope).*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtta_ms`
+* Per cluster - `sla_cluster_mtta_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.22.0/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mtta_ms`
+    * `sla_cpu_medium_mtta_ms`
+    * `sla_cpu_large_mtta_ms`
+    * `sla_cpu_xlarge_mtta_ms`
+    * `sla_cpu_xxlarge_mtta_ms`
+  * By RAM:
+    * `sla_ram_small_mtta_ms`
+    * `sla_ram_medium_mtta_ms`
+    * `sla_ram_large_mtta_ms`
+    * `sla_ram_xlarge_mtta_ms`
+    * `sla_ram_xxlarge_mtta_ms`
+  * By DISK:
+    * `sla_disk_small_mtta_ms`
+    * `sla_disk_medium_mtta_ms`
+    * `sla_disk_large_mtta_ms`
+    * `sla_disk_xlarge_mtta_ms`
+    * `sla_disk_xxlarge_mtta_ms`
+
+**Units:** milliseconds
+
+MTTA only considers instances that have already reached ASSIGNED state and ignores those
+that are still PENDING. This ensures straggler instances (e.g. with unreasonable resource
+constraints) do not affect metric curves.
+
+### Median Time To Starting (MTTS)
+
+*Median time a job waits for its tasks to reach STARTING state. This is a comprehensive metric
+reflecting the overall time it takes for Aurora/Mesos to start initializing the sandbox
+for a task.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mtts_ms`
+* Per cluster - `sla_cluster_mtts_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.22.0/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mtts_ms`
+    * `sla_cpu_medium_mtts_ms`
+    * `sla_cpu_large_mtts_ms`
+    * `sla_cpu_xlarge_mtts_ms`
+    * `sla_cpu_xxlarge_mtts_ms`
+  * By RAM:
+    * `sla_ram_small_mtts_ms`
+    * `sla_ram_medium_mtts_ms`
+    * `sla_ram_large_mtts_ms`
+    * `sla_ram_xlarge_mtts_ms`
+    * `sla_ram_xxlarge_mtts_ms`
+  * By DISK:
+    * `sla_disk_small_mtts_ms`
+    * `sla_disk_medium_mtts_ms`
+    * `sla_disk_large_mtts_ms`
+    * `sla_disk_xlarge_mtts_ms`
+    * `sla_disk_xxlarge_mtts_ms`
+
+**Units:** milliseconds
+
+MTTS only considers instances in STARTING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+### Median Time To Running (MTTR)
+
+*Median time a job waits for its tasks to reach RUNNING state. This is a comprehensive metric
+reflecting the overall time it takes for Aurora/Mesos to start executing user content.*
+
+**Collection scope:**
+
+* Per job - `sla_<job_key>_mttr_ms`
+* Per cluster - `sla_cluster_mttr_ms`
+* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
+[ResourceBag.java](https://github.com/apache/aurora/blob/rel/0.22.0/src/main/java/org/apache/aurora/scheduler/resources/ResourceBag.java)
+  * By CPU:
+    * `sla_cpu_small_mttr_ms`
+    * `sla_cpu_medium_mttr_ms`
+    * `sla_cpu_large_mttr_ms`
+    * `sla_cpu_xlarge_mttr_ms`
+    * `sla_cpu_xxlarge_mttr_ms`
+  * By RAM:
+    * `sla_ram_small_mttr_ms`
+    * `sla_ram_medium_mttr_ms`
+    * `sla_ram_large_mttr_ms`
+    * `sla_ram_xlarge_mttr_ms`
+    * `sla_ram_xxlarge_mttr_ms`
+  * By DISK:
+    * `sla_disk_small_mttr_ms`
+    * `sla_disk_medium_mttr_ms`
+    * `sla_disk_large_mttr_ms`
+    * `sla_disk_xlarge_mttr_ms`
+    * `sla_disk_xxlarge_mttr_ms`
+
+**Units:** milliseconds
+
+MTTR only considers instances in RUNNING state. This ensures straggler instances (e.g. with
+unreasonable resource constraints) do not affect metric curves.
+
+## Limitations
+
+* The availability of Aurora SLA metrics is bound by the scheduler availability.
+
+* All metrics are calculated at a pre-defined interval (currently set at 1 minute).
+  Scheduler restarts may result in missed collections.
diff --git a/source/documentation/latest/features/sla-requirements.md b/source/documentation/latest/features/sla-requirements.md
new file mode 100644
index 0000000..ee876f3
--- /dev/null
+++ b/source/documentation/latest/features/sla-requirements.md
@@ -0,0 +1,185 @@
+SLA Requirements
+================
+
+- [Overview](#overview)
+- [Default SLA](#default-sla)
+- [Custom SLA](#custom-sla)
+  - [Count-based](#count-based)
+  - [Percentage-based](#percentage-based)
+  - [Coordinator-based](#coordinator-based)
+
+## Overview
+
+Aurora guarantees SLA requirements for jobs. These requirements limit the impact of cluster-wide
+maintenance operations on the jobs. For instance, when an operator upgrades
+the OS on all the Mesos agent machines, the tasks scheduled on them need to be drained.
+By specifying its SLA requirements, a job can make sure that it has enough instances to
+continue operating safely without incurring downtime.
+
+> SLA is defined as the minimum number of active tasks required for a job during every duration window.
+A task is active if it was in the `RUNNING` state during the last duration window.
+
+There is a [default](#default-sla) SLA guarantee for
+[preferred](../../features/multitenancy/#configuration-tiers) tier jobs and it is also possible to
+specify [custom](#custom-sla) SLA requirements.
+
+## Default SLA
+
+Aurora guarantees a default SLA requirement for tasks in
+[preferred](../../features/multitenancy/#configuration-tiers) tier.
+
+> 95% of tasks in a job will be `active` for every 30 mins.
+
+
+## Custom SLA
+
+For jobs that require different guarantees, Aurora allows them to specify their own
+SLA requirements via `SlaPolicies`. There are 3 different ways to express SLA requirements.
+
+### [Count-based](../../reference/configuration/#countslapolicy-objects)
+
+For jobs that need a minimum `number` of instances to be running all the time,
+[`CountSlaPolicy`](../../reference/configuration/#countslapolicy-objects)
+provides the ability to express the minimum number of required active instances (i.e. number of
+tasks that are `RUNNING` for at least `duration_secs`). For instance, if we have a
+`replicated-service` that has 3 instances and needs at least 2 instances every 30 minutes to be
+treated healthy, the SLA requirement can be expressed with a
+[`CountSlaPolicy`](../../reference/configuration/#countslapolicy-objects) like below,
+
+```python
+Job(
+  name = 'replicated-service',
+  role = 'www-data',
+  instances = 3,
+  sla_policy = CountSlaPolicy(
+    count = 2,
+    duration_secs = 1800
+  )
+  ...
+)
+```
+
+### [Percentage-based](../../reference/configuration/#percentageslapolicy-objects)
+
+For jobs that need a minimum `percentage` of instances to be running all the time,
+[`PercentageSlaPolicy`](../../reference/configuration/#percentageslapolicy-objects) provides the
+ability to express the minimum percentage of required active instances (i.e. percentage of tasks
+that are `RUNNING` for at least `duration_secs`). For instance, if we have a `frontend-service` that
+has 10000 instances for handling peak load and cannot have more than 0.1% of the instances down
+for every 1 hr, the SLA requirement can be expressed with a
+[`PercentageSlaPolicy`](../../reference/configuration/#percentageslapolicy-objects) like below,
+
+```python
+Job(
+  name = 'frontend-service',
+  role = 'www-data',
+  instances = 10000,
+  sla_policy = PercentageSlaPolicy(
+    percentage = 99.9,
+    duration_secs = 3600
+  )
+  ...
+)
+```
+
+### [Coordinator-based](../../reference/configuration/#coordinatorslapolicy-objects)
+
+When none of the above methods are enough to describe the SLA requirements for a job, the SLA
+calculation can be off-loaded to a custom service called the `Coordinator`. The `Coordinator` needs
+to expose an endpoint that will be called to check if removal of a task will affect the SLA
+requirements for the job. This is useful to control the number of tasks that undergo maintenance
+at a time, without affecting the SLA for the application.
+
+Consider an example where we have a `storage-service` that stores 2 replicas of an object. The replicas
+are distributed across the instances, such that replicas are stored on different hosts. In addition,
+a consistent hash is used for distributing the data across the instances.
+
+When an instance needs to be drained (say for host-maintenance), we have to make sure that at least 1 of
+the 2 replicas remains available. In such a case, a `Coordinator` service can be used to maintain
+the SLA guarantees required for the job.
+
+The job can be configured with a
+[`CoordinatorSlaPolicy`](../../reference/configuration/#coordinatorslapolicy-objects) to specify the
+coordinator endpoint and the field in the response JSON that indicates whether the SLA will be
+affected when the task is removed.
+
+```python
+Job(
+  name = 'storage-service',
+  role = 'www-data',
+  sla_policy = CoordinatorSlaPolicy(
+    coordinator_url = 'http://coordinator.example.com',
+    status_key = 'drain'
+  )
+  ...
+)
+```
+
+
+#### Coordinator Interface [Experimental]
+
+When a [`CoordinatorSlaPolicy`](../../reference/configuration/#coordinatorslapolicy-objects) is
+specified for a job, any action that requires removing a task
+(such as drains) must get approval from the `Coordinator` before proceeding. The
+coordinator service needs to expose an HTTP endpoint that takes a `task-key` param
+(`<cluster>/<role>/<env>/<name>/<instance>`) and a JSON body describing the task
+details, the force maintenance countdown (ms) and other params, and returns a response JSON that
+contains a boolean status allowing or disallowing the task's removal.
+
+##### Request:
+```javascript
+POST /
+  ?task=<cluster>/<role>/<env>/<name>/<instance>
+
+{
+  "forceMaintenanceCountdownMs": "604755646",
+  "task": "cluster/role/devel/job/1",
+  "taskConfig": {
+    "assignedTask": {
+      "taskId": "taskA",
+      "slaveHost": "a",
+      "task": {
+        "job": {
+          "role": "role",
+          "environment": "devel",
+          "name": "job"
+        },
+        ...
+      },
+      "assignedPorts": {
+        "http": 1000
+      },
+      "instanceId": 1
+      ...
+    },
+    ...
+  }
+}
+```
+
+##### Response:
+```json
+{
+  "drain": true
+}
+```
+
+If the Coordinator allows removal of the task, then the task's
+[termination lifecycle](../../reference/configuration/#httplifecycleconfig-objects)
+is triggered. If the Coordinator does not allow removal, then the request will be retried in the
+future.
+
+#### Coordinator Actions
+
+Each Coordinator endpoint gets its own lock, which is used to serialize calls to that Coordinator.
+This guarantees that only one concurrent request is sent to a coordinator endpoint, which allows
+coordinators to simply look at the current state of the tasks to determine the SLA (without having
+to worry about in-flight and pending requests). However, if there are multiple coordinators,
+maintenance can be done in parallel across all the coordinators.
+
+_Note: A single concurrent request to a coordinator endpoint does not translate into an exactly-once
+guarantee. The coordinator must be able to handle duplicate drain
+requests for the same task._
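+
+To make the retry and duplicate-request behavior concrete, here is a hedged, minimal sketch of a
+Coordinator (purely hypothetical, not a reference implementation) that allows at most one drain at a
+time and tolerates duplicate requests for the same task:
+
+```python
+import json
+from http.server import BaseHTTPRequestHandler, HTTPServer
+from urllib.parse import parse_qs, urlparse
+
+DRAINING = set()            # task keys already approved for removal (never cleaned up: naive sketch)
+MAX_CONCURRENT_DRAINS = 1   # hypothetical policy: one drain at a time
+
+class CoordinatorHandler(BaseHTTPRequestHandler):
+    def do_POST(self):
+        task_key = parse_qs(urlparse(self.path).query).get('task', [''])[0]
+        length = int(self.headers.get('Content-Length', '0'))
+        details = json.loads(self.rfile.read(length) or b'{}')  # task details, countdown, etc. (ignored here)
+        # Decide purely from current state; duplicate requests for the same task
+        # must yield the same answer, since delivery is not exactly-once.
+        allow = task_key in DRAINING or len(DRAINING) < MAX_CONCURRENT_DRAINS
+        if allow:
+            DRAINING.add(task_key)
+        self.send_response(200)
+        self.send_header('Content-Type', 'application/json')
+        self.end_headers()
+        # 'drain' matches the status_key configured in the CoordinatorSlaPolicy above.
+        self.wfile.write(json.dumps({'drain': allow}).encode())
+
+if __name__ == '__main__':
+    HTTPServer(('0.0.0.0', 8080), CoordinatorHandler).serve_forever()
+```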
+
+
+
diff --git a/source/documentation/latest/features/webhooks.md b/source/documentation/latest/features/webhooks.md
new file mode 100644
index 0000000..286746e
--- /dev/null
+++ b/source/documentation/latest/features/webhooks.md
@@ -0,0 +1,112 @@
+Webhooks
+========
+
+Aurora has an optional feature which allows an operator to specify a file to configure an HTTP webhook
+to receive task state change events. It can be enabled with a scheduler flag, e.g.
+`-webhook_config=/path/to/webhook.json`. At this point, webhooks are still considered *experimental*.
+
+Below is a sample configuration:
+
+```json
+{
+  "headers": {
+    "Content-Type": "application/vnd.kafka.json.v1+json",
+    "Producer-Type": "reliable"
+  },
+  "targetURL": "http://localhost:5000/",
+  "timeoutMsec": 5
+}
+```
+
+And an example of a task state change event that will be sent to the configured endpoint:
+
+```json
+{
+    "task":
+    {
+        "cachedHashCode":0,
+        "assignedTask": {
+            "cachedHashCode":0,
+            "taskId":"vagrant-test-http_example-8-a6cf7ec5-d793-49c7-b10f-0e14ab80bfff",
+            "task": {
+                "cachedHashCode":-1819348376,
+                "job": {
+                    "cachedHashCode":803049425,
+                    "role":"vagrant",
+                    "environment":"test",
+                    "name":"http_example"
+                    },
+                "owner": {
+                    "cachedHashCode":226895216,
+                    "user":"vagrant"
+                    },
+                "isService":true,
+                "numCpus":0.1,
+                "ramMb":16,
+                "diskMb":8,
+                "priority":0,
+                "maxTaskFailures":1,
+                "production":false,
+                "resources":[
+                    {"cachedHashCode":729800451,"setField":"NUM_CPUS","value":0.1},
+                    {"cachedHashCode":552899914,"setField":"RAM_MB","value":16},
+                    {"cachedHashCode":-1547868317,"setField":"DISK_MB","value":8},
+                    {"cachedHashCode":1957328227,"setField":"NAMED_PORT","value":"http"},
+                    {"cachedHashCode":1954229436,"setField":"NAMED_PORT","value":"tcp"}
+                    ],
+                "constraints":[],
+                "requestedPorts":["http","tcp"],
+                "taskLinks":{"http":"http://%host%:%port:http%"},
+                "contactEmail":"vagrant@localhost",
+                "executorConfig": {
+                    "cachedHashCode":-1194797325,
+                    "name":"AuroraExecutor",
+                    "data": "{\"environment\": \"test\", \"health_check_config\": {\"initial_interval_secs\": 5.0, \"health_checker\": { \"http\": {\"expected_response_code\": 0, \"endpoint\": \"/health\", \"expected_response\": \"ok\"}}, \"max_consecutive_failures\": 0, \"timeout_secs\": 1.0, \"interval_secs\": 1.0}, \"name\": \"http_example\", \"service\": true, \"max_task_failures\": 1, \"cron_collision_policy\": \"KILL_EXISTING\", \"enable_hooks\": false, \"cluster\": \"devcluster\", \"task\": {\"processes\": [{\"daemon\": false, \"name\": \"echo_ports\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"echo \\\"tcp port: {{thermos.ports[tcp]}}; http port: {{thermos.ports[http]}}; alias: {{thermos.ports[alias]}}\\\"\", \"final\": false}, {\"daemon\": false, \"name\": \"stage_server\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"cp /vagrant/src/test/sh/org/apache/aurora/e2e/http_example.py .\", \"final\": false}, {\"daemon\": false, \"name\": \"run_server\", \"ephemeral\": false, \"max_failures\": 1, \"min_duration\": 5, \"cmdline\": \"python http_example.py {{thermos.ports[http]}}\", \"final\": false}], \"name\": \"http_example\", \"finalization_wait\": 30, \"max_failures\": 1, \"max_concurrency\": 0, \"resources\": {\"disk\": 8388608, \"ram\": 16777216, \"cpu\": 0.1}, \"constraints\": [{\"order\": [\"echo_ports\", \"stage_server\", \"run_server\"]}]}, \"production\": false, \"role\": \"vagrant\", \"contact\": \"vagrant@localhost\", \"announce\": {\"primary_port\": \"http\", \"portmap\": {\"alias\": \"http\"}}, \"lifecycle\": {\"http\": {\"graceful_shutdown_endpoint\": \"/quitquitquit\", \"port\": \"health\", \"shutdown_endpoint\": \"/abortabortabort\"}}, \"priority\": 0}"},
+                    "metadata":[],
+                    "container":{
+                        "cachedHashCode":-1955376216,
+                        "setField":"MESOS",
+                        "value":{"cachedHashCode":31}}
+                    },
+                    "assignedPorts":{},
+                    "instanceId":8
+        },
+        "status":"PENDING",
+        "failureCount":0,
+        "taskEvents":[
+            {"cachedHashCode":0,"timestamp":1464992060258,"status":"PENDING","scheduler":"aurora"}]
+        },
+        "oldState":{}}
+```
+
+By default, the webhook watches all TaskStateChanges and sends events to the configured endpoint. If you
+are only interested in certain types of TaskStateChange (e.g. transitions to the `LOST` or `FAILED` statuses),
+you can specify a whitelist of the desired task statuses in webhook.json. The webhook will then only send
+the corresponding events for the whitelisted statuses to the configured endpoint.
+
+```json
+{
+  "headers": {
+    "Content-Type": "application/vnd.kafka.json.v1+json",
+    "Producer-Type": "reliable"
+  },
+  "targetURL": "http://localhost:5000/",
+  "timeoutMsec": 50,
+  "statuses": ["LOST", "FAILED"]
+}
+```
+
+If you want to whitelist all TaskStateChanges, you can add a wildcard character `*` to your whitelist
+like below, or simply leave out the `statuses` field in webhook.json.
+
+```json
+{
+  "headers": {
+    "Content-Type": "application/vnd.kafka.json.v1+json",
+    "Producer-Type": "reliable"
+  },
+  "targetURL": "http://localhost:5000/",
+  "timeoutMsec": 50,
+  "statuses": ["*"]
+}
+```
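+
+As a hedged illustration (not part of Aurora itself), a minimal receiver for these events, matching
+the `targetURL` used above and the payload shape shown earlier, could look like this sketch:
+
+```python
+import json
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+class WebhookHandler(BaseHTTPRequestHandler):
+    def do_POST(self):
+        length = int(self.headers.get('Content-Length', '0'))
+        event = json.loads(self.rfile.read(length))
+        task = event.get('task', {})
+        # Log the task ID and the status it transitioned into.
+        print('%s -> %s' % (task.get('assignedTask', {}).get('taskId'),
+                            task.get('status')))
+        self.send_response(200)
+        self.end_headers()
+
+if __name__ == '__main__':
+    HTTPServer(('localhost', 5000), WebhookHandler).serve_forever()
+```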
diff --git a/source/documentation/latest/getting-started/overview.md b/source/documentation/latest/getting-started/overview.md
new file mode 100644
index 0000000..84c5916
--- /dev/null
+++ b/source/documentation/latest/getting-started/overview.md
@@ -0,0 +1,112 @@
+Aurora System Overview
+======================
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+
+Components
+----------
+
+It is important to have an understanding of the components that make up
+a functioning Aurora cluster.
+
+![Aurora Components](../images/components.png)
+
+* **Aurora scheduler**
+  The scheduler is your primary interface to the work you run in your cluster.  You will
+  instruct it to run jobs, and it will manage them in Mesos for you.  You will also frequently use
+  the scheduler's read-only web interface as a heads-up display for what's running in your cluster.
+
+* **Aurora client**
+  The client (`aurora` command) is a command line tool that exposes primitives that you can use to
+  interact with the scheduler.
+
+  Aurora also provides an admin client (`aurora_admin` command) that contains commands built for
+  cluster administrators.  You can use this tool to do things like manage user quotas and manage
+  graceful maintenance on machines in the cluster.
+
+* **Aurora executor**
+  The executor (a.k.a. Thermos executor) is responsible for carrying out the workloads described in
+  the Aurora DSL (`.aurora` files).  The executor is what actually executes user processes.  It will
+  also perform health checking of tasks and register tasks in ZooKeeper for the purposes of dynamic
+  service discovery.
+
+* **Aurora observer**
+  The observer provides browser-based access to the status of individual tasks executing on worker
+  machines.  It gives insight into the processes executing, and facilitates browsing of task sandbox
+  directories.
+
+* **ZooKeeper**
+  [ZooKeeper](http://zookeeper.apache.org) is a distributed consensus system.  In an Aurora cluster
+  it is used for reliable election of the leading Aurora scheduler and Mesos master.  It is also
+  used as a vehicle for service discovery; see [Service Discovery](../../features/service-discovery/).
+
+* **Mesos master**
+  The master is responsible for tracking worker machines and performing accounting of their
+  resources.  The scheduler interfaces with the master to control the cluster.
+
+* **Mesos agent**
+  The agent receives work assigned by the scheduler and executes it.  It interfaces with Linux
+  isolation systems like cgroups, namespaces and Docker to manage the resource consumption of tasks.
+  When a user task is launched, the agent will launch the executor (in the context of a Linux cgroup
+  or Docker container depending upon the environment), which will in turn fork user processes.
+
+  In earlier versions of Mesos and Aurora, the Mesos agent was known as the Mesos slave.
+
+
+Jobs, Tasks and Processes
+--------------------------
+
+Aurora is a Mesos framework used to schedule *jobs* onto Mesos. Mesos
+cares about individual *tasks*, but typical jobs consist of dozens or
+hundreds of task replicas. Aurora provides a layer on top of Mesos with
+its `Job` abstraction. An Aurora `Job` consists of a task template and
+instructions for creating near-identical replicas of that task (modulo
+things like "instance id" or specific port numbers which may differ from
+machine to machine).
+
+The number of tasks that make up a Job can vary. On a basic level, a Job consists of
+one task template and instructions for creating near-identical replicas of that task
+(otherwise referred to as "instances" or "shards").
+
+A task can merely be a single *process* corresponding to a single
+command line, such as `python2.7 my_script.py`. However, a task can also
+consist of many separate processes, which all run within a single
+sandbox. For example, running multiple cooperating agents together,
+such as `logrotate`, `installer`, master, or agent processes. This is
+where Thermos comes in. While Aurora provides a `Job` abstraction on
+top of Mesos `Tasks`, Thermos provides a `Process` abstraction
+underneath Mesos `Task`s and serves as part of the Aurora framework's
+executor.
+
+You define `Job`s, `Task`s, and `Process`es in a configuration file.
+Configuration files are written in Python, and make use of the
+[Pystachio](https://github.com/wickman/pystachio) templating language,
+along with specific Aurora, Mesos, and Thermos commands and methods.
+The configuration files typically end with a `.aurora` extension.
+
+Summary:
+
+* Aurora manages jobs made of tasks.
+* Mesos manages tasks made of processes.
+* Thermos manages processes.
+* All of these are defined in `.aurora` configuration files.
+
+![Aurora hierarchy](../images/aurora_hierarchy.png)
+
+Each `Task` has a *sandbox* created when the `Task` starts and garbage
+collected when it finishes. All of a `Task`'s processes run in its
+sandbox, so processes can share state by using a shared current working
+directory.
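+
+As a hedged, hypothetical example of that sharing, the first `Process` below writes a file into the
+sandbox and the second reads it:
+
+```python
+# Hypothetical job: 'producer' writes into the shared sandbox working
+# directory, 'consumer' reads the same file afterwards.
+producer = Process(name = 'producer', cmdline = 'echo "prepared at $(date)" > build_info.txt')
+consumer = Process(name = 'consumer', cmdline = 'cat build_info.txt')
+
+task = SequentialTask(
+  processes = [producer, consumer],
+  resources = Resources(cpu = 1.0, ram = 128*MB, disk = 128*MB)
+)
+
+jobs = [
+  Job(
+    cluster = 'devcluster',
+    role = 'www-data',
+    environment = 'devel',
+    name = 'shared_sandbox_example',
+    task = task
+  )
+]
+```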
+
+The sandbox garbage collection policy considers many factors, most
+importantly age and size. It makes a best-effort attempt to keep
+sandboxes around as long as possible post-task in order for service
+owners to inspect data and logs, should the `Task` have completed
+abnormally. But you can't design your applications assuming sandboxes
+will be around forever; if you need data to outlive the sandbox, build log
+saving or other checkpointing mechanisms directly into your application or
+into your `Job` description.
+
diff --git a/source/documentation/latest/getting-started/tutorial.md b/source/documentation/latest/getting-started/tutorial.md
new file mode 100644
index 0000000..1f037fc
--- /dev/null
+++ b/source/documentation/latest/getting-started/tutorial.md
@@ -0,0 +1,258 @@
+# Aurora Tutorial
+
+This tutorial shows how to use the Aurora scheduler to run (and "`printf-debug`")
+a hello world program on Mesos. This is the recommended document for new Aurora users
+to start getting up to speed on the system.
+
+- [Prerequisite](#prerequisite)
+- [The Script](#the-script)
+- [Aurora Configuration](#aurora-configuration)
+- [Creating the Job](#creating-the-job)
+- [Watching the Job Run](#watching-the-job-run)
+- [Cleanup](#cleanup)
+- [Next Steps](#next-steps)
+
+
+## Prerequisite
+
+This tutorial assumes you are running [Aurora locally using Vagrant](../vagrant/).
+However, in general the instructions are also applicable to any other
+[Aurora installation](../../operations/installation/).
+
+Unless otherwise stated, all commands are to be run from the root of the aurora
+repository clone.
+
+
+## The Script
+
+Our "hello world" application is a simple Python script that loops
+forever, displaying the time every few seconds. Copy the code below and
+put it in a file named `hello_world.py` in the root of your Aurora repository clone
+(Note: this directory is the same as `/vagrant` inside the Vagrant VMs).
+
+The script has an intentional bug, which we will explain later on.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+import time
+
+def main():
+  SLEEP_DELAY = 10
+  # Python experts - ignore this blatant bug.
+  for i in xrang(100):
+    print("Hello world! The time is now: %s. Sleeping for %d secs" % (
+      time.asctime(), SLEEP_DELAY))
+    time.sleep(SLEEP_DELAY)
+
+if __name__ == "__main__":
+  main()
+```
+
+## Aurora Configuration
+
+Once we have our script/program, we need to create a *configuration
+file* that tells Aurora how to manage and launch our Job. Save the below
+code in the file `hello_world.aurora`.
+
+<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
+-->
+```python
+pkg_path = '/vagrant/hello_world.py'
+
+# we use a trick here to make the configuration change with
+# the contents of the file, for simplicity.  in a normal setting, packages would be
+# versioned, and the version number would be changed in the configuration.
+import hashlib
+with open(pkg_path, 'rb') as f:
+  pkg_checksum = hashlib.md5(f.read()).hexdigest()
+
+# copy hello_world.py into the local sandbox
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s . && echo %s && chmod +x hello_world.py' % (pkg_path, pkg_checksum))
+
+# run the script
+hello_world = Process(
+  name = 'hello_world',
+  cmdline = 'python -u hello_world.py')
+
+# describe the task
+hello_world_task = SequentialTask(
+  processes = [install, hello_world],
+  resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB))
+
+jobs = [
+  Service(cluster = 'devcluster',
+          environment = 'devel',
+          role = 'www-data',
+          name = 'hello_world',
+          task = hello_world_task)
+]
+```
+
+There is a lot going on in that configuration file:
+
+1. From a "big picture" viewpoint, it first defines two
+Processes. Then it defines a Task that runs the two Processes in the
+order specified in the Task definition, as well as specifying what
+computational and memory resources are available for them.  Finally,
+it defines a Job that will schedule the Task on available and suitable
+machines. This Job is the sole member of a list of Jobs; you can
+specify more than one Job in a config file.
+
+2. At the Process level, it specifies how to get your code into the
+local sandbox in which it will run. It then specifies how the code is
+actually run once the second Process starts.
+
+For more about Aurora configuration files, see the [Configuration
+Tutorial](../../reference/configuration-tutorial/) and the [Configuration
+Reference](../../reference/configuration/) (preferably after finishing this
+tutorial).
+
+
+## Creating the Job
+
+We're ready to launch our job! To do so, we use the Aurora Client to
+issue a Job creation request to the Aurora scheduler.
+
+Many Aurora Client commands take a *job key* argument, which uniquely
+identifies a Job. A job key consists of four parts, each separated by a
+"/". The four parts are  `<cluster>/<role>/<environment>/<jobname>`
+in that order:
+
+* Cluster refers to the name of a particular Aurora installation.
+* Role names are user accounts existing on the agent machines. If you
+don't know what accounts are available, contact your sysadmin.
+* Environment names are namespaces; you can count on `test`, `devel`,
+`staging` and `prod` existing.
+* Jobname is the custom name of your job.
+
+When comparing two job keys, if any of the four parts is different from
+its counterpart in the other key, then the two job keys identify two separate
+jobs. If all four values are identical, the job keys identify the same job.
+
+The `clusters.json` [client configuration](../../reference/client-cluster-configuration/)
+for the Aurora scheduler defines the available cluster names.
+For Vagrant, from the top-level of your Aurora repository clone, do:
+
+    $ vagrant ssh
+
+Followed by:
+
+    vagrant@aurora:~$ cat /etc/aurora/clusters.json
+
+You'll see something like the following. The `name` value shown here corresponds to a job key's cluster value.
+
+```javascript
+[{
+  "name": "devcluster",
+  "zk": "192.168.33.7",
+  "scheduler_zk_path": "/aurora/scheduler",
+  "auth_mechanism": "UNAUTHENTICATED",
+  "slave_run_directory": "latest",
+  "slave_root": "/var/lib/mesos"
+}]
+```
+
+The Aurora Client command that actually runs our Job is `aurora job create`. It creates a Job as
+specified by its job key and configuration file arguments and runs it.
+
+    aurora job create <cluster>/<role>/<environment>/<jobname> <config_file>
+
+Or for our example:
+
+    aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+
+After entering our virtual machine using `vagrant ssh`, this returns:
+
+    vagrant@aurora:~$ aurora job create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Creating job hello_world
+     INFO] Checking status of devcluster/www-data/devel/hello_world
+    Job create succeeded: job url=http://aurora.local:8081/scheduler/www-data/devel/hello_world
+
+
+## Watching the Job Run
+
+Now that our job is running, let's see what it's doing. Access the
+scheduler web interface at `http://$scheduler_hostname:$scheduler_port/scheduler`,
+or when using `vagrant`, at `http://192.168.33.7:8081/scheduler`.
+First we see what Jobs are scheduled:
+
+![Scheduled Jobs](../images/ScheduledJobs.png)
+
+Click on your user name, which in this case was `www-data`, and we see the Jobs associated
+with that role:
+
+![Role Jobs](../images/RoleJobs.png)
+
+If you click on your `hello_world` Job, you'll see:
+
+![hello_world Job](../images/HelloWorldJob.png)
+
+Oops, looks like our first job didn't quite work! The task is temporarily throttled for
+having failed on every attempt of the Aurora scheduler to run it. We have to figure out
+what is going wrong.
+
+On the Completed tasks tab, we see all past attempts of the Aurora scheduler to run our job.
+
+![Completed tasks tab](../images/CompletedTasks.png)
+
+We can navigate to the Task page of a failed run by clicking on the host link.
+
+![Task page](../images/TaskBreakdown.png)
+
+Once there, we see that the `hello_world` process failed. The Task page
+captures the standard error and standard output streams and makes them available.
+Clicking through to `stderr` on the failed `hello_world` process, we see what happened.
+
+![stderr page](../images/stderr.png)
+
+It looks like we made a typo in our Python script. We wanted `xrange`,
+not `xrang`. Edit the `hello_world.py` script to use the correct function
+and save it as `hello_world_v2.py`. Then update the `hello_world.aurora`
+configuration to reference the new version of the script.
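+
+For example (one way to do this, keeping the checksum trick above unchanged), the lines of
+`hello_world.aurora` that reference the script would change to point at the new file:
+
+```python
+pkg_path = '/vagrant/hello_world_v2.py'
+
+# copy hello_world_v2.py into the local sandbox
+install = Process(
+  name = 'fetch_package',
+  cmdline = 'cp %s . && echo %s && chmod +x hello_world_v2.py' % (pkg_path, pkg_checksum))
+
+# run the fixed script
+hello_world = Process(
+  name = 'hello_world',
+  cmdline = 'python -u hello_world_v2.py')
+```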
+
+In order to try again, we can now instruct the scheduler to update our job:
+
+    vagrant@aurora:~$ aurora update start devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
+     INFO] Starting update for: hello_world
+    Job update has started. View your update progress at http://aurora.local:8081/scheduler/www-data/devel/hello_world/update/8ef38017-e60f-400d-a2f2-b5a8b724e95b
+
+This time, the task comes up.
+
+![Running Job](../images/RunningJob.png)
+
+By again clicking on the host, we inspect the Task page, and see that the
+`hello_world` process is running.
+
+![Running Task page](../images/runningtask.png)
+
+We then inspect the output by clicking on `stdout` and see our process'
+output:
+
+![stdout page](../images/stdout.png)
+
+## Cleanup
+
+Now that we're done, we kill the job using the Aurora client:
+
+    vagrant@aurora:~$ aurora job killall devcluster/www-data/devel/hello_world
+     INFO] Killing tasks for job: devcluster/www-data/devel/hello_world
+     INFO] Instances to be killed: [0]
+    Successfully killed instances [0]
+    Job killall succeeded
+
+The job page now shows the `hello_world` tasks as completed.
+
+![Killed Task page](../images/killedtask.png)
+
+## Next Steps
+
+Now that you've finished this Tutorial, you should read or do the following:
+
+- [The Aurora Configuration Tutorial](../../reference/configuration-tutorial/), which provides more examples
+  and best practices for writing Aurora configurations. You should also look at
+  the [Aurora Configuration Reference](../../reference/configuration/).
+- Explore the Aurora Client - use `aurora -h`, and read the
+  [Aurora Client Commands](../../reference/client-commands/) document.
diff --git a/source/documentation/latest/getting-started/vagrant.md b/source/documentation/latest/getting-started/vagrant.md
new file mode 100644
index 0000000..e4d4168
--- /dev/null
+++ b/source/documentation/latest/getting-started/vagrant.md
@@ -0,0 +1,154 @@
+A local Cluster with Vagrant
+============================
+
+This document shows you how to configure a complete cluster using a virtual machine. This setup
+replicates a real cluster in your development machine as closely as possible. After you complete
+the steps outlined here, you will be ready to create and run your first Aurora job.
+
+The following sections describe these steps in detail:
+
+1. [Overview](#overview)
+1. [Install VirtualBox and Vagrant](#install-virtualbox-and-vagrant)
+1. [Clone the Aurora repository](#clone-the-aurora-repository)
+1. [Start the local cluster](#start-the-local-cluster)
+1. [Log onto the VM](#log-onto-the-vm)
+1. [Run your first job](#run-your-first-job)
+1. [Rebuild components](#rebuild-components)
+1. [Shut down or delete your local cluster](#shut-down-or-delete-your-local-cluster)
+1. [Troubleshooting](#troubleshooting)
+
+
+Overview
+--------
+
+The Aurora distribution includes a set of scripts that enable you to create a local cluster in
+your development machine. These scripts use [Vagrant](https://www.vagrantup.com/) and
+[VirtualBox](https://www.virtualbox.org/) to run and configure a virtual machine. Once the
+virtual machine is running, the scripts install and initialize Aurora and any required components
+to create the local cluster.
+
+
+Install VirtualBox and Vagrant
+------------------------------
+
+First, download and install [VirtualBox](https://www.virtualbox.org/) on your development machine.
+
+Then download and install [Vagrant](https://www.vagrantup.com/). To verify that the installation
+was successful, open a terminal window and type the `vagrant` command. You should see a list of
+common commands for this tool.
+
+
+Clone the Aurora repository
+---------------------------
+
+To obtain the Aurora source distribution, clone its Git repository using the following command:
+
+     git clone git://git.apache.org/aurora.git
+
+
+Start the local cluster
+-----------------------
+
+Now change into the `aurora/` directory, which contains the Aurora source code and
+other scripts and tools:
+
+     cd aurora/
+
+To start the local cluster, type the following command:
+
+     vagrant up
+
+This command uses the configuration scripts in the Aurora distribution to:
+
+* Download a Linux system image.
+* Start a virtual machine (VM) and configure it.
+* Install the required build tools on the VM.
+* Install Aurora's requirements (like [Mesos](http://mesos.apache.org/) and
+[Zookeeper](http://zookeeper.apache.org/)) on the VM.
+* Build and install Aurora from source on the VM.
+* Start Aurora's services on the VM.
+
+This process takes several minutes to complete.
+
+You may notice a warning that guest additions in the VM don't match your version of VirtualBox.
+This should generally be harmless, but you may wish to install a vagrant plugin to take care of
+mismatches like this for you:
+
+     vagrant plugin install vagrant-vbguest
+
+With this plugin installed, whenever you `vagrant up` the plugin will upgrade the guest additions
+for you when a version mis-match is detected. You can read more about the plugin
+[here](https://github.com/dotless-de/vagrant-vbguest).
+
+To verify that Aurora is running on the cluster, visit the following URLs:
+
+* Scheduler - http://192.168.33.7:8081
+* Observer - http://192.168.33.7:1338
+* Mesos Master - http://192.168.33.7:5050
+* Mesos Agent - http://192.168.33.7:5051
+
+
+Log onto the VM
+---------------
+
+To SSH into the VM, run the following command in your development machine:
+
+     vagrant ssh
+
+To verify that Aurora is installed in the VM, type the `aurora` command. You should see a list
+of arguments and possible commands.
+
+The `/vagrant` directory on the VM is mapped to the `aurora/` local directory
+from which you started the cluster. You can edit files inside this directory in your development
+machine and access them from the VM under `/vagrant`.
+
+A pre-installed `clusters.json` file refers to your local cluster as `devcluster`, which you
+will use in client commands.
+
+
+Run your first job
+------------------
+
+Now that your cluster is up and running, you are ready to define and run your first job in Aurora.
+For more information, see the [Aurora Tutorial](../tutorial/).
+
+
+Rebuild components
+------------------
+
+If you are changing Aurora code and would like to rebuild a component, you can use the `aurorabuild`
+command on the VM to build and restart a component.  This is considerably faster than destroying
+and rebuilding your VM.
+
+`aurorabuild` accepts a list of components to build and update. To get a list of supported
+components, invoke the `aurorabuild` command with no arguments. For example, to rebuild and
+restart the client:
+
+     vagrant ssh -c 'aurorabuild client'
+
+
+Shut down or delete your local cluster
+--------------------------------------
+
+To shut down your local cluster, run the `vagrant halt` command in your development machine. To
+start it again, run the `vagrant up` command.
+
+Once you are finished with your local cluster, or if you would otherwise like to start from scratch,
+you can use the command `vagrant destroy` to turn off and delete the virtual machine.
+
+
+Troubleshooting
+---------------
+
+Most Vagrant-related problems can be fixed with the following steps:
+
+* Destroying the vagrant environment with `vagrant destroy`
+* Killing any orphaned VMs (see AURORA-499) with the VirtualBox UI or the `VBoxManage` command line tool
+* Cleaning the repository of build artifacts and other intermediate output with `git clean -fdx`
+* Bringing up the vagrant environment with `vagrant up`
+
+If that still doesn't solve your problem, make sure to inspect the log files:
+
+* Scheduler: `/var/log/aurora/scheduler.log` or `sudo journalctl -u aurora-scheduler`
+* Observer: `/var/log/thermos/observer.log` or `sudo journalctl -u thermos-observer`
+* Mesos Master: `/var/log/mesos/mesos-master.INFO` (also see `.WARNING` and `.ERROR`)
+* Mesos Agent: `/var/log/mesos/mesos-slave.INFO` (also see `.WARNING` and `.ERROR`)
diff --git a/source/documentation/latest/images/CompletedTasks.png b/source/documentation/latest/images/CompletedTasks.png
new file mode 100644
index 0000000..343e111
--- /dev/null
+++ b/source/documentation/latest/images/CompletedTasks.png
Binary files differ
diff --git a/source/documentation/latest/images/HelloWorldJob.png b/source/documentation/latest/images/HelloWorldJob.png
index 7a89575..4977227 100644
--- a/source/documentation/latest/images/HelloWorldJob.png
+++ b/source/documentation/latest/images/HelloWorldJob.png
Binary files differ
diff --git a/source/documentation/latest/images/RoleJobs.png b/source/documentation/latest/images/RoleJobs.png
index d41ee0b..95ffa49 100644
--- a/source/documentation/latest/images/RoleJobs.png
+++ b/source/documentation/latest/images/RoleJobs.png
Binary files differ
diff --git a/source/documentation/latest/images/RunningJob.png b/source/documentation/latest/images/RunningJob.png
new file mode 100644
index 0000000..4e7a045
--- /dev/null
+++ b/source/documentation/latest/images/RunningJob.png
Binary files differ
diff --git a/source/documentation/latest/images/ScheduledJobs.png b/source/documentation/latest/images/ScheduledJobs.png
index 21bfcae..0be4731 100644
--- a/source/documentation/latest/images/ScheduledJobs.png
+++ b/source/documentation/latest/images/ScheduledJobs.png
Binary files differ
diff --git a/source/documentation/latest/images/TaskBreakdown.png b/source/documentation/latest/images/TaskBreakdown.png
index 125a4e9..fab702c 100644
--- a/source/documentation/latest/images/TaskBreakdown.png
+++ b/source/documentation/latest/images/TaskBreakdown.png
Binary files differ
diff --git a/publish/assets/img/aurora_logo.png b/source/documentation/latest/images/aurora_logo.png
similarity index 100%
copy from publish/assets/img/aurora_logo.png
copy to source/documentation/latest/images/aurora_logo.png
Binary files differ
diff --git a/source/documentation/latest/images/components.odg b/source/documentation/latest/images/components.odg
new file mode 100644
index 0000000..a0f3b28
--- /dev/null
+++ b/source/documentation/latest/images/components.odg
Binary files differ
diff --git a/source/documentation/latest/images/components.png b/source/documentation/latest/images/components.png
new file mode 100644
index 0000000..e8228c0
--- /dev/null
+++ b/source/documentation/latest/images/components.png
Binary files differ
diff --git a/source/documentation/latest/images/debug-client-test.png b/source/documentation/latest/images/debug-client-test.png
new file mode 100644
index 0000000..8b67a10
--- /dev/null
+++ b/source/documentation/latest/images/debug-client-test.png
Binary files differ
diff --git a/source/documentation/latest/images/debugging-client-test.png b/source/documentation/latest/images/debugging-client-test.png
new file mode 100644
index 0000000..df7b7e3
--- /dev/null
+++ b/source/documentation/latest/images/debugging-client-test.png
Binary files differ
diff --git a/source/documentation/latest/images/killedtask.png b/source/documentation/latest/images/killedtask.png
index b173698..bb1bc92 100644
--- a/source/documentation/latest/images/killedtask.png
+++ b/source/documentation/latest/images/killedtask.png
Binary files differ
diff --git a/source/documentation/latest/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png b/source/documentation/latest/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
new file mode 100644
index 0000000..ea9866e
--- /dev/null
+++ b/source/documentation/latest/images/presentations/02_19_2015_aurora_adopters_panel_thumb.png
Binary files differ
diff --git a/source/documentation/latest/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png b/source/documentation/latest/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
new file mode 100644
index 0000000..c9539df
--- /dev/null
+++ b/source/documentation/latest/images/presentations/02_19_2015_aurora_at_tellapart_thumb.png
Binary files differ
diff --git a/source/documentation/latest/images/presentations/02_19_2015_aurora_at_twitter_thumb.png b/source/documentation/latest/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
new file mode 100644
index 0000000..99674c7
--- /dev/null
+++ b/source/documentation/latest/images/presentations/02_19_2015_aurora_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/latest/images/presentations/02_28_2015_apache_aurora_thumb.png b/source/documentation/latest/images/presentations/02_28_2015_apache_aurora_thumb.png
new file mode 100644
index 0000000..6e0e1fb
--- /dev/null
+++ b/source/documentation/latest/images/presentations/02_28_2015_apache_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/latest/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png b/source/documentation/latest/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
new file mode 100644
index 0000000..f81a128
--- /dev/null
+++ b/source/documentation/latest/images/presentations/03_07_2015_aurora_mesos_in_practice_at_twitter_thumb.png
Binary files differ
diff --git a/source/documentation/latest/images/presentations/03_25_2014_introduction_to_aurora_thumb.png b/source/documentation/latest/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
new file mode 100644
index 0000000..6ebce80
--- /dev/null
+++ b/source/documentation/latest/images/presentations/03_25_2014_introduction_to_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/latest/images/presentations/04_30_2015_monolith_to_microservices_thumb.png b/source/documentation/latest/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
new file mode 100644
index 0000000..f8a3db1
--- /dev/null
+++ b/source/documentation/latest/images/presentations/04_30_2015_monolith_to_microservices_thumb.png
Binary files differ
diff --git a/source/documentation/latest/images/presentations/08_21_2014_past_present_future_thumb.png b/source/documentation/latest/images/presentations/08_21_2014_past_present_future_thumb.png
new file mode 100644
index 0000000..deaebe7
--- /dev/null
+++ b/source/documentation/latest/images/presentations/08_21_2014_past_present_future_thumb.png
Binary files differ
diff --git a/source/documentation/latest/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png b/source/documentation/latest/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
new file mode 100644
index 0000000..61ea6d4
--- /dev/null
+++ b/source/documentation/latest/images/presentations/09_20_2015_shipping_code_with_aurora_thumb.png
Binary files differ
diff --git a/source/documentation/latest/images/presentations/09_20_2015_twitter_production_scale_thumb.png b/source/documentation/latest/images/presentations/09_20_2015_twitter_production_scale_thumb.png
new file mode 100644
index 0000000..9f1f4af
--- /dev/null
+++ b/source/documentation/latest/images/presentations/09_20_2015_twitter_production_scale_thumb.png
Binary files differ
diff --git a/source/documentation/latest/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png b/source/documentation/latest/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
new file mode 100644
index 0000000..a8bb199
--- /dev/null
+++ b/source/documentation/latest/images/presentations/10_08_2015_mesos_aurora_on_a_small_scale_thumb.png
Binary files differ
diff --git a/source/documentation/latest/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png b/source/documentation/latest/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
new file mode 100644
index 0000000..ac5c6dc
--- /dev/null
+++ b/source/documentation/latest/images/presentations/10_08_2015_sla_aware_maintenance_for_operators_thumb.png
Binary files differ
diff --git a/source/documentation/latest/images/runningtask.png b/source/documentation/latest/images/runningtask.png
index 7f7553c..9e7e31a 100644
--- a/source/documentation/latest/images/runningtask.png
+++ b/source/documentation/latest/images/runningtask.png
Binary files differ
diff --git a/source/documentation/latest/images/stderr.png b/source/documentation/latest/images/stderr.png
index b83ccfa..6076a84 100644
--- a/source/documentation/latest/images/stderr.png
+++ b/source/documentation/latest/images/stderr.png
Binary files differ
diff --git a/source/documentation/latest/images/stdout.png b/source/documentation/latest/images/stdout.png
index fb4e0b7..d614097 100644
--- a/source/documentation/latest/images/stdout.png
+++ b/source/documentation/latest/images/stdout.png
Binary files differ
diff --git a/source/documentation/latest/index.html.md b/source/documentation/latest/index.html.md
new file mode 100644
index 0000000..fe770d6
--- /dev/null
+++ b/source/documentation/latest/index.html.md
@@ -0,0 +1,80 @@
+## Introduction
+
+Apache Aurora is a service scheduler that runs on top of Apache Mesos, enabling you to run
+long-running services, cron jobs, and ad-hoc jobs that take advantage of Apache Mesos' scalability,
+fault-tolerance, and resource isolation.
+
+We encourage you to ask questions on the [Aurora user list](http://aurora.apache.org/community/) or
+the `#aurora` IRC channel on `irc.freenode.net`.
+
+
+## Getting Started
+Information for everyone new to Apache Aurora.
+
+ * [Aurora System Overview](getting-started/overview/)
+ * [Hello World Tutorial](getting-started/tutorial/)
+ * [Local cluster with Vagrant](getting-started/vagrant/)
+
+## Features
+Description of important Aurora features.
+
+ * [Containers](features/containers/)
+ * [Cron Jobs](features/cron-jobs/)
+ * [Custom Executors](features/custom-executors/)
+ * [Job Updates](features/job-updates/)
+ * [Multitenancy](features/multitenancy/)
+ * [Resource Isolation](features/resource-isolation/)
+ * [Scheduling Constraints](features/constraints/)
+ * [Services](features/services/)
+ * [Service Discovery](features/service-discovery/)
+ * [SLA Metrics](features/sla-metrics/)
+ * [SLA Requirements](features/sla-requirements/)
+ * [Webhooks](features/webhooks/)
+
+## Operators
+For those that wish to manage and fine-tune an Aurora cluster.
+
+ * [Installation](operations/installation/)
+ * [Configuration](operations/configuration/)
+ * [Upgrades](operations/upgrades/)
+ * [Troubleshooting](operations/troubleshooting/)
+ * [Monitoring](operations/monitoring/)
+ * [Security](operations/security/)
+ * [Storage](operations/storage/)
+ * [Backup](operations/backup-restore/)
+
+## Reference
+The complete reference of commands, configuration options, and scheduler internals.
+
+ * [Task lifecycle](reference/task-lifecycle/)
+ * Configuration (`.aurora` files)
+    - [Configuration Reference](reference/configuration/)
+    - [Configuration Tutorial](reference/configuration-tutorial/)
+    - [Configuration Best Practices](reference/configuration-best-practices/)
+    - [Configuration Templating](reference/configuration-templating/)
+ * Aurora Client
+    - [Client Commands](reference/client-commands/)
+    - [Client Hooks](reference/client-hooks/)
+    - [Client Cluster Configuration](reference/client-cluster-configuration/)
+ * [Scheduler Configuration](reference/scheduler-configuration/)
+ * [Observer Configuration](reference/observer-configuration/)
+ * [Endpoints](reference/scheduler-endpoints/)
+
+## Additional Resources
+ * [Tools integrating with Aurora](additional-resources/tools/)
+ * [Presentation videos and slides](additional-resources/presentations/)
+
+## Developers
+All the information you need to start modifying Aurora and contributing back to the project.
+
+ * [Contributing to the project](contributing/)
+ * [Committer's Guide](development/committers-guide/)
+ * [Design Documents](development/design-documents/)
+ * Developing the Aurora components:
+     - [Client](development/client/)
+     - [Scheduler](development/scheduler/)
+     - [Scheduler UI](development/ui/)
+     - [Thermos](development/thermos/)
+     - [Thrift structures](development/thrift/)
+
+
diff --git a/source/documentation/latest/operations/backup-restore.md b/source/documentation/latest/operations/backup-restore.md
new file mode 100644
index 0000000..2230614
--- /dev/null
+++ b/source/documentation/latest/operations/backup-restore.md
@@ -0,0 +1,80 @@
+# Recovering from a Scheduler Backup
+
+**Be sure to read the entire page before attempting to restore from a backup, as it may have
+unintended consequences.**
+
+## Summary
+
+The restoration procedure replaces the existing (possibly corrupted) Mesos replicated log with an
+earlier, backed-up version and requires all schedulers to be taken down temporarily while
+restoring. Once completed, the scheduler state resets to what it was when the backup was created.
+This means any jobs/tasks created or updated after the backup are unknown to the scheduler and will
+be killed shortly after the cluster restarts. All other tasks continue operating as normal.
+
+Usually, it is a bad idea to restore a backup that is not extremely recent (i.e. older than a few
+hours). This is because the scheduler will expect the cluster to look exactly as the backup does,
+so any tasks that have been rescheduled since the backup was taken will be killed.
+
+The instructions below have been verified in the [Vagrant environment](../../getting-started/vagrant/) and,
+with minor syntax/path changes, should be applicable to any Aurora cluster.
+
+Follow these steps to prepare the cluster for restoring from a backup:
+
+##  Preparation
+
+* Stop all scheduler instances.
+
+* Pick a backup to use for rehydrating the mesos-replicated log. Backups can be found in the
+directory given to the scheduler as the `-backup_dir` argument. Backups are stored in the format
+`scheduler-backup-<yyyy-MM-dd-HH-mm>`.
+
+* If running the Aurora Scheduler in HA mode, pick a single scheduler instance to rehydrate.
+
+* Locate the `recovery-tool` in your setup. If Aurora was installed using a Debian package
+generated by our `aurora-packaging` script, the recovery tool can be found
+in `/usr/share/aurora/bin/recovery-tool`.
+
+## Cleanup
+
+* Delete (or move) the Mesos replicated log path for each scheduler instance. The location of the
+Mesos replicated log file path can be found by looking at the value given to the flag
+`-native_log_file_path` for each instance.
+
+* Initialize the Mesos replicated log files using the mesos-log tool:
+```
+sudo -u <USER> mesos-log initialize --path=<native_log_file_path>
+```
+Where `USER` is the user under which the scheduler instance will be run. For installations using
+Debian packages, the default user will be `aurora`. You may alternatively choose to specify
+a group as well by passing the `-g <GROUP>` option to `sudo`.
+Note that if the user under which the Aurora scheduler instance is run _does not_ have permissions
+to read this directory and the files it contains, the instance will fail to start.
+
+## Restore from backup
+
+* Run the `recovery-tool`. Wherever the flags match those used for the scheduler instance,
+use the same values:
+```
+$ recovery-tool -from BACKUP \
+-to LOG \
+-backup=<selected_backup_location> \
+-native_log_zk_group_path=<native_log_zk_group_path> \
+-native_log_file_path=<native_log_file_path> \
+-zk_endpoints=<zk_endpoints>
+```
+
+## Bring scheduler instances back online
+
+### If running in HA Mode
+
+* Start the rehydrated scheduler instance along with enough cleaned up instances to
+meet the `-native_log_quorum_size`. The mesos-replicated log algorithm will replenish
+the "blank" scheduler instances with the information from the rehydrated instance.
+
+* Start any remaining scheduler instances.
+
+### If running in singleton mode
+
+* Start the single scheduler instance.
+
+
diff --git a/source/documentation/latest/operations/configuration.md b/source/documentation/latest/operations/configuration.md
new file mode 100644
index 0000000..ac5f226
--- /dev/null
+++ b/source/documentation/latest/operations/configuration.md
@@ -0,0 +1,380 @@
+# Scheduler Configuration
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+Examples are available under `examples/scheduler/`. For a list of available Aurora flags and their
+documentation, see [Scheduler Configuration Reference](../../reference/scheduler-configuration/).
+
+
+## A Note on Configuration
+Like Mesos, Aurora uses command-line flags for runtime configuration. As such, the Aurora
+"configuration file" is typically a `scheduler.sh` shell script of the following form:
+
+    #!/bin/bash
+    AURORA_HOME=/usr/local/aurora-scheduler
+
+    # Flags controlling the JVM.
+    JAVA_OPTS=(
+      -Xmx2g
+      -Xms2g
+      # GC tuning, etc.
+    )
+
+    # Flags controlling the scheduler.
+    AURORA_FLAGS=(
+      # Port for client RPCs and the web UI
+      -http_port=8081
+      # Log configuration, etc.
+    )
+
+    # Environment variables controlling libmesos
+    export JAVA_HOME=...
+    export GLOG_v=1
+    export LIBPROCESS_PORT=8083
+    export LIBPROCESS_IP=192.168.33.7
+
+    JAVA_OPTS="${JAVA_OPTS[*]}" exec "$AURORA_HOME/bin/aurora-scheduler" "${AURORA_FLAGS[@]}"
+
+That way Aurora's current flags are visible in `ps` and in the `/vars` admin endpoint.
+
+
+## JVM Configuration
+
+JVM settings are dependent on your environment and cluster size. They might require
+custom tuning. As a starting point, we recommend:
+
+* Ensure the initial (`-Xms`) and maximum (`-Xmx`) heap size are identical to prevent heap resizing
+  at runtime.
+* Either `-XX:+UseConcMarkSweepGC` or `-XX:+UseG1GC -XX:+UseStringDeduplication` are
+  sane defaults for the garbage collector.
+* `-Djava.net.preferIPv4Stack=true` makes sense in most cases as well.
+
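+As a sketch, these recommendations could be combined in the `scheduler.sh` style shown above (the
+2 GiB heap below is only an illustrative value, not a sizing recommendation):
+
+    # Flags controlling the JVM.
+    JAVA_OPTS=(
+      # Identical initial and maximum heap size to prevent resizing at runtime.
+      -Xms2g
+      -Xmx2g
+      # Garbage collector settings suggested above.
+      -XX:+UseG1GC
+      -XX:+UseStringDeduplication
+      -Djava.net.preferIPv4Stack=true
+    )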
+
+## Network Configuration
+
+By default, Aurora binds to all interfaces and auto-discovers its hostname. To reduce ambiguity,
+it helps to hardcode these values:
+
+    -http_port=8081
+    -ip=192.168.33.7
+    -hostname="aurora1.us-east1.example.org"
+
+Two environment variables control the IP and port used for communication with the Mesos master
+and for the replicated log used by Aurora:
+
+    export LIBPROCESS_PORT=8083
+    export LIBPROCESS_IP=192.168.33.7
+
+It is important that those can be reached from all Mesos master and Aurora scheduler instances.
+
+
+## Replicated Log Configuration
+
+Aurora schedulers use ZooKeeper to discover log replicas and elect a leader. Only one scheduler is
+leader at a given time - the other schedulers follow log writes and prepare to take over as leader
+but do not communicate with the Mesos master. Either 3 or 5 schedulers are recommended in a
+production deployment depending on failure tolerance and they must have persistent storage.
+
+Below is a summary of scheduler storage configuration flags that either don't have default values
+or require attention before deploying in a production environment.
+
+### `-native_log_quorum_size`
+Defines the Mesos replicated log quorum size. In a cluster with `N` schedulers, the flag
+`-native_log_quorum_size` should be set to `floor(N/2) + 1`. So in a cluster with 1 scheduler
+it should be set to `1`, in a cluster with 3 it should be set to `2`, and in a cluster of 5 it
+should be set to `3`.
+
+  Number of schedulers (N) | ```-native_log_quorum_size``` setting (```floor(N/2) + 1```)
+  ------------------------ | -------------------------------------------------------------
+  1                        | 1
+  3                        | 2
+  5                        | 3
+  7                        | 4
+
+*Incorrectly setting this flag will cause data corruption to occur!*
+
+### `-native_log_file_path`
+Location of the Mesos replicated log files. For optimal and consistent performance, consider
+allocating a dedicated disk (preferably SSD) for the replicated log. Ensure that this disk is not
+used by anything else (e.g. no process logging) and in particular that it is a real disk
+and not just a partition.
+
+Even when a dedicated disk is used, switching the Linux kernel I/O scheduler from `CFQ` to `deadline`
+can further help with storage performance in Aurora ([see this ticket for details](https://issues.apache.org/jira/browse/AURORA-1211)).
+
+### `-native_log_zk_group_path`
+ZooKeeper path used for Mesos replicated log quorum discovery.
+
+See [code](https://github.com/apache/aurora/blob/rel/0.22.0/src/main/java/org/apache/aurora/scheduler/log/mesos/MesosLogStreamModule.java) for
+other available Mesos replicated log configuration options and default values.
+
+### Changing the Quorum Size
+Special care needs to be taken when changing the size of the Aurora scheduler quorum.
+Since Aurora uses a Mesos replicated log, similar steps need to be followed as when
+[changing the Mesos quorum size](http://mesos.apache.org/documentation/latest/operational-guide).
+
+As a preparation, increase `-native_log_quorum_size` on each existing scheduler and restart them.
+When updating from 3 to 5 schedulers, the quorum size would grow from 2 to 3.
+
+When starting the new schedulers, set `-native_log_quorum_size` to the new value. Failing to
+first increase the quorum size on running schedulers can in some cases result in corruption
+or truncating of the replicated log used by Aurora. In that case, see the documentation on
+[recovering from backup](../backup-restore/).
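+
+For the 3-to-5 example above, a minimal sketch of the order of operations is:
+
+    # 1. On each of the three existing schedulers: raise the quorum size, then restart.
+    -native_log_quorum_size=3
+
+    # 2. Only afterwards, start the two new schedulers with the same setting.
+    -native_log_quorum_size=3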
+
+
+## Backup Configuration
+
+Configuration options for the Aurora scheduler backup manager.
+
+* `-backup_interval`: The interval on which the scheduler writes local storage backups.
+   The default is every hour.
+* `-backup_dir`: Directory to write backups to. As stated above, this should not be co-located on the
+   same disk as the replicated log.
+* `-max_saved_backups`: Maximum number of backups to retain before deleting the oldest backup(s).
+
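+For illustration only (the directory and values below are assumptions, not recommendations), a
+backup configuration might look like:
+
+    -backup_interval=1hrs
+    -backup_dir=/var/lib/aurora/backups
+    -max_saved_backups=48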
+
+## Resource Isolation
+
+For proper CPU, memory, and disk isolation as mentioned in our [end-user documentation](../../features/resource-isolation/),
+we recommend adding the following isolators to the `--isolation` flag of the Mesos agent:
+
+* `cgroups/cpu`
+* `cgroups/mem`
+* `disk/du`
+
+In addition, we recommend setting the following [agent flags](http://mesos.apache.org/documentation/latest/configuration/):
+
+* `--cgroups_limit_swap` to enable memory limits on both memory and swap instead of just memory.
+  Alternatively, you could disable swap on your agent hosts.
+* `--cgroups_enable_cfs` to enable hard limits on CPU resources via the CFS bandwidth limiting
+  feature.
+* `--enforce_container_disk_quota` to enable disk quota enforcement for containers.
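+
+Put together, the relevant portion of the agent configuration might look like the following
+sketch (to be merged with whatever other isolators and flags your agents already use):
+
+    --isolation=cgroups/cpu,cgroups/mem,disk/du
+    --cgroups_limit_swap
+    --cgroups_enable_cfs
+    --enforce_container_disk_quota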
+
+To enable the optional GPU support in Mesos, please see the GPU related flags in the
+[Mesos configuration](http://mesos.apache.org/documentation/latest/configuration/).
+To enable the corresponding feature in Aurora, you have to start the scheduler with the
+flag
+
+    -allow_gpu_resource=true
+
+If you want to use revocable resources, first follow the
+[Mesos oversubscription documentation](http://mesos.apache.org/documentation/latest/oversubscription/)
+and then set this Aurora scheduler flag to allow receiving revocable Mesos offers:
+
+    -receive_revocable_resources=true
+
+Both CPUs and RAM are supported as revocable resources. The former is enabled by default,
+the latter needs to be enabled via:
+
+    -enable_revocable_ram=true
+
+Unless you want to use the [default](https://github.com/apache/aurora/blob/rel/0.22.0/src/main/resources/org/apache/aurora/scheduler/tiers.json)
+tier configuration, you will also have to specify a file path:
+
+    -tier_config=path/to/tiers/config.json
+
+
+## Multi-Framework Setup
+
+Aurora holds onto Mesos offers in order to provide efficient scheduling and
+[preemption](../../features/multitenancy/#preemption). This is problematic in multi-framework
+environments as Aurora might starve other frameworks.
+
+At the cost of increased scheduling latency, Aurora can be configured to be more cooperative:
+
+* Lowering `-min_offer_hold_time` (e.g. to `1mins`) can ensure unused offers are returned back to
+  Mesos more frequently.
+* Increasing `-offer_filter_duration` (e.g. to `30secs`) will instruct Mesos
+  not to re-offer rejected resources for the given duration.
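+
+For example, using the values suggested above (tune them for your environment):
+
+    -min_offer_hold_time=1mins
+    -offer_filter_duration=30secs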
+
+Setting a [minimum amount of resources](http://mesos.apache.org/documentation/latest/quota/) for
+each Mesos role can furthermore help to ensure no framework is starved entirely.
+
+
+## Containers
+
+Both the Mesos and Docker containerizers require configuration of the Mesos agent.
+
+### Mesos Containerizer
+
+The minimal agent configuration requires enabling Docker and Appc image support for the Mesos
+containerizer:
+
+    --containerizers=mesos
+    --image_providers=appc,docker
+    --isolation=filesystem/linux,docker/runtime  # as an addition to your other isolators
+
+Further details can be found in the corresponding [Mesos documentation](http://mesos.apache.org/documentation/latest/container-image/).
+
+### Docker Containerizer
+
+The [Docker containerizer](http://mesos.apache.org/documentation/latest/docker-containerizer/)
+requires that the Docker engine be installed on each agent host. In addition, it must be enabled on the
+Mesos agents by launching them with the option:
+
+    --containerizers=mesos,docker
+
+If you would like to run a container with a read-only filesystem, it may also be necessary to use
+the scheduler flag `-thermos_home_in_sandbox` in order to set HOME to the sandbox
+before the executor runs. This will make sure that the executor/runner PEX extraction happens
+inside of the sandbox instead of the container filesystem root.
+
+If you would like to supply your own parameters to `docker run` when launching jobs in docker
+containers, you may use the following flags:
+
+    -allow_docker_parameters
+    -default_docker_parameters
+
+`-allow_docker_parameters` controls whether or not users may pass their own configuration parameters
+through the job configuration files. If set to `false` (the default), the scheduler will reject
+jobs with custom parameters. *NOTE*: this setting should be used with caution as it allows any job
+owner to specify any parameters they wish, including those that may introduce security concerns
+(`privileged=true`, for example).
+
+`-default_docker_parameters` allows a cluster operator to specify a universal set of parameters that
+should be used for every container that does not have parameters explicitly configured at the job
+level. The argument accepts a multimap format:
+
+    -default_docker_parameters="read-only=true,tmpfs=/tmp,tmpfs=/run"
+
+### Common Options
+
+The following Aurora options work for both containerizers.
+
+The scheduler flag `-global_container_mounts` allows mounting paths from the host (i.e. the agent machine)
+into all containers on that host. The format is a comma-separated list of `host_path:container_path[:mode]`
+tuples. For example `-global_container_mounts=/opt/secret_keys_dir:/mnt/secret_keys_dir:ro` mounts
+`/opt/secret_keys_dir` from the agents into all launched containers. Valid modes are `ro` and `rw`.
+
+
+## Thermos Process Logs
+
+### Log destination
+By default, Thermos will write process stdout/stderr to log files in the sandbox. Process object
+configuration allows specifying alternate log file destinations like streamed stdout/stderr or
+suppression of all log output. Default behavior can be configured for the entire cluster with the
+following flag (through the `-thermos_executor_flags` argument to the Aurora scheduler):
+
+    --runner-logger-destination=both
+
+The `both` setting will send logs to files and also stream them to the parent stdout/stderr.
+
+See [Configuration Reference](../../reference/configuration/#logger) for all destination options.
+
+### Log rotation
+By default, Thermos will not rotate the stdout/stderr logs from child processes and they will grow
+without bound. An individual user may change this behavior via configuration on the Process object,
+but it may also be desirable to change the default configuration for the entire cluster.
+In order to enable rotation by default, the following flags can be applied to Thermos (through the
+`-thermos_executor_flags` argument to the Aurora scheduler):
+
+    --runner-logger-mode=rotate
+    --runner-rotate-log-size-mb=100
+    --runner-rotate-log-backups=10
+
+In the above example, each instance of the Thermos runner will rotate stderr/stdout logs once they
+reach 100 MiB in size and keep a maximum of 10 backups. If a user has provided a custom setting for
+their process, it will override these default settings.
+
+
+## Thermos Executor Wrapper
+
+If you need to do computation before starting the Thermos executor (for example, setting a different
+`--announcer-hostname` parameter for every executor), then the Thermos executor should be invoked
+inside a wrapper script. In such a case, the Aurora scheduler should be started with
+`-thermos_executor_path` pointing to the wrapper script and `-thermos_executor_resources` set to a
+comma separated string of all the resources that should be copied into the sandbox (including the
+original Thermos executor). Ensure the wrapper script does not access resources outside of the
+sandbox, as when the script is run from within a Docker container those resources may not exist.
+
+For example, to wrap the executor inside a simple wrapper, the scheduler would be started with:
+`-thermos_executor_path=/path/to/wrapper.sh -thermos_executor_resources=/usr/share/aurora/bin/thermos_executor.pex`
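+
+A minimal wrapper could look like the following sketch. The hostname logic and the assumption
+that the executor PEX sits next to the wrapper inside the sandbox are illustrative only:
+
+    #!/bin/bash
+    # wrapper.sh -- copied into the sandbox via -thermos_executor_resources.
+    # Compute a per-host announcer hostname, then hand off to the real executor.
+    ANNOUNCER_HOSTNAME="$(hostname -f)"
+    exec "$(dirname "$0")/thermos_executor.pex" --announcer-hostname="$ANNOUNCER_HOSTNAME" "$@"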
+
+## Custom Executors
+
+The scheduler can be configured to utilize a custom executor by specifying the `-custom_executor_config` flag.
+The flag must be set to the path of a valid executor configuration file.
+
+For more information on this feature please see the custom executors [documentation](../../features/custom-executors/).
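+
+For example (the path below is illustrative):
+
+    -custom_executor_config=/etc/aurora/custom-executor-config.json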
+
+## A note on increasing executor overhead
+
+Increasing executor overhead on an existing cluster, whether it be for custom executors or for Thermos,
+will result in degraded preemption performance until all tasks that began life with the previous,
+lower-overhead executor configuration are preempted/restarted.
+
+## Controlling MTTA via Update Affinity
+
+When there is high resource contention in your cluster you may experience noticeably elevated job update
+times, as well as high task churn across the cluster. This is due to Aurora's first-fit scheduling
+algorithm. To alleviate this, you can enable update affinity where the Scheduler will make a best-effort
+attempt to reuse the same agent for the updated task (so long as the resources for the job are not being
+increased).
+
+To enable this in the Scheduler, you can set the following options:
+
+    -enable_update_affinity=true
+    -update_affinity_reservation_hold_time=3mins
+
+You will need to tune the hold time to match the behavior you see in your cluster. If you have extremely
+high update throughput, you might have to extend it as processing updates could easily add significant
+delays between scheduling attempts. You may also have to tune scheduling parameters to achieve the
+throughput you need in your cluster. Some relevant settings (with defaults) are:
+
+    -max_schedule_attempts_per_sec=40
+    -initial_schedule_penalty=1secs
+    -max_schedule_penalty=1mins
+    -scheduling_max_batch_size=3
+    -max_tasks_per_schedule_attempt=5
+
+There are metrics exposed by the Scheduler which can provide guidance on where the bottleneck is.
+Example metrics to look at:
+
+    - schedule_attempts_blocks (if this number is greater than 0, then task throughput is hitting
+                                limits controlled by -max_schedule_attempts_per_sec)
+    - scheduled_task_penalty_* (metrics around scheduling penalties for tasks, if the numbers here are high
+                                then you could have high contention for resources)
+
+Most likely, the first limit you will hit is the number of update instances that can be processed per
+minute. If your total work done per minute starts to exceed 2k instances, you may need to extend
+`-update_affinity_reservation_hold_time`.
+
+## Cluster Maintenance
+
+Aurora performs maintenance-related task drains. How often the scheduler polls for maintenance
+work can be controlled via:
+
+    -host_maintenance_polling_interval=1min
+
+## Enforcing SLA limitations
+
+Since tasks can specify their own `SLAPolicy`, the cluster needs to limit these SLA requirements.
+Too aggressive a requirement can permanently block any type of maintenance work
+(e.g. OS, kernel, or security upgrades) on a host and hold it hostage.
+
+An operator can control the limits for SLA requirements via these scheduler configuration options:
+
+    -max_sla_duration_secs=2hrs
+    -min_required_instances_for_sla_check=20
+
+_Note: These limits only apply for `CountSlaPolicy` and `PercentageSlaPolicy`._
+
+### Limiting Coordinator SLA
+
+With `CoordinatorSlaPolicy` the SLA calculation is off-loaded to an external HTTP service. Some
+relevant scheduler configuration options are:
+
+    -sla_coordinator_timeout=1min
+    -max_parallel_coordinated_maintenance=10
+
+Handing off the SLA calculation to an external service can potentially block maintenance
+on hosts for an indefinite amount of time (either due to a mis-configured coordinator or due to
+a genuinely degraded service). In those situations, the following metrics will be helpful to identify the
+offending tasks.
+
+    sla_coordinator_user_errors_*     (counter tracking number of times the coordinator for the task
+                                       returned a bad response.)
+    sla_coordinator_errors_*          (counter tracking number of times the scheduler was not able
+                                       to communicate with the coordinator of the task.)
+    sla_coordinator_lock_starvation_* (counter tracking number of times the scheduler was not able to
+                                       get the lock for the coordinator of the task.)
+
diff --git a/source/documentation/latest/operations/installation.md b/source/documentation/latest/operations/installation.md
new file mode 100644
index 0000000..12db004
--- /dev/null
+++ b/source/documentation/latest/operations/installation.md
@@ -0,0 +1,256 @@
+# Installing Aurora
+
+Source and binary distributions can be found on our
+[downloads](https://aurora.apache.org/downloads/) page.  Installing from binary packages is
+recommended for most users.
+
+- [Installing the scheduler](#installing-the-scheduler)
+- [Installing worker components](#installing-worker-components)
+- [Installing the client](#installing-the-client)
+- [Installing Mesos](#installing-mesos)
+- [Troubleshooting](#troubleshooting)
+
+If our binary packages don't suit you, our package build toolchain makes it easy to build your
+own packages. See the [instructions](https://github.com/apache/aurora-packaging) to learn how.
+
+
+## Machine profiles
+
+Given that many of these components communicate over the network, there are numerous ways you could
+assemble them to create an Aurora cluster.  The simplest way is to think in terms of three machine
+profiles:
+
+### Coordinator
+**Components**: ZooKeeper, Aurora scheduler, Mesos master
+
+A small number of machines (typically 3 or 5) responsible for cluster orchestration.  In most cases
+it is fine to co-locate these components in anything but very large clusters (> 1000 machines).
+Beyond that point, operators will likely want to manage these services on separate machines.
+In particular, you will want to use separate ZooKeeper ensembles for leader election and
+service discovery. Otherwise a service discovery error or outage can take down the entire cluster.
+
+In practice, 5 coordinators have been shown to reliably manage clusters with tens of thousands of
+machines.
+
+### Worker
+**Components**: Aurora executor, Aurora observer, Mesos agent
+
+The bulk of the cluster, where services will actually run.
+
+### Client
+**Components**: Aurora client, Aurora admin client
+
+Any machines that users submit jobs from.
+
+
+## Installing the scheduler
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        sudo start mesos-master
+
+2. Install ZooKeeper
+
+        sudo apt-get install -y zookeeperd
+
+3. Install the Aurora scheduler
+
+        sudo add-apt-repository -y ppa:openjdk-r/ppa
+        sudo apt-get update
+        sudo apt-get install -y openjdk-8-jre-headless wget
+
+        sudo update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-scheduler_0.17.0_amd64.deb
+        sudo dpkg -i aurora-scheduler_0.17.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-master
+
+2. Install ZooKeeper
+
+        sudo rpm -Uvh https://archive.cloudera.com/cdh4/one-click-install/redhat/6/x86_64/cloudera-cdh-4-0.x86_64.rpm
+        sudo yum install -y java-1.8.0-openjdk-headless zookeeper-server
+
+        sudo service zookeeper-server init
+        sudo systemctl start zookeeper-server
+
+3. Install the Aurora scheduler
+
+        sudo yum install -y wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-scheduler-0.17.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-scheduler-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Finalizing
+By default, the scheduler will start in an uninitialized mode.  This is because external
+coordination is necessary to be certain operator error does not result in a quorum of schedulers
+starting up and believing their databases are empty when in fact they should be re-joining a
+cluster.
+
+Because of this, a fresh install of the scheduler will need intervention to start up.  First,
+stop the scheduler service.
+Ubuntu: `sudo stop aurora-scheduler`
+CentOS: `sudo systemctl stop aurora`
+
+Now initialize the database:
+
+    sudo -u aurora mkdir -p /var/lib/aurora/scheduler/db
+    sudo -u aurora mesos-log initialize --path=/var/lib/aurora/scheduler/db
+
+Now you can start the scheduler back up.
+Ubuntu: `sudo start aurora-scheduler`
+CentOS: `sudo systemctl start aurora`
+
+
+## Installing worker components
+### Ubuntu Trusty
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-ubuntu-trusty), then run:
+
+        start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo apt-get install -y python2.7 wget
+
+        # NOTE: This appears to be a missing dependency of the mesos deb package and is needed
+        # for the python mesos native bindings.
+        sudo apt-get -y install libcurl4-nss-dev
+
+        wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-executor_0.17.0_amd64.deb
+        sudo dpkg -i aurora-executor_0.17.0_amd64.deb
+
+### CentOS 7
+
+1. Install Mesos
+   Skip down to [install mesos](#mesos-on-centos-7), then run:
+
+        sudo systemctl start mesos-slave
+
+2. Install Aurora executor and observer
+
+        sudo yum install -y python2 wget
+
+        wget -c https://apache.bintray.com/aurora/centos-7/aurora-executor-0.17.0-1.el7.centos.aurora.x86_64.rpm
+        sudo yum install -y aurora-executor-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Worker Configuration
+The executor typically does not require configuration.  Command line arguments can
+be passed to the executor via the `-thermos_executor_flags` argument on the scheduler.
+
+The observer needs to be configured to look at the correct mesos directory in order to find task
+sandboxes. You should first find the Mesos working directory by looking for the Mesos agent
+`--work_dir` flag. You should see something like:
+
+        ps -eocmd | grep "mesos-slave" | grep -v grep | tr ' ' '\n' | grep "\--work_dir"
+        --work_dir=/var/lib/mesos
+
+If the flag is not set, you can view the default value like so:
+
+        mesos-slave --help
+        Usage: mesos-slave [options]
+
+          ...
+          --work_dir=VALUE      Directory path to place framework work directories
+                                (default: /tmp/mesos)
+          ...
+
+The value you find for `--work_dir`, `/var/lib/mesos` in this example, should match the Aurora
+observer value for `--mesos-root`.  You can look for that setting in a similar way on a worker
+node by grepping for `thermos_observer` and `--mesos-root`.  If the flag is not set, you can view
+the default value like so:
+
+        thermos_observer -h
+        Options:
+          ...
+          --mesos-root=MESOS_ROOT
+                                The mesos root directory to search for Thermos
+                                executor sandboxes [default: /var/lib/mesos]
+          ...
+
+In this case the default is `/var/lib/mesos` and we have a match. If there is no match, you can
+either adjust the Mesos agent (mesos-slave) start script(s) and restart the agent(s), or else adjust the
+Aurora observer start scripts and restart the observers.  To adjust the Aurora observer:
+
+#### Ubuntu Trusty
+
+    sudo sh -c 'echo "MESOS_ROOT=/tmp/mesos" >> /etc/default/thermos'
+
+#### CentOS 7
+
+Make an edit to add the `--mesos-root` flag resulting in something like:
+
+    grep -A5 OBSERVER_ARGS /etc/sysconfig/thermos
+    OBSERVER_ARGS=(
+      --port=1338
+      --mesos-root=/tmp/mesos
+      --log_to_disk=NONE
+      --log_to_stderr=google:INFO
+    )
+
+
+## Installing the client
+### Ubuntu Trusty
+
+    sudo apt-get install -y python2.7 wget
+
+    wget -c https://apache.bintray.com/aurora/ubuntu-trusty/aurora-tools_0.17.0_amd64.deb
+    sudo dpkg -i aurora-tools_0.17.0_amd64.deb
+
+### CentOS 7
+
+    sudo yum install -y python2 wget
+
+    wget -c https://apache.bintray.com/aurora/centos-7/aurora-tools-0.17.0-1.el7.centos.aurora.x86_64.rpm
+    sudo yum install -y aurora-tools-0.17.0-1.el7.centos.aurora.x86_64.rpm
+
+### Mac OS X
+
+    brew upgrade
+    brew install aurora-cli
+
+### Client Configuration
+Client configuration lives in a JSON file that describes the clusters available and how to reach
+them.  By default this file is at `/etc/aurora/clusters.json`.
+
+Jobs may be submitted to the scheduler using the client, and are described with
+[job configurations](../../reference/configuration/) expressed in `.aurora` files.  Typically you will
+maintain a single job configuration file to describe one or more deployment environments (e.g.
+dev, test, prod) for a production job.
+
+
+## Installing Mesos
+Mesos uses a single package for the Mesos master and agent.  As a result, the package dependencies
+are identical for both.
+
+### Mesos on Ubuntu Trusty
+
+    sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF
+    DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]')
+    CODENAME=$(lsb_release -cs)
+
+    echo "deb http://repos.mesosphere.io/${DISTRO} ${CODENAME} main" | \
+      sudo tee /etc/apt/sources.list.d/mesosphere.list
+    sudo apt-get -y update
+
+    # Use `apt-cache showpkg mesos | grep [version]` to find the exact version.
+    sudo apt-get -y install mesos=1.1.0-2.0.107.ubuntu1404
+
+### Mesos on CentOS 7
+
+    sudo rpm -Uvh https://repos.mesosphere.io/el/7/noarch/RPMS/mesosphere-el-repo-7-1.noarch.rpm
+    sudo yum -y install mesos-1.1.0
+
+
+## Troubleshooting
+
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions in our [Troubleshooting guide](../troubleshooting/) to help get you moving.
diff --git a/source/documentation/latest/operations/monitoring.md b/source/documentation/latest/operations/monitoring.md
new file mode 100644
index 0000000..2ceda62
--- /dev/null
+++ b/source/documentation/latest/operations/monitoring.md
@@ -0,0 +1,181 @@
+# Monitoring your Aurora cluster
+
+Before you start running important services in your Aurora cluster, it's important to set up
+monitoring and alerting of Aurora itself.  Most of your monitoring can be against the scheduler,
+since it will give you a global view of what's going on.
+
+## Reading stats
+The scheduler exposes a *lot* of instrumentation data via its HTTP interface. You can get a quick
+peek at the first few of these in our vagrant image:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars | head'
+    async_tasks_completed 1004
+    attribute_store_fetch_all_events 15
+    attribute_store_fetch_all_events_per_sec 0.0
+    attribute_store_fetch_all_nanos_per_event 0.0
+    attribute_store_fetch_all_nanos_total 3048285
+    attribute_store_fetch_all_nanos_total_per_sec 0.0
+    attribute_store_fetch_one_events 3391
+    attribute_store_fetch_one_events_per_sec 0.0
+    attribute_store_fetch_one_nanos_per_event 0.0
+    attribute_store_fetch_one_nanos_total 454690753
+
+These values are served as `Content-Type: text/plain`, with each line containing a space-separated metric
+name and value. Values may be integers, doubles, or strings (note: strings are static, others
+may be dynamic).
+
+If your monitoring infrastructure prefers JSON, the scheduler exports that as well:
+
+    $ vagrant ssh -c 'curl -s localhost:8081/vars.json | python -mjson.tool | head'
+    {
+        "async_tasks_completed": 1009,
+        "attribute_store_fetch_all_events": 15,
+        "attribute_store_fetch_all_events_per_sec": 0.0,
+        "attribute_store_fetch_all_nanos_per_event": 0.0,
+        "attribute_store_fetch_all_nanos_total": 3048285,
+        "attribute_store_fetch_all_nanos_total_per_sec": 0.0,
+        "attribute_store_fetch_one_events": 3409,
+        "attribute_store_fetch_one_events_per_sec": 0.0,
+        "attribute_store_fetch_one_nanos_per_event": 0.0,
+
+This will be the same data as above, served with `Content-Type: application/json`.
+
+## Viewing live stat samples on the scheduler
+The scheduler uses the Twitter commons stats library, which keeps an internal time-series database
+of exported variables - nearly everything in `/vars` is available for instant graphing.  This is
+useful for debugging, but is not a replacement for an external monitoring system.
+
+You can view these graphs on a scheduler at `/graphview`.  It supports some composition and
+aggregation of values, which can be invaluable when triaging a problem.  For example, if you have
+the scheduler running in vagrant, check out these links:
+* [simple graph](http://192.168.33.7:8081/graphview?query=jvm_uptime_secs)
+* [complex composition](http://192.168.33.7:8081/graphview?query=rate\(scheduler_log_native_append_nanos_total\)%2Frate\(scheduler_log_native_append_events\)%2F1e6)
+
+### Counters and gauges
+Among numeric stats, there are two fundamental types of stats exported: _counters_ and _gauges_.
+Counters are guaranteed to be monotonically-increasing for the lifetime of a process, while gauges
+may decrease in value.  Aurora uses counters to represent things like the number of times an event
+has occurred, and gauges to capture things like the current length of a queue.  Counters are a
+natural fit for accurate composition into [rate ratios](http://en.wikipedia.org/wiki/Rate_ratio)
+(useful for sample-resistant latency calculation), while gauges are not.
+
+# Alerting
+
+## Quickstart
+If you are looking for just bare-minimum alerting to get something in place quickly, set up alerting
+on `framework_registered` and `task_store_LOST`. These will give you a decent picture of overall
+health.
+
+## A note on thresholds
+One of the most difficult things in monitoring is choosing alert thresholds. With many of these
+stats, there is no value we can offer as a threshold that will be guaranteed to work for you. It
+will depend on the size of your cluster, number of jobs, churn of tasks in the cluster, etc. We
+recommend you start with a strict value after viewing a small amount of collected data, and then
+adjust thresholds as you see fit. Feel free to ask us if you would like to validate that your alerts
+and thresholds make sense.
+
+## Important stats
+
+### `jvm_uptime_secs`
+Type: integer counter
+
+The number of seconds the JVM process has been running. Comes from
+[RuntimeMXBean#getUptime()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/RuntimeMXBean.html#getUptime\(\))
+
+Detecting resets (decreasing values) on this stat will tell you that the scheduler is failing to
+stay alive.
+
+Look at the scheduler logs to identify the reason the scheduler is exiting.
+
+### `system_load_avg`
+Type: double gauge
+
+The current load average of the system for the last minute. Comes from
+[OperatingSystemMXBean#getSystemLoadAverage()](http://docs.oracle.com/javase/7/docs/api/java/lang/management/OperatingSystemMXBean.html?is-external=true#getSystemLoadAverage\(\)).
+
+A high sustained value suggests that the scheduler machine may be over-utilized.
+
+Use standard unix tools like `top` and `ps` to track down the offending process(es).
+
+### `process_cpu_cores_utilized`
+Type: double gauge
+
+The current number of CPU cores in use by the JVM process. This should not exceed the number of
+logical CPU cores on the machine. Derived from
+[OperatingSystemMXBean#getProcessCpuTime()](http://docs.oracle.com/javase/7/docs/jre/api/management/extension/com/sun/management/OperatingSystemMXBean.html)
+
+A high sustained value indicates that the scheduler is overworked. Due to current internal design
+limitations, if this value is sustained at `1`, there is a good chance the scheduler is under water.
+
+There are two main inputs that tend to drive this figure: task scheduling attempts and status
+updates from Mesos.  You may see activity in the scheduler logs to give an indication of where
+time is being spent.  Beyond that, it really takes good familiarity with the code to effectively
+triage this.  We suggest engaging with an Aurora developer.
+
+### `task_store_LOST`
+Type: integer gauge
+
+The number of tasks stored in the scheduler that are in the `LOST` state, and have been rescheduled.
+
+If this value is increasing at a high rate, it is a sign of trouble.
+
+There are many sources of `LOST` tasks in Mesos: the scheduler, master, agent, and executor can all
+trigger this.  The first step is to look in the scheduler logs for `LOST` to identify where the
+state changes are originating.
+
+### `scheduler_resource_offers`
+Type: integer counter
+
+The number of resource offers that the scheduler has received.
+
+For a healthy scheduler, this value must be increasing over time.
+
+Assuming the scheduler is up and otherwise healthy, you will want to check if the master thinks it
+is sending offers. You should also look at the master's web interface to see if it has a large
+number of outstanding offers that it is waiting to be returned.
+
+### `framework_registered`
+Type: binary integer counter
+
+Will be `1` for the leading scheduler that is registered with the Mesos master, `0` for passive
+schedulers.
+
+A sustained period without a `1` (or where `sum() != 1`) warrants investigation.
+
+If there is no leading scheduler, look in the scheduler and master logs for why.  If there are
+multiple schedulers claiming leadership, this suggests a split brain and warrants filing a critical
+bug.
+
+### `rate(scheduler_log_native_append_nanos_total)/rate(scheduler_log_native_append_events)`
+Type: rate ratio of integer counters
+
+This composes two counters to compute a windowed figure for the latency of replicated log writes.
+
+A hike in this value suggests disk bandwidth contention.
+
+Look in scheduler logs for any reported oddness with saving to the replicated log. Also use
+standard tools like `vmstat` and `iotop` to identify whether the disk has become slow or
+over-utilized. We suggest using a dedicated disk for the replicated log to mitigate this.
+
+### `timed_out_tasks`
+Type: integer counter
+
+Tracks the number of times the scheduler has given up while waiting
+(for `-transient_task_state_timeout`) to hear back about a task that is in a transient state
+(e.g. `ASSIGNED`, `KILLING`), and has moved to `LOST` before rescheduling.
+
+This value is currently known to increase occasionally when the scheduler fails over
+([AURORA-740](https://issues.apache.org/jira/browse/AURORA-740)). However, any large spike in this
+value warrants investigation.
+
+The scheduler will log when it times out a task. You should trace the task ID of the timed out
+task into the master, agent, and/or executors to determine where the message was dropped.
+
+### `http_500_responses_events`
+Type: integer counter
+
+The total number of HTTP 500 status responses sent by the scheduler. Includes API and asset serving.
+
+An increase warrants investigation.
+
+Look in scheduler logs to identify why the scheduler returned a 500; there should be a stack trace.
diff --git a/source/documentation/latest/operations/security.md b/source/documentation/latest/operations/security.md
new file mode 100644
index 0000000..1fdf10f
--- /dev/null
+++ b/source/documentation/latest/operations/security.md
@@ -0,0 +1,362 @@
+Securing your Aurora Cluster
+============================
+
+Aurora integrates with [Apache Shiro](http://shiro.apache.org/) to provide security
+controls for its API. In addition to providing some useful features out of the box, Shiro
+also allows Aurora cluster administrators to adapt the security system to their organization’s
+existing infrastructure. The announcer in the Aurora thermos executor also supports security
+controls for talking to ZooKeeper.
+
+
+- [Enabling Security](#enabling-security)
+- [Authentication](#authentication)
+	- [HTTP Basic Authentication](#http-basic-authentication)
+		- [Server Configuration](#server-configuration)
+		- [Client Configuration](#client-configuration)
+	- [HTTP SPNEGO Authentication (Kerberos)](#http-spnego-authentication-kerberos)
+		- [Server Configuration](#server-configuration-1)
+		- [Client Configuration](#client-configuration-1)
+- [Authorization](#authorization)
+	- [Using an INI file to define security controls](#using-an-ini-file-to-define-security-controls)
+		- [Caveats](#caveats)
+- [Implementing a Custom Realm](#implementing-a-custom-realm)
+	- [Packaging a realm module](#packaging-a-realm-module)
+- [Announcer Authentication](#announcer-authentication)
+    - [ZooKeeper authentication configuration](#zookeeper-authentication-configuration)
+    - [Executor settings](#executor-settings)
+- [Scheduler HTTPS](#scheduler-https)
+- [Known Issues](#known-issues)
+
+# Enabling Security
+
+There are two major components of security:
+[authentication and authorization](http://en.wikipedia.org/wiki/Authentication#Authorization).  A
+cluster administrator may choose the approach used for each, and may also implement custom
+mechanisms for either.  Later sections describe the options available. To enable authentication
+for the announcer, see [Announcer Authentication](#announcer-authentication).
+
+
+# Authentication
+
+At a minimum, the scheduler must be configured with instructions for how to process authentication
+credentials.  There are currently two built-in authentication schemes -
+[HTTP Basic Authentication](http://en.wikipedia.org/wiki/Basic_access_authentication), and
+[SPNEGO](http://en.wikipedia.org/wiki/SPNEGO) (Kerberos).
+
+## HTTP Basic Authentication
+
+Basic Authentication is a very quick way to add *some* security.  It is supported
+by all major browsers and HTTP client libraries with minimal work.  However,
+before relying on Basic Authentication you should be aware of the [security
+considerations](http://tools.ietf.org/html/rfc2617#section-4).
+
+### Server Configuration
+
+At a minimum you need to set the following command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_realm_modules=INI_AUTHNZ
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+[users]
+sally = apple, admin
+
+[roles]
+admin = *
+```
+
+The details of the security.ini file are explained below. Note that this file contains plaintext,
+unhashed passwords.
+
+### Client Configuration
+
+To configure the client for HTTP Basic authentication, add an entry to `~/.netrc` with your credentials:
+
+```
+% cat ~/.netrc
+# ...
+
+machine aurora.example.com
+login sally
+password apple
+
+# ...
+```
+
+No changes are required to `clusters.json`.
+
+## HTTP SPNEGO Authentication (Kerberos)
+
+### Server Configuration
+At a minimum you need to set the following command-line flags on the scheduler:
+
+```
+-http_authentication_mechanism=NEGOTIATE
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+-shiro_ini_path=path/to/security.ini
+```
+
+And create a security.ini file like so:
+
+```
+% cat path/to/security.ini
+[users]
+sally = _, admin
+
+[roles]
+admin = *
+```
+
+What's going on here? First, Aurora must be configured to request Kerberos credentials when presented with an
+unauthenticated request. This is achieved by setting
+
+```
+-http_authentication_mechanism=NEGOTIATE
+```
+
+Next, a Realm module must be configured to **authenticate** the current request using the Kerberos
+credentials that were requested. Aurora ships with a realm module that can do this
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN[,...]
+```
+
+The Kerberos5Realm requires a keytab file and a server principal name. The principal name will usually
+be in the form `HTTP/aurora.example.com@EXAMPLE.COM`.
+
+```
+-kerberos_server_principal=HTTP/aurora.example.com@EXAMPLE.COM
+-kerberos_server_keytab=path/to/aurora.example.com.keytab
+```
+
+The Kerberos5 realm module is authentication-only. For scheduler security to work you must also
+enable a realm module that provides an Authorizer implementation. For example, to do this using the
+IniShiroRealmModule:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ
+```
+
+You can then configure authorization using a security.ini file as described below
+(the password field is ignored). You must configure the realm module with the path to this file:
+
+```
+-shiro_ini_path=path/to/security.ini
+```
+
+### Client Configuration
+To use Kerberos on the client-side you must build Kerberos-enabled client binaries. Do this with
+
+```
+./pants binary src/main/python/apache/aurora/kerberos:kaurora
+./pants binary src/main/python/apache/aurora/kerberos:kaurora_admin
+```
+
+You must also configure each cluster where you've enabled Kerberos on the scheduler
+to use Kerberos authentication. Do this by setting `auth_mechanism` to `KERBEROS`
+in `clusters.json`.
+
+```
+% cat ~/.aurora/clusters.json
+{
+    "devcluser": {
+        "auth_mechanism": "KERBEROS",
+        ...
+    },
+    ...
+}
+```
+
+# Authorization
+Given a means to authenticate the entity a client claims they are, we need to define what privileges they have.
+
+## Using an INI file to define security controls
+
+The simplest security configuration for Aurora is an INI file on the scheduler.  For small
+clusters, or clusters where the users and access controls change relatively infrequently, this is
+likely the preferred approach.  However you may want to avoid this approach if access permissions
+are rapidly changing, or if your access control information already exists in another system.
+
+You can enable INI-based configuration with following scheduler command line arguments:
+
+```
+-http_authentication_mechanism=BASIC
+-shiro_ini_path=path/to/security.ini
+```
+
+*note* As the argument name reveals, this is using Shiro’s
+[IniRealm](http://shiro.apache.org/configuration.html#Configuration-INIConfiguration) behind
+the scenes.
+
+The INI file will contain two sections - users and roles.  Here’s an example for what might
+be in security.ini:
+
+```
+[users]
+sally = apple, admin
+jim = 123456, accounting
+becky = letmein, webapp
+larry = 654321,accounting
+steve = password
+
+[roles]
+admin = *
+accounting = thrift.AuroraAdmin:setQuota
+webapp = thrift.AuroraSchedulerManager:*:webapp
+```
+
+The users section defines user credentials and the role(s) they are members of.  These lines
+are of the format `<user> = <password>[, <role>...]`.  As you probably noticed, the passwords are
+in plaintext and as a result read access to this file should be restricted.
+
+In this configuration, each user has different privileges for actions in the cluster because
+of the roles they are a part of:
+
+* admin is granted all privileges
+* accounting may adjust the amount of resource quota for any role
+* webapp represents a collection of jobs that represents a service, and its members may create and modify any jobs owned by it
+
+### Caveats
+You might find documentation on the Internet suggesting there are additional sections in `shiro.ini`,
+like `[main]` and `[urls]`. These are not supported by Aurora as it uses a different mechanism to configure
+those parts of Shiro. Think of Aurora's `security.ini` as a subset with only `[users]` and `[roles]` sections.
+
+## Implementing Delegated Authorization
+
+It is possible to leverage Shiro's `runAs` feature by implementing a custom Servlet Filter that provides
+the capability and passing its fully qualified class name to the command line argument
+`-shiro_after_auth_filter`. The filter is registered in the same filter chain as the Shiro auth filters
+and is placed after the Shiro auth filters in the filter chain. This ensures that the Filter is invoked
+after the Shiro filters have had a chance to authenticate the request.
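+
+For example (the filter class name below is purely hypothetical):
+
+```
+-shiro_after_auth_filter=com.example.MyRunAsFilter
+```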
+
+# Implementing a Custom Realm
+
+Since Aurora’s security is backed by [Apache Shiro](https://shiro.apache.org), you can implement a
+custom [Realm](http://shiro.apache.org/realm.html) to define organization-specific security behavior.
+
+In addition to using Shiro's standard APIs to implement a Realm you can link against Aurora to
+access the type-safe Permissions Aurora uses. See the Javadoc for `org.apache.aurora.scheduler.spi`
+for more information.
+
+## Packaging a realm module
+Package your custom Realm(s) with a Guice module that exposes a `Set<Realm>` multibinding.
+
+```java
+package com.example;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.multibindings.Multibinder;
+import org.apache.shiro.realm.Realm;
+
+public class MyRealmModule extends AbstractModule {
+  @Override
+  public void configure() {
+    Realm myRealm = new MyRealm();
+
+    Multibinder.newSetBinder(binder(), Realm.class).addBinding().toInstance(myRealm);
+  }
+
+  static class MyRealm implements Realm {
+    // Realm implementation.
+  }
+}
+```
+
+To use your module in the scheduler, include it as a realm module based on its fully-qualified
+class name:
+
+```
+-shiro_realm_modules=KERBEROS5_AUTHN,INI_AUTHNZ,com.example.MyRealmModule
+```
+
+
+# Announcer Authentication
+The Thermos executor can be configured to authenticate with ZooKeeper and include
+an [ACL](https://zookeeper.apache.org/doc/current/zookeeperProgrammers.html#sc_ZooKeeperAccessControl)
+on the nodes it creates, which will specify
+the privileges of clients to perform different actions on these nodes.  This
+feature is enabled by specifying an ACL configuration file to the executor with the
+`--announcer-zookeeper-auth-config` command line argument.
+
+When this feature is _not_ enabled, nodes created by the executor will have 'world/all' permission
+(`ZOO_OPEN_ACL_UNSAFE`).  In most production environments, operators should specify an ACL and
+limit access.
+
+## ZooKeeper Authentication Configuration
+The configuration file must be formatted as JSON with the following schema:
+
+```json
+{
+  "auth": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>"
+    }
+  ],
+  "acl": [
+    {
+      "scheme": "<scheme>",
+      "credential": "<plain_credential>",
+      "permissions": {
+        "read": <bool>,
+        "write": <bool>,
+        "create": <bool>,
+        "delete": <bool>,
+        "admin": <bool>
+      }
+    }
+  ]
+}
+```
+
+The `scheme`
+defines the encoding of the credential field.  Note that these fields are passed directly to
+ZooKeeper (except in the case of _digest_ scheme, where the executor will hash and encode
+the credential appropriately before passing it to ZooKeeper). In addition to `acl`, a list of
+authentication credentials must be provided in `auth` to use for the connection.
+
+All properties of the `permissions` object default to `false` if not provided.
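+
+For illustration, a minimal sketch of such a file using the _digest_ scheme might look like the
+following (the credential value is hypothetical):
+
+```json
+{
+  "auth": [
+    {
+      "scheme": "digest",
+      "credential": "aurora-executor:some-secret"
+    }
+  ],
+  "acl": [
+    {
+      "scheme": "digest",
+      "credential": "aurora-executor:some-secret",
+      "permissions": {
+        "read": true,
+        "write": true,
+        "create": true,
+        "delete": true,
+        "admin": true
+      }
+    }
+  ]
+}
+```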
+
+## Executor settings
+To enable the executor to authenticate against ZK, `--announcer-zookeeper-auth-config` should be
+set to the configuration file.
+
+
+# Scheduler HTTPS
+
+The Aurora scheduler does not provide native HTTPS support ([AURORA-343](https://issues.apache.org/jira/browse/AURORA-343)).
+It is therefore recommended to deploy it behind an HTTPS capable reverse proxy such as nginx or Apache2.
+
+A simple setup is to launch both the reverse proxy and the Aurora scheduler on the same port, but
+bind the reverse proxy to the public IP of the host and the scheduler to localhost:
+
+    -ip=127.0.0.1
+    -http_port=8081
+
+If your clients connect to the scheduler via [`proxy_url`](../../reference/scheduler-configuration/),
+you can update it to `https`. If you use the ZooKeeper based discovery instead, the scheduler
+needs to be launched via
+
+    -serverset_endpoint_name=https
+
+in order to announce its HTTPS support within ZooKeeper.
+
+
+# Known Issues
+
+While the APIs and SPIs we ship with are stable as of 0.8.0, we are aware of several opportunities
+for incremental improvement. Please follow, vote, or send patches.
+
+Relevant tickets:
+* [AURORA-1248](https://issues.apache.org/jira/browse/AURORA-1248): Client retries 4xx errors
+* [AURORA-1279](https://issues.apache.org/jira/browse/AURORA-1279): Remove kerberos-specific build targets
+* [AURORA-1293](https://issues.apache.org/jira/browse/AURORA-1291): Consider defining a JSON format in place of INI
+* [AURORA-1179](https://issues.apache.org/jira/browse/AURORA-1179): Support hashed passwords in security.ini
+* [AURORA-1295](https://issues.apache.org/jira/browse/AURORA-1295): Support security for the ReadOnlyScheduler service
diff --git a/source/documentation/latest/operations/storage.md b/source/documentation/latest/operations/storage.md
new file mode 100644
index 0000000..fc3a1c3
--- /dev/null
+++ b/source/documentation/latest/operations/storage.md
@@ -0,0 +1,96 @@
+# Aurora Scheduler Storage
+
+- [Overview](#overview)
+- [Storage Semantics](#storage-semantics)
+  - [Reads, writes, modifications](#reads-writes-modifications)
+    - [Read lifecycle](#read-lifecycle)
+    - [Write lifecycle](#write-lifecycle)
+  - [Atomicity, consistency and isolation](#atomicity-consistency-and-isolation)
+  - [Population on restart](#population-on-restart)
+
+
+## Overview
+
+The Aurora scheduler maintains data that needs to be persisted to survive failovers and restarts.
+For example:
+
+* Task configurations and scheduled task instances
+* Job update configurations and update progress
+* Production resource quotas
+* Mesos resource offer host attributes
+
+Aurora solves its persistence needs by leveraging the
+[Mesos implementation of a Paxos replicated log](http://mesos.apache.org/documentation/latest/replicated-log-internals/)
+[[1]](https://ramcloud.stanford.edu/~ongaro/userstudy/paxos.pdf)
+[[2]](http://en.wikipedia.org/wiki/State_machine_replication) with a key-value
+[LevelDB](https://github.com/google/leveldb) storage as persistence media.
+
+Conceptually, it can be represented by the following major components:
+
+* Volatile storage: in-memory cache of all available data. Implemented via in-memory
+[H2 Database](http://www.h2database.com/html/main.html) and accessed via
+[MyBatis](http://mybatis.github.io/mybatis-3/).
+* Log manager: interface between Aurora storage and Mesos replicated log. The default schema format
+is [thrift](https://github.com/apache/thrift). Data is stored in serialized binary form.
+* Snapshot manager: all data is periodically persisted in Mesos replicated log in a single snapshot.
+This helps establish periodic recovery checkpoints and speeds up volatile storage recovery on
+restart.
+* Backup manager: as a precaution, snapshots are periodically written out into backup files.
+This solves a [disaster recovery problem](../backup-restore/)
+in case of a complete loss or corruption of Mesos log files.
+
+![Storage hierarchy](../images/storage_hierarchy.png)
+
+
+## Storage Semantics
+
+This section covers implementation details of the Aurora storage system. Understanding these can
+sometimes be useful when investigating performance issues.
+
+### Reads, writes, modifications
+
+All services in Aurora access data via a set of predefined store interfaces (aka stores) logically
+grouped by the type of data they serve. Every interface defines a specific set of operations allowed
+on the data, thus abstracting out the storage access and the actual persistence implementation. The
+latter is especially important in view of a general immutability of persisted data. With the Mesos
+replicated log as the underlying persistence solution, data can be read and written easily but not
+modified. All modifications are simulated by saving new versions of modified objects. This feature
+and general performance considerations justify the existence of the volatile in-memory store.
+
+#### Read lifecycle
+
+There are two types of reads available in Aurora: consistent and weakly-consistent. The difference
+is explained [below](#atomicity-consistency-and-isolation).
+
+All reads are served from the volatile storage, making them generally cheap operations from a
+performance standpoint. The majority of the volatile stores are represented by the
+in-memory H2 database. This allows for rich schema definitions, queries and relationships that
+key-value storage is unable to match.
+
+#### Write lifecycle
+
+Writes are more involved operations since, in addition to updating the volatile store, data has to be
+appended to the replicated log. Data is not available for reads until fully acknowledged by both the
+replicated log and volatile storage.
+
+### Atomicity, consistency and isolation
+
+Aurora uses [write-ahead logging](http://en.wikipedia.org/wiki/Write-ahead_logging) to ensure
+consistency between replicated and volatile storage. In Aurora, data is first written into the
+replicated log and only then updated in the volatile store.
+
+Aurora storage uses read-write locks to serialize data mutations and provide a consistent view of the
+available data. The `Storage` interface exposes 3 major types of operations:
+* `consistentRead` - access is locked using the reader's lock and provides a consistent view on read
+* `weaklyConsistentRead` - access is lock-less. Delivers the best contention performance but may result
+in stale reads
+* `write` - access is fully serialized using the writer's lock. Operation success requires both
+volatile and replicated writes to succeed.
+
+The consistency of the volatile store is enforced via H2 transactional isolation.
+
+### Population on restart
+
+Any time a scheduler restarts, it restores its volatile state from the most recent position recorded
+in the replicated log by restoring the snapshot and replaying individual log entries on top to fully
+recover the state up to the last write.
diff --git a/source/documentation/latest/operations/troubleshooting.md b/source/documentation/latest/operations/troubleshooting.md
new file mode 100644
index 0000000..a7d4ef2
--- /dev/null
+++ b/source/documentation/latest/operations/troubleshooting.md
@@ -0,0 +1,106 @@
+# Troubleshooting
+
+So you've started your first cluster and are running into some issues? We've collected some common
+stumbling blocks and solutions here to help get you moving.
+
+## Replicated log not initialized
+
+### Symptoms
+- Scheduler RPCs and web interface claim `Storage is not READY`
+- Scheduler log repeatedly prints messages like
+
+  ```
+  I1016 16:12:27.234133 26081 replica.cpp:638] Replica in EMPTY status
+  received a broadcasted recover request
+  I1016 16:12:27.234256 26084 recover.cpp:188] Received a recover response
+  from a replica in EMPTY status
+  ```
+
+### Solution
+When you create a new cluster, you need to inform a quorum of schedulers that they are safe to
+consider their database to be empty by [initializing](../installation/#finalizing) the
+replicated log. This is done to prevent the scheduler from modifying the cluster state in the event
+of multiple simultaneous disk failures or, more likely, misconfiguration of the replicated log path.
+
+
+## No distinct leader elected
+
+### Symptoms
+Either no scheduler or multiple schedulers believe themselves to be leading.
+
+### Solution
+Verify the [network configuration](../configuration/#network-configuration) of the Aurora
+scheduler is correct:
+
+* The `LIBPROCESS_IP:LIBPROCESS_PORT` endpoints must be reachable from all coordinator nodes running
+  a scheduler or a Mesos master (see the sketch below).
+* Hostname lookups have to resolve to public IPs rather than local ones that cannot be reached
+  from another node.
+
+In addition, double-check the [quota settings](../configuration/#replicated-log-configuration) of the
+replicated log.
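+
+As a sketch, the libprocess endpoint is typically controlled via environment variables when the
+scheduler is launched (the values below are hypothetical):
+
+    export LIBPROCESS_IP=192.168.33.7    # must be reachable from masters and other schedulers
+    export LIBPROCESS_PORT=8083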
+
+
+## Scheduler not registered
+
+### Symptoms
+Scheduler log contains
+
+    Framework has not been registered within the tolerated delay.
+
+### Solution
+Double-check that the scheduler is configured correctly to reach the Mesos master. If you are registering
+the master in ZooKeeper, make sure the command line argument to the master:
+
+    --zk=zk://$ZK_HOST:2181/mesos/master
+
+is the same as the one on the scheduler:
+
+    -mesos_master_address=zk://$ZK_HOST:2181/mesos/master
+
+
+## Scheduler not running
+
+### Symptoms
+The scheduler process commits suicide regularly. This happens under error conditions, but
+also on purpose at regular intervals.
+
+### Solution
+Aurora is meant to be run under supervision. You have to configure a supervisor like
+[Monit](http://mmonit.com/monit/), [supervisord](http://supervisord.org/), or systemd to run the
+scheduler and restart it whenever it fails or exits on purpose.
+
+Aurora supports an active health checking protocol on its admin HTTP interface - if a `GET /health`
+times out or returns anything other than `200 OK` the scheduler process is unhealthy and should be
+restarted.
+
+For example, monit can be configured with
+
+    if failed port 8081 send "GET /health HTTP/1.0\r\n" expect "OK\n" with timeout 2 seconds for 10 cycles then restart
+
+assuming you set `-http_port=8081`.
+
+
+## Executor crashing or hanging
+
+### Symptoms
+Launched task instances never transition to `STARTING` or `RUNNING` but immediately transition
+to `FAILED` or `LOST`.
+
+### Solution
+The executor might be failing due to unknown internal errors such as a missing native dependency
+of the Mesos executor library. Open the Mesos UI and navigate to the failing
+task in question. Inspect the various log files in order to learn about what is going on.
+
+
+## Observer does not discover tasks
+
+### Symptoms
+The observer UI does not list any tasks. When navigating from the scheduler UI to the state of
+a particular task instance the observer returns `Error: 404 Not Found`.
+
+### Solution
+The observer refreshes its internal state every couple of seconds. If waiting a few seconds
+does not resolve the issue, check that the `--mesos-root` setting of the observer and the
+`--work_dir` option of the Mesos agent are in sync. For details, see our
+[Install instructions](../installation/#worker-configuration).
diff --git a/source/documentation/latest/operations/upgrades.md b/source/documentation/latest/operations/upgrades.md
new file mode 100644
index 0000000..635faf5
--- /dev/null
+++ b/source/documentation/latest/operations/upgrades.md
@@ -0,0 +1,41 @@
+# Upgrading Aurora
+
+Aurora can be updated from one version to the next without any downtime or restarts of running
+jobs. The same holds true for Mesos.
+
+Generally speaking, Mesos and Aurora strive for +1/-1 version compatibility, i.e. all components
+are meant to be forward and backwards compatible for at least one version. This implies it
+does not really matter in which order updates are carried out.
+
+Exceptions to this rule are documented in the [Aurora release-notes](../../../RELEASE-NOTES/)
+and the [Mesos upgrade instructions](https://mesos.apache.org/documentation/latest/upgrades/).
+
+
+## Instructions
+
+To upgrade Aurora, follow these steps:
+
+1. Update the first scheduler instance by updating its software and restarting its process.
+2. Wait until the scheduler is up and its [Replicated Log](../configuration/#replicated-log-configuration)
+   has caught up with the other schedulers in the cluster. The log has caught up if `log/recovered` has
+   the value `1`. You can check the metric via `curl LIBPROCESS_IP:LIBPROCESS_PORT/metrics/snapshot`,
+   where the IP and port refer to the [libmesos configuration](../configuration/#network-configuration)
+   settings of the scheduler instance (see the example after this list).
+3. Proceed with the next scheduler until all instances are updated.
+4. Update the Aurora executor deployed to the compute nodes of your cluster. Jobs will continue
+   running with the old version of the executor, and will only be launched by the new one once
+   they are restarted eventually due to natural cluster churn.
+5. Distribute the new Aurora client to your users.
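+
+For instance, a sketch of checking the `log/recovered` metric on one scheduler (host and port are
+hypothetical; substitute your scheduler's libprocess settings):
+
+    curl -s 192.168.33.7:8083/metrics/snapshot | python -m json.tool | grep '"log/recovered"'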
+
+
+## Best Practices
+
+Even though not absolutely mandatory, we advise adhering to the following rules:
+
+* Never skip any major or minor releases when updating. If you have to catch up on several releases,
+  you have to deploy all intermediate versions. Skipping bugfix releases is acceptable, though.
+* Verify all updates on a test cluster before touching your production deployments.
+* To minimize the number of failovers during updates, update the currently leading scheduler
+  instance last.
+* Update the Aurora executor on a subset of compute nodes as a canary before deploying the change to
+  the whole fleet.
diff --git a/source/documentation/latest/reference/client-cluster-configuration.md b/source/documentation/latest/reference/client-cluster-configuration.md
new file mode 100644
index 0000000..ff7a3ac
--- /dev/null
+++ b/source/documentation/latest/reference/client-cluster-configuration.md
@@ -0,0 +1,99 @@
+# Client Cluster Configuration
+
+A cluster configuration file is used by the Aurora client to describe the Aurora clusters with
+which it can communicate. Ultimately this allows client users to reference clusters with short names
+like us-east and eu.
+
+A cluster configuration is formatted as JSON.  The simplest cluster configuration is one that
+communicates with a single (non-leader-elected) scheduler.  For example:
+
+    [{
+      "name": "example",
+      "scheduler_uri": "http://localhost:55555",
+    }]
+
+
+A configuration for a leader-elected scheduler would contain something like:
+
+    [{
+      "name": "example",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler"
+    }]
+
+
+The following properties may be set:
+
+  **Property**             | **Type** | **Description**
+  :------------------------| :------- | :--------------
+   **name**                | String   | Cluster name (Required)
+   **slave_root**          | String   | Path to Mesos agent work dir (Required)
+   **slave_run_directory** | String   | Name of Mesos agent run dir (Required)
+   **zk**                  | String   | Hostname of ZooKeeper instance used to resolve Aurora schedulers.
+   **zk_port**             | Integer  | Port of ZooKeeper instance used to locate Aurora schedulers (Default: 2181)
+   **scheduler_zk_path**   | String   | ZooKeeper path under which scheduler instances are registered.
+   **scheduler_uri**       | String   | URI of Aurora scheduler instance.
+   **proxy_url**           | String   | Used by the client to format URLs for display.
+   **auth_mechanism**      | String   | The authentication mechanism to use when communicating with the scheduler. (Default: UNAUTHENTICATED)
+   **docker_registry**     | String   | Used by the client to resolve docker tags.
+
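+As an illustrative sketch, an entry combining several of these properties might look like the
+following (all values are hypothetical):
+
+    [{
+      "name": "example",
+      "zk": "192.168.33.7",
+      "scheduler_zk_path": "/aurora/scheduler",
+      "slave_root": "/var/lib/mesos",
+      "slave_run_directory": "latest",
+      "proxy_url": "https://aurora.example.com",
+      "auth_mechanism": "UNAUTHENTICATED"
+    }]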
+
+## Details
+
+### `name`
+
+The name of the Aurora cluster represented by this entry. This name will be the `cluster` portion of
+any job keys identifying jobs running within the cluster.
+
+### `slave_root`
+
+The path on the Mesos agents where executing tasks can be found. It is used in combination with the
+`slave_run_directory` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This value should match the value passed to `mesos-slave`
+as `-work_dir`.
+
+### `slave_run_directory`
+
+The name of the directory where the task run can be found. This is used in combination with the
+`slave_root` property by `aurora task run` and `aurora task ssh` to change into the sandbox
+directory after connecting to the host. This should almost always be set to `latest`.
+
+### `zk`
+
+The hostname of the ZooKeeper instance used to resolve the Aurora scheduler. Aurora uses ZooKeeper
+to elect a leader. The client will connect to this ZooKeeper instance to determine the current
+leader. This host should match the host passed to the scheduler as `-zk_endpoints`.
+
+### `zk_port`
+
+The port on which the ZooKeeper instance is running. If not set this will default to the standard
+ZooKeeper port of 2181. This port should match the port in the host passed to the scheduler as
+`-zk_endpoints`.
+
+### `scheduler_zk_path`
+
+The path on the ZooKeeper instance under which the Aurora serverset is registered. This value should
+match the value passed to the scheduler as `-serverset_path`.
+
+### `scheduler_uri`
+
+The URI of the scheduler. This would be used in place of the ZooKeeper related configuration above
+in circumstances where direct communication with a single scheduler is needed (e.g. testing
+environments). It is strongly advised to **never** use this property for production deploys.
+
+### `proxy_url`
+
+Instead of using the hostname of the leading scheduler as the base url, if `proxy_url` is set, its
+value will be used instead. In that scenario the value for `proxy_url` would be, for example, the
+URL of your VIP in a loadbalancer or a roundrobin DNS name.
+
+### `auth_mechanism`
+
+The identifier of an authentication mechanism that the client should use when communicating with the
+scheduler. Support for values other than `UNAUTHENTICATED` requires a matching scheduler-side
+[security configuration](../../operations/security/).
+
+### `docker_registry`
+
+The URI of the Docker Registry that will be used by the Aurora client to resolve docker tags to concrete
+image ids, when using the docker binding helper, like `{{docker.image[name][tag]}}`.
diff --git a/source/documentation/latest/reference/client-commands.md b/source/documentation/latest/reference/client-commands.md
new file mode 100644
index 0000000..49b45ca
--- /dev/null
+++ b/source/documentation/latest/reference/client-commands.md
@@ -0,0 +1,339 @@
+Aurora Client Commands
+======================
+
+- [Introduction](#introduction)
+- [Cluster Configuration](#cluster-configuration)
+- [Job Keys](#job-keys)
+- [Modifying Aurora Client Commands](#modifying-aurora-client-commands)
+- [Regular Jobs](#regular-jobs)
+    - [Creating and Running a Job](#creating-and-running-a-job)
+    - [Running a Command On a Running Job](#running-a-command-on-a-running-job)
+    - [Killing a Job](#killing-a-job)
+    - [Adding Instances](#adding-instances)
+    - [Updating a Job](#updating-a-job)
+        - [Coordinated job updates](#coordinated-job-updates)
+    - [Renaming a Job](#renaming-a-job)
+    - [Restarting Jobs](#restarting-jobs)
+- [Cron Jobs](#cron-jobs)
+- [Comparing Jobs](#comparing-jobs)
+- [Viewing/Examining Jobs](#viewingexamining-jobs)
+    - [Listing Jobs](#listing-jobs)
+    - [Inspecting a Job](#inspecting-a-job)
+    - [Versions](#versions)
+    - [Checking Your Quota](#checking-your-quota)
+    - [Finding a Job on Web UI](#finding-a-job-on-web-ui)
+    - [Getting Job Status](#getting-job-status)
+    - [Opening the Web UI](#opening-the-web-ui)
+    - [SSHing to a Specific Task Machine](#sshing-to-a-specific-task-machine)
+    - [SCPing with Specific Task Machines](#scping-with-specific-task-machines)
+    - [Templating Command Arguments](#templating-command-arguments)
+
+Introduction
+------------
+
+Once you have written an `.aurora` configuration file that describes
+your Job and its parameters and functionality, you interact with Aurora
+using Aurora Client commands. This document describes all of these commands
+and how and when to use them. All Aurora Client commands start with
+`aurora`, followed by the name of the specific command and its
+arguments.
+
+*Job keys* are a very common argument to Aurora commands, as well as the
+gateway to useful information about a Job. Before using Aurora, you
+should read the next section which describes them in detail. The section
+after that briefly describes how you can modify the behavior of certain
+Aurora Client commands, linking to a detailed document about how to do
+that.
+
+This is followed by the Regular Jobs section, which describes the basic
+Client commands for creating, running, and manipulating Aurora Jobs.
+After that are sections on Comparing Jobs and Viewing/Examining Jobs. In
+other words, various commands for getting information and metadata about
+Aurora Jobs.
+
+Cluster Configuration
+---------------------
+
+The client must be able to find a configuration file that specifies available clusters. This file
+declares shorthand names for clusters, which are in turn referenced by job configuration files
+and client commands.
+
+The client will load at most two configuration files, making both of their defined clusters
+available. The first is intended to be a system-installed clusters file, using the path specified in
+the environment variable `AURORA_CONFIG_ROOT`, defaulting to `/etc/aurora/clusters.json` if the
+environment variable is not set. The second is a user-installed file, located at
+`~/.aurora/clusters.json`.
+
+For more details on cluster configuration see the
+[Client Cluster Configuration](../client-cluster-configuration/) documentation.
+
+Job Keys
+--------
+
+A job key is a unique system-wide identifier for an Aurora-managed
+Job, for example `cluster1/web-team/test/experiment204`. It is a 4-tuple
+consisting of, in order, *cluster*, *role*, *environment*, and
+*jobname*, separated by slashes (`/`). Cluster is the name of an Aurora
+cluster. Role is the Unix service account under which the Job
+runs. Environment is a namespace component like `devel`, `test`,
+`prod`, or `stagingN`. Jobname is the Job's name.
+
+The combination of all four values uniquely specifies the Job. If any
+one value is different from that of another job key, the two job keys
+refer to different Jobs. For example, job key
+`cluster1/tyg/prod/workhorse` is different from
+`cluster1/tyg/prod/workcamel` is different from
+`cluster2/tyg/prod/workhorse` is different from
+`cluster2/foo/prod/workhorse` is different from
+`cluster1/tyg/test/workhorse`.
+
+Role names are user accounts existing on the agent machines. If you don't know what accounts
+are available, contact your sysadmin.
+
+Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
+
+Modifying Aurora Client Commands
+--------------------------------
+
+For certain Aurora Client commands, you can define hook methods that run
+either before or after an action that takes place during the command's
+execution, as well as based on whether the action finished successfully or failed
+during execution. Basically, a hook is code that lets you extend the
+command's actions. The hook executes on the client side, specifically on
+the machine executing Aurora commands.
+
+Hooks can be associated with these Aurora Client commands:
+
+  - `job create`
+  - `job kill`
+  - `job restart`
+
+The process for writing and activating them is complex enough
+that we explain it in a devoted document, [Hooks for Aurora Client API](../client-hooks/).
+
+Regular Jobs
+------------
+
+This section covers Aurora commands related to running, killing,
+renaming, updating, and restarting a basic Aurora Job.
+
+### Creating and Running a Job
+
+    aurora job create <job key> <configuration file>
+
+Creates and then runs a Job with the specified job key based on a `.aurora` configuration file.
+The configuration file may also contain and activate hook definitions.
+
+### Running a Command On a Running Job
+
+    aurora task run CLUSTER/ROLE/ENV/NAME[/INSTANCES] <cmd>
+
+Runs a shell command on all machines currently hosting shards of a
+single Job.
+
+`run` supports the same command line wildcards used to populate a Job's
+commands; i.e. anything in the `{{mesos.*}}` and `{{thermos.*}}`
+namespaces.
+
+### Killing a Job
+
+    aurora job killall CLUSTER/ROLE/ENV/NAME
+
+Kills all Tasks associated with the specified Job, blocking until all
+are terminated. Defaults to killing all instances in the Job.
+
+The `<configuration file>` argument for `kill` is optional. Use it only
+if it contains hook definitions and activations that affect the
+kill command.
+
+### Adding Instances
+
+    aurora job add CLUSTER/ROLE/ENV/NAME/INSTANCE <count>
+
+Adds `<count>` instances to the existing job. The configuration of the new instances is derived from
+an active job instance pointed to by the `/INSTANCE` part of the job specification. This command is
+a simpler way to scale out an existing job when an instance with desired task configuration
+already exists. Use `aurora update start` to add instances with a new (updated) configuration.
+
+### Updating a Job
+
+You can manage job updates using the `aurora update` command.  Please see
+[the Job Update documentation](../../features/job-updates/) for more details.
+
+
+### Renaming a Job
+
+Renaming is a tricky operation as downstream clients must be informed of
+the new name. A conservative approach
+to renaming suitable for production services is:
+
+1.  Modify the Aurora configuration file to change the role,
+    environment, and/or name as appropriate to the standardized naming
+    scheme.
+2.  Check that only these naming components have changed
+    with `aurora diff`.
+
+        aurora job diff CLUSTER/ROLE/ENV/NAME <job_configuration>
+
+3.  Create the (identical) job at the new key. You may need to request a
+    temporary quota increase.
+
+        aurora job create CLUSTER/ROLE/ENV/NEW_NAME <job_configuration>
+
+4.  Migrate all clients over to the new job key. Update all links and
+    dashboards. Ensure that both job keys run identical versions of the
+    code while in this state.
+5.  After verifying that all clients have successfully moved over, kill
+    the old job.
+
+        aurora job killall CLUSTER/ROLE/ENV/NAME
+
+6.  If you received a temporary quota increase, be sure to let the
+    powers that be know you no longer need the additional capacity.
+
+### Restarting Jobs
+
+`restart` restarts all shards of the Job identified by the given job key:
+
+    aurora job restart CLUSTER/ROLE/ENV/NAME[/INSTANCES]
+
+Restarts are controlled on the client side, so aborting
+the `job restart` command halts the restart operation.
+
+**Note**: `job restart` only applies its command line arguments and does not
+use, nor is it affected by, `update.config`. Restarting
+does ***not*** involve a configuration change. To update the
+configuration, use `update.config`.
+
+The `--config` argument for restart is optional. Use it only
+if it contains hook definitions and activations that affect the
+`job restart` command.
+
+Cron Jobs
+---------
+
+You can manage cron jobs using the `aurora cron` command.  Please see
+[the Cron Jobs Feature](../../features/cron-jobs/) for more details.
+
+Comparing Jobs
+--------------
+
+    aurora job diff CLUSTER/ROLE/ENV/NAME <job configuration>
+
+Compares a job configuration against a running job. By default the diff
+is determined using `diff`, though you may choose an alternate
+diff program by specifying the `DIFF_VIEWER` environment variable.
+
+Viewing/Examining Jobs
+----------------------
+
+Above we discussed creating, killing, and updating Jobs. Here we discuss
+how to view and examine Jobs.
+
+### Listing Jobs
+
+    aurora config list <job configuration>
+
+Lists all Jobs registered with the Aurora scheduler in the named cluster for the named role.
+
+### Inspecting a Job
+
+    aurora job inspect CLUSTER/ROLE/ENV/NAME <job configuration>
+
+`inspect` verifies that its specified job can be parsed from a
+configuration file, and displays the parsed configuration.
+
+### Checking Your Quota
+
+    aurora quota get CLUSTER/ROLE
+
+Prints the production quota allocated to the role in the given
+cluster. Only non-[dedicated](../../features/constraints/#dedicated-attribute)
+[production](../configuration/#job-objects) jobs consume quota.
+
+### Finding a Job on Web UI
+
+When you create a job, part of the output response contains a URL that goes
+to the job's scheduler UI page. For example:
+
+    vagrant@precise64:~$ aurora job create devcluster/www-data/prod/hello /vagrant/examples/jobs/hello_world.aurora
+    INFO] Creating job hello
+    INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
+    INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
+
+You can go to the scheduler UI page for this job via `http://precise64:8081/scheduler/www-data/prod/hello`.
+You can go to the overall scheduler UI page by going to the part of that URL that ends at `scheduler`: `http://precise64:8081/scheduler`.
+
+Once you click through to a role page, you see Jobs arranged
+separately by pending jobs, active jobs and finished jobs.
+Jobs are arranged by role, typically a service account for
+production jobs and user accounts for test or development jobs.
+
+### Getting Job Status
+
+    aurora job status <job_key>
+
+Returns the status of recent tasks associated with the
+Job specified by `job_key` in its supplied cluster. Typically this includes
+a mix of active tasks (running or assigned) and inactive tasks
+(successful, failed, and lost.)
+
+### Opening the Web UI
+
+Use the Job's web UI scheduler URL or the `aurora status` command to find out on which
+machines individual tasks are scheduled. You can open the web UI via the
+`open` command line command if invoked from your machine:
+
+    aurora job open [<cluster>[/<role>[/<env>/<job_name>]]]
+
+If only the cluster is specified, it goes directly to that cluster's
+scheduler main page. If the role is specified, it goes to the top-level
+role page. If the full job key is specified, it goes directly to the job
+page where you can inspect individual tasks.
+
+### SSHing to a Specific Task Machine
+
+    aurora task ssh <job_key> <shard number>
+
+You can have the Aurora client ssh directly to the machine that has been
+assigned a particular Job/shard number. This may be useful for quickly
+diagnosing issues such as performance issues or abnormal behavior on a
+particular machine.
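+
+For example (the job key and instance number are hypothetical):
+
+    aurora task ssh devcluster/www-data/prod/hello 0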
+
+### SCPing with Specific Task Machines
+
+    aurora task scp [<cluster>/<role>/<env>/<job_name>/<instance_id>]:source [<cluster>/<role>/<env>/<job_name>/<instance_id>]:dest
+
+You can have the Aurora client copy file(s)/folder(s) to, from, and between
+individual tasks. The sandbox folder serves as the relative root and is the
+same folder you see when you browse `chroot` from the Scheduler task UI. You
+can also use absolute paths (like for `/tmp`), but tilde expansion is not
+supported. Currently, this command is only fully supported for Mesos
+containers. Users may use this to copy files from Docker containers but they
+cannot copy files to them.
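+
+For example, a hypothetical invocation copying a log file from instance 0's sandbox into instance 1's
+sandbox might look like:
+
+    aurora task scp devcluster/www-data/prod/hello/0:logs/hello.log devcluster/www-data/prod/hello/1:logs/hello.log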
+
+### Templating Command Arguments
+
+    aurora task run [-e] [-t THREADS] <job_key> -- <<command-line>>
+
+Given a job specification, run the supplied command on all hosts and
+return the output. You may use the standard Mustache templating rules:
+
+- `{{thermos.ports[name]}}` substitutes the specific named port of the
+  task assigned to this machine
+- `{{mesos.instance}}` substitutes the shard id of the job's task
+  assigned to this machine
+- `{{thermos.task_id}}` substitutes the task id of the job's task
+  assigned to this machine
+
+For example, the following type of pattern can be a powerful diagnostic
+tool:
+
+    aurora task run -t5 cluster1/tyg/devel/seizure -- \
+      'curl -s -m1 localhost:{{thermos.ports[http]}}/vars | grep uptime'
+
+By default, the command runs in the Task's sandbox. The `-e` option can
+run the command in the executor's sandbox. This is mostly useful for
+Aurora administrators.
+
+You can parallelize the runs by using the `-t` option.
diff --git a/source/documentation/latest/reference/client-hooks.md b/source/documentation/latest/reference/client-hooks.md
new file mode 100644
index 0000000..ee64193
--- /dev/null
+++ b/source/documentation/latest/reference/client-hooks.md
@@ -0,0 +1,228 @@
+# Hooks for Aurora Client API
+
+You can execute hook methods around Aurora API Client methods when they are called by the Aurora Command Line commands.
+
+Explaining how hooks work is a bit tricky because of some indirection about what they apply to. Basically, a hook is code that executes when a particular Aurora Client API method runs, letting you extend the method's actions. The hook executes on the client side, specifically on the machine executing Aurora commands.
+
+The catch is that hooks are associated with Aurora Client API methods, which users don't directly call. Instead, users call Aurora Command Line commands, which call Client API methods during their execution. Since which hooks run depends on which Client API methods get called, you will need to know which Command Line commands call which API methods. Later on, there is a table showing the various associations.
+
+**Terminology Note**: From now on, "method(s)" refer to Client API methods, and "command(s)" refer to Command Line commands.
+
+- [Hook Types](#hook-types)
+- [Execution Order](#execution-order)
+- [Hookable Methods](#hookable-methods)
+- [Activating and Using Hooks](#activating-and-using-hooks)
+- [.aurora Config File Settings](#aurora-config-file-settings)
+- [Command Line](#command-line)
+- [Hooks Protocol](#hooks-protocol)
+  - [pre_ Methods](#pre_-methods)
+  - [err_ Methods](#err_-methods)
+  - [post_ Methods](#post_-methods)
+- [Generic Hooks](#generic-hooks)
+- [Hooks Process Checklist](#hooks-process-checklist)
+
+
+## Hook Types
+
+Hooks have three basic types, differing by when they run with respect to their associated method.
+
+`pre_<method_name>`: When its associated method is called, the `pre_` hook executes first, then the called method. If the `pre_` hook fails, the method never runs. Later code that expected the method to succeed may be affected by this, which can result in the Aurora client terminating.
+
+Note that a `pre_` hook can error-trap internally so it does not
+return `False`. Designers/contributors of new `pre_` hooks should
+consider whether or not to error-trap them. You can error trap at the
+highest level very generally and always pass the `pre_` hook by
+returning `True`. For example:
+
+    def pre_create(...):
+      do_something()  # if do_something fails with an exception, the create_job is not attempted!
+      return True
+
+    # However...
+    def pre_create(...):
+      try:
+        do_something()  # may cause exception
+      except Exception:  # generic error trap will catch it
+        pass  # and ignore the exception
+      return True  # create_job will run in any case!
+
+`post_<method_name>`: A `post_` hook executes after its associated method successfully finishes running. If it fails, the already executed method is unaffected. A `post_` hook's error is trapped, and any later operations are unaffected.
+
+`err_<method_name>`: Executes only when its associated method returns a status other than OK or throws an exception. If an `err_` hook fails, the already executed method is unaffected. An `err_` hook's error is trapped, and any later operations are unaffected.
+
+## Execution Order
+
+A command with `pre_`, `post_`, and `err_` hooks defined and activated for its called method executes in the following order when the method successfully executes:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and successfully finishes
+6. `post_` method hook runs
+7. Command code executes
+8. Command execution ends
+
+The following is what happens when, for the same command and hooks, the method associated with the command suffers an error and does not successfully finish executing:
+
+1. Command called
+2. Command code executes
+3. Method Called
+4. `pre_` method hook runs
+5. Method runs and fails
+6. `err_` method hook runs
+7. Command Code executes (if `err_` method does not end the command execution)
+8. Command execution ends
+
+Note that the `post_` and `err_` hooks for the same method can never both run for a single execution of that method.
+
+## Hookable Methods
+
+You can associate `pre_`, `post_`, and `err_` hooks with the following methods. Since you do not directly interact with the methods, but rather the Aurora Command Line commands that call them, for each method we also list the command(s) that can call the method. Note that a different method or methods may be called by a command depending on how the command's other code executes. Similarly, multiple commands can call the same method. We also list the methods' argument signatures, which are used by their associated hooks. <a name="Chart"></a>
+
+  Aurora Client API Method | Client API Method Argument Signature | Aurora Command Line Command
+  -------------------------| ------------------------------------- | ---------------------------
+  ```create_job``` | ```self```, ```config``` | ```job create```, ```runtask```
+  ```restart``` | ```self```, ```job_key```, ```shards```, ```update_config```, ```health_check_interval_seconds``` | ```job restart```
+  ```kill_job``` | ```self```, ```job_key```, ```shards=None``` |  ```job kill```
+  ```start_cronjob``` | ```self```, ```job_key``` | ```cron start```
+  ```start_job_update``` | ```self```, ```config```, ```instances=None``` | ```update start```
+
+Some specific examples:
+
+* `pre_create_job` executes when a `create_job` method is called, and before the `create_job` method itself executes.
+
+* `post_cancel_update` executes after a `cancel_update` method has successfully finished running.
+
+* `err_kill_job` executes when the `kill_job` method is called, but doesn't successfully finish running.
+
+## Activating and Using Hooks
+
+By default, hooks are inactive. If you do not want to use hooks, you do not need to make any changes to your code. If you do want to use hooks, you will need to alter your `.aurora` config file to activate them both for the configuration as a whole as well as for individual `Job`s. And, of course, you will need to define in your config file what happens when a particular hook executes.
+
+## .aurora Config File Settings
+
+You can define a top-level `hooks` variable in any `.aurora` config file. `hooks` is a list of all objects that define hooks used by `Job`s defined in that config file. If you do not want to define any hooks for a configuration, `hooks` is optional.
+
+    hooks = [Object_with_defined_hooks1, Object_with_defined_hooks2]
+
+Be careful when assembling a config file using `include` on multiple smaller config files. If there are multiple files that assign a value to `hooks`, only the last assignment made will stick. For example, if `x.aurora` has `hooks = [a, b, c]` and `y.aurora` has `hooks = [d, e, f]` and `z.aurora` has, in this order, `include x.aurora` and `include y.aurora`, the `hooks` value will be `[d, e, f]`.
+
+Also, for any `Job` that you want to use hooks with, its `Job` definition in the `.aurora` config file must set an `enable_hooks` flag to `True` (it defaults to `False`). By default, hooks are disabled and you must enable them for `Job`s of your choice.
+
+To summarize, to use hooks for a particular job, you must both activate hooks for your config file as a whole, and for that job. Activating hooks only for individual jobs won't work, nor will only activating hooks for your config file as a whole. You must also specify the hooks' defining object in the `hooks` variable.
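+
+Putting these pieces together, a minimal sketch of a config file that uses hooks might look like the
+following (the hook class and job values are hypothetical):
+
+    hooks = [MyHookClass]
+
+    jobs = [Job(
+      cluster = 'cluster1',
+      role = 'www-data',
+      environment = 'prod',
+      name = 'hello',
+      enable_hooks = True,  # hooks must also be enabled per Job
+      task = hello_task     # assumed to be defined earlier in the file
+    )]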
+
+Recall that `.aurora` config files are written in Pystachio. So the following turns on hooks for production jobs at cluster1 and cluster2, but leaves them off for similar jobs with a defined user role. Of course, you also need to list the objects that define the hooks in your config file's `hooks` variable.
+
+    jobs = [
+            Job(enable_hooks = True, cluster = c, env = 'prod') for c in ('cluster1', 'cluster2')
+           ]
+    jobs.extend(
+       Job(cluster = c, env = 'prod', role = getpass.getuser()) for c in ('cluster1', 'cluster2'))
+       # Hooks disabled for these jobs
+
+## Command Line
+
+All Aurora Command Line commands now accept an `.aurora` config file as an optional parameter (some, of course, accept it as a required parameter). Whenever a command has a `.aurora` file parameter, any hooks specified and activated in the `.aurora` file can be used. For example:
+
+    aurora job restart cluster1/role/env/app myapp.aurora
+
+The command activates any hooks specified and activated in `myapp.aurora`. For the `restart` command, that is the only thing the `myapp.aurora` parameter does. So, if the command was the following, since there is no `.aurora` config file to specify any hooks, no hooks on the `restart` command can run.
+
+    aurora job restart cluster1/role/env/app
+
+## Hooks Protocol
+
+Any object defined in the `.aurora` config file can define hook methods. You should define your hook methods within a class, and then use the class name as a value in the `hooks` list in your config file.
+
+Note that you can define other methods in the class that its hook methods can call; all the logic of a hook does not have to be in its definition.
+
+The following example defines a class containing a `pre_kill_job` hook definition that calls another method defined in the class.
+
+    # Defines a method pre_kill_job
+    class KillConfirmer(object):
+      def confirm(self, msg):
+        return raw_input(msg).lower() == 'yes'
+
+      def pre_kill_job(self, job_key, shards=None):
+        shards = ('shards %s' % shards) if shards is not None else 'all shards'
+        return self.confirm('Are you sure you want to kill %s (%s)? (yes/no): '
+                            % (job_key, shards))
+
+### pre_ Methods
+
+`pre_` methods have the signature:
+
+    pre_<API method name>(self, <associated method's signature>)
+
+`pre_` methods have the same signature as their associated method, with the addition of `self` as the first parameter. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `pre_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+If this method returns False, the API command call aborts.
+
+### err_ Methods
+
+`err_` methods have the signature:
+
+    err_<API method name>(self, exc, <associated method's signature>)
+
+`err_` methods have the same signature as their associated method, with the addition of a first parameter `self` and a second parameter `exc`. `exc` is either a result with responseCode other than `ResponseCode.OK` or an `Exception`. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `err_` methods, you can use the `*` and `**` syntax to designate that all unspecified parameters are passed in a list to the `*`ed variable and all named parameters with values are passed as name/value pairs to the `**`ed variable.
+
+`err_` method return codes are ignored.
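+
+As a brief sketch, an `err_` hook that simply logs failures of `kill_job` could look like the
+following (the class name is hypothetical):
+
+    class KillJobErrorLogger(object):
+      def err_kill_job(self, exc, *args, **kw):
+        # exc is either a non-OK result or an Exception; the return value is ignored.
+        print('kill_job failed: %s' % exc)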
+
+### post_ Methods
+
+`post_` methods have the signature:
+
+    post_<API method name>(self, result, <associated method signature>)
+
+`post_` method parameters are `self`, then `result`, followed by the same parameter signature as their associated method. `result` is the result of the associated method call. See the [chart](#Chart) above for the mapping of parameters to methods. When writing `post_` methods, you can use the `*` and `**` syntax to designate that all unspecified arguments are passed in a list to the `*`ed parameter and all unspecified named arguments with values are passed as name/value pairs to the `**`ed parameter.
+
+`post_` method return codes are ignored.
+
+## Generic Hooks
+
+There are seven Aurora API Methods which any of the three hook types can attach to. Thus, there are 21 possible hook/method combinations for a single `.aurora` config file. Say that you define `pre_` and `post_` hooks for the `restart` method. That leaves 19 undefined hook/method combinations; `err_restart` and the 3 `pre_`, `post_`, and `err_` hooks for each of the other 6 hookable methods. You can define what happens when any of these otherwise undefined 19 hooks execute via a generic hook, whose signature is:
+
+    generic_hook(self, hook_config, event, method_name, result_or_err, args*, kw**)
+
+where:
+
+* `hook_config` is a named tuple of `config` (the Pystachio `config` object) and `job_key`.
+
+* `event` is one of `pre`, `err`, or `post`, indicating which type of hook the generic hook is standing in for. For example, assume no specific hooks were defined for the `restart` API command. If `generic_hook` is defined and activated, and `restart` is called, `generic_hook` will effectively run as `pre_restart`, `post_restart`, and `err_restart`. You can use a selection statement on this value so that `generic_hook` will act differently based on whether it is standing in for a `pre_`, `post_`, or `err_` hook.
+
+* `method_name` is the Client API method name whose execution is causing this execution of the `generic_hook`.
+
+* `args*`, `kw**` are the API method arguments and keyword arguments respectively.
+* `result_or_err` is a tri-state parameter taking one of these three values:
+  1. `None` for `pre_` hooks
+  2. `result` for `post_` hooks
+  3. `exc` for `err_` hooks
+
+Example:
+
+    # Overrides the standard do-nothing generic_hook by adding a log writing operation.
+    from twitter.common import log
+
+    class Logger(object):
+      '''Adds to the log every time a hookable API method is called'''
+      def generic_hook(self, hook_config, event, method_name, result_or_err, *args, **kw):
+        log.info('%s: %s_%s of %s'
+                 % (self.__class__.__name__, event, method_name, hook_config.job_key))
+
+## Hooks Process Checklist
+
+1. In your `.aurora` config file, add a `hooks` variable. Note that you may want to define a `.aurora` file only for hook definitions and then include this file in multiple other config files that you want to use the same hooks.
+
+        hooks = []
+
+2. In the `hooks` variable, list all objects that define hooks used by `Job`s defined in this config:
+
+        hooks = [Object_hook_definer1, Object_hook_definer2]
+
+3. For each job that uses hooks in this config file, add `enable_hooks = True` to the `Job` definition. Note that this is necessary even if you only want to use the generic hook.
+
+4. Write your `pre_`, `post_`, and `err_` hook definitions as part of an object definition in your `.aurora` config file.
+
+5. If desired, write your `generic_hook` definition as part of an object definition in your `.aurora` config file. Remember, the object must be listed as a member of `hooks`.
+
+6. If your Aurora command line command does not otherwise take an `.aurora` config file argument, add the appropriate `.aurora` file as an argument in order to define and activate the configuration's hooks.
diff --git a/source/documentation/latest/reference/configuration-best-practices.md b/source/documentation/latest/reference/configuration-best-practices.md
new file mode 100644
index 0000000..9ce7fcb
--- /dev/null
+++ b/source/documentation/latest/reference/configuration-best-practices.md
@@ -0,0 +1,187 @@
+Aurora Configuration Best Practices
+===================================
+
+Use As Few .aurora Files As Possible
+------------------------------------
+
+When creating your `.aurora` configuration, try to keep all versions of
+a particular job within the same `.aurora` file. For example, if you
+have separate jobs for `cluster1`, `cluster1` staging, `cluster1`
+testing, and `cluster2`, keep them as close together as possible.
+
+Constructs shared across multiple jobs owned by your team (e.g.
+team-level defaults or structural templates) can be split into separate
+`.aurora` files and included via the `include` directive.
+
+
+Avoid Boilerplate
+------------------
+
+If you see repetition or find yourself copying and pasting any parts of
+your configuration, it's likely an opportunity for templating. Take the
+example below:
+
+`redundant.aurora` contains:
+
+    download = Process(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'rm -rf Python-2.7.3 && tar xjf Python-2.7.3.tar.bz2',
+      max_failures = 5,
+      min_duration = 1)
+
+    build = Process(
+      name = 'build',
+      cmdline = 'pushd Python-2.7.3 && ./configure && make && popd',
+      max_failures = 1)
+
+    email = Process(
+      name = 'email',
+      cmdline = 'echo Success | mail feynman@tmc.com',
+      max_failures = 5,
+      min_duration = 1)
+
+    build_python = Task(
+      name = 'build_python',
+      processes = [download, unpack, build, email],
+      constraints = [Constraint(order = ['download', 'unpack', 'build', 'email'])])
+
+As you'll notice, there's a lot of repetition in the `Process`
+definitions. For example, almost every process sets a `max_failures`
+limit to 5 and a `min_duration` to 1. This is an opportunity for factoring
+into a common process template.
+
+Furthermore, the Python version is repeated everywhere. This can be
+bound via structural templating as described in the [Advanced Binding](../configuration-templating/#AdvancedBinding)
+section.
+
+`less_redundant.aurora` contains:
+
+    class Python(Struct):
+      version = Required(String)
+      base = Default(String, 'Python-{{version}}')
+      package = Default(String, '{{base}}.tar.bz2')
+
+    ReliableProcess = Process(
+      max_failures = 5,
+      min_duration = 1)
+
+    download = ReliableProcess(
+      name = 'download',
+      cmdline = 'wget http://www.python.org/ftp/python/{{python.version}}/{{python.package}}')
+
+    unpack = ReliableProcess(
+      name = 'unpack',
+      cmdline = 'rm -rf {{python.base}} && tar xjf {{python.package}}')
+
+    build = ReliableProcess(
+      name = 'build',
+      cmdline = 'pushd {{python.base}} && ./configure && make && popd',
+      max_failures = 1)
+
+    email = ReliableProcess(
+      name = 'email',
+      cmdline = 'echo Success | mail {{role}}@foocorp.com')
+
+    build_python = SequentialTask(
+      name = 'build_python',
+      processes = [download, unpack, build, email]).bind(python = Python(version = "2.7.3"))
+
+
+Thermos Uses bash, But Thermos Is Not bash
+-------------------------------------------
+
+#### Bad
+
+Many tiny Processes make for harder-to-manage configurations.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'rcp user@my_machine:my_application .'
+    )
+
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+#### Good
+
+Each `cmdline` runs in a bash subshell, so you have the full power of
+bash. Chaining commands with `&&` or `||` is almost always the right
+thing to do.
+
+Also for Tasks that are simply a list of processes that run one after
+another, consider using the `SequentialTask` helper which applies a
+linear ordering constraint for you.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'rcp user@my_machine:my_application . && unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+
+Rarely Use Functions In Your Configurations
+-------------------------------------------
+
+90% of the time you define a function in a `.aurora` file, you're
+probably Doing It Wrong(TM).
+
+#### Bad
+
+    def get_my_task(name, user, cpu, ram, disk):
+      return Task(
+        name = name,
+        user = user,
+        processes = [STAGE_PROCESS, RUN_PROCESS],
+        constraints = order(STAGE_PROCESS, RUN_PROCESS),
+        resources = Resources(cpu = cpu, ram = ram, disk = disk)
+      )
+
+    task_one = get_my_task('task_one', 'feynman', 1.0, 32*MB, 1*GB)
+    task_two = get_my_task('task_two', 'feynman', 2.0, 64*MB, 1*GB)
+
+#### Good
+
+This one is more idiomatic. Forced keyword arguments prevent accidents,
+e.g. constructing a task with "32*MB" when you mean 32MB of ram and not
+disk. Less proliferation of task-construction techniques means
+easier-to-read, quicker-to-understand, and a more composable
+configuration.
+
+    TASK_TEMPLATE = SequentialTask(
+      user = 'wickman',
+      processes = [STAGE_PROCESS, RUN_PROCESS],
+    )
+
+    task_one = TASK_TEMPLATE(
+      name = 'task_one',
+      resources = Resources(cpu = 1.0, ram = 32*MB, disk = 1*GB)
+    )
+
+    task_two = TASK_TEMPLATE(
+      name = 'task_two',
+      resources = Resources(cpu = 2.0, ram = 64*MB, disk = 1*GB)
+    )
diff --git a/source/documentation/latest/reference/configuration-templating.md b/source/documentation/latest/reference/configuration-templating.md
new file mode 100644
index 0000000..16d5c9d
--- /dev/null
+++ b/source/documentation/latest/reference/configuration-templating.md
@@ -0,0 +1,306 @@
+Aurora Configuration Templating
+===============================
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a templating library called
+*Pystachio*, a powerful tool for configuration specification and reuse.
+
+[Aurora Configuration Reference](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the *Aurora+Thermos Configuration
+Reference* without `import` statements - the Aurora config loader
+injects them automatically. Other than that the `.aurora` format
+works like any other Python script.
+
+
+Templating 1: Binding in Pystachio
+----------------------------------
+
+Pystachio uses the visually distinctive {{}} to indicate template
+variables. These are often called "mustache variables" after the
+similarly appearing variables in the Mustache templating system and
+because the curly braces resemble mustaches.
+
+If you are familiar with the Mustache system, note that templates in
+Pystachio have significant differences: they have no nesting, joining, or
+inheritance semantics. On the other hand, templates are evaluated
+iteratively, which affords some level of indirection.
+
+Let's start with the simplest template: text with one
+variable, in this case `name`:
+
+    Hello {{name}}
+
+If we evaluate this as is, we'd get back:
+
+    Hello
+
+If a template variable doesn't have a value, when evaluated it's
+replaced with nothing. If we add a binding to give it a value:
+
+    { "name" : "Tom" }
+
+We'd get back:
+
+    Hello Tom
+
+Every Pystachio object has an associated `.bind` method that can bind
+values to {{}} variables. Bindings are not immediately evaluated.
+Instead, they are evaluated only when the interpolated value of the
+object is necessary, e.g. for performing equality or serializing a
+message over the wire.
+
+Objects with and without mustache templated variables behave
+differently:
+
+    >>> Float(1.5)
+    Float(1.5)
+
+    >>> Float('{{x}}.5')
+    Float({{x}}.5)
+
+    >>> Float('{{x}}.5').bind(x = 1)
+    Float(1.5)
+
+    >>> Float('{{x}}.5').bind(x = 1) == Float(1.5)
+    True
+
+    >>> contextual_object = String('{{metavar{{number}}}}').bind(
+    ... metavar1 = "first", metavar2 = "second")
+
+    >>> contextual_object
+    String({{metavar{{number}}}})
+
+    >>> contextual_object.bind(number = 1)
+    String(first)
+
+    >>> contextual_object.bind(number = 2)
+    String(second)
+
+You usually bind simple key-to-value pairs, but you can also bind three
+other objects: lists, dictionaries, and structurals. These will be
+described in detail later.
+
+
+### Structurals in Pystachio / Aurora
+
+Most Aurora/Thermos users don't ever (knowingly) interact with `String`,
+`Float`, or `Integer` Pystachio objects directly. Instead they interact
+with derived structural (`Struct`) objects that are collections of
+fundamental and structural objects. The structural object components are
+called *attributes*. Aurora's most used structural objects are `Job`,
+`Task`, and `Process`:
+
+    class Process(Struct):
+      cmdline = Required(String)
+      name = Required(String)
+      max_failures = Default(Integer, 1)
+      daemon = Default(Boolean, False)
+      ephemeral = Default(Boolean, False)
+      min_duration = Default(Integer, 5)
+      final = Default(Boolean, False)
+
+Construct default objects by following the object's type with (). If you
+want an attribute to have a value different from its default, include
+the attribute name and value inside the parentheses.
+
+    >>> Process()
+    Process(daemon=False, max_failures=1, ephemeral=False,
+      min_duration=5, final=False)
+
+Attribute values can be template variables, which then receive specific
+values when creating the object.
+
+    >>> Process(cmdline = 'echo {{message}}')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo {{message}}, final=False)
+
+    >>> Process(cmdline = 'echo {{message}}').bind(message = 'hello world')
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+A powerful binding property is that all of an object's children inherit its
+bindings:
+
+    >>> List(Process)([
+    ... Process(name = '{{prefix}}_one'),
+    ... Process(name = '{{prefix}}_two')
+    ... ]).bind(prefix = 'hello')
+    ProcessList(
+      Process(daemon=False, name=hello_one, max_failures=1, ephemeral=False, min_duration=5, final=False),
+      Process(daemon=False, name=hello_two, max_failures=1, ephemeral=False, min_duration=5, final=False)
+      )
+
+Remember that an Aurora Job contains Tasks which contain Processes. A
+Job level binding is inherited by its Tasks and all their Processes.
+Similarly a Task level binding is available to that Task and its
+Processes but is *not* visible at the Job level (inheritance is a
+one-way street.)
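+
+For example, a minimal sketch (the names here are hypothetical): a binding
+applied at the Job level is visible to the Task and its Processes, while a
+binding applied at the Task level never reaches the Job.
+
+    greet = Process(name = 'greet', cmdline = 'echo {{greeting}} from {{cluster_name}}')
+
+    greet_task = Task(
+      name = 'greet',
+      processes = [greet],
+      resources = Resources(cpu = 0.1, ram = 16*MB, disk = 16*MB)
+    ).bind(greeting = 'hello')           # visible to greet, not to the Job
+
+    greet_job = Job(
+      name = 'greet',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = 'devel',
+      task = greet_task
+    ).bind(cluster_name = 'cluster1')    # visible to the Task and its Processes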
+
+#### Mustaches Within Structurals
+
+When you define a `Struct` schema, one powerful, but confusing, feature
+is that all of that structure's attributes are Mustache variables within
+the enclosing scope *once they have been populated*.
+
+For example, when `Process` is defined above, all its attributes, such as
+{{`name`}}, {{`cmdline`}}, and {{`max_failures`}}, are immediately
+defined as Mustache variables, implicitly bound into the `Process`, and
+inherited by all child objects once they are defined.
+
+Thus, you can do the following:
+
+    >>> Process(name = "installer", cmdline = "echo {{name}} is running")
+    Process(daemon=False, name=installer, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo installer is running, final=False)
+
+WARNING: This binding only takes place in one direction. For example,
+the following does NOT work and does not set the `Process` `name`
+attribute's value.
+
+    >>> Process().bind(name = "installer")
+    Process(daemon=False, max_failures=1, ephemeral=False, min_duration=5, final=False)
+
+The following is also not possible and results in an infinite loop that
+attempts to resolve `Process.name`.
+
+    >>> Process(name = '{{name}}').bind(name = 'installer')
+
+Do not confuse Structural attributes with bound Mustache variables.
+Attributes are implicitly converted to Mustache variables but not vice
+versa.
+
+### Templating 2: Structurals Are Factories
+
+#### A Second Way of Templating
+
+A second templating method is as powerful as the first and often
+confused with it. It relies on the automatic conversion of Struct
+attributes to Mustache variables described above.
+
+Suppose you create a Process object:
+
+    >>> p = Process(name = "process_one", cmdline = "echo hello world")
+
+    >>> p
+    Process(daemon=False, name=process_one, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+This `Process` object, "`p`", can be used wherever a `Process` object is
+needed. It can also be reused by changing the value(s) of its
+attribute(s). Here we change its `name` attribute from `process_one` to
+`process_two`.
+
+    >>> p(name = "process_two")
+    Process(daemon=False, name=process_two, max_failures=1, ephemeral=False, min_duration=5,
+            cmdline=echo hello world, final=False)
+
+Template creation is a common use for this technique:
+
+    >>> Daemon = Process(daemon = True)
+    >>> logrotate = Daemon(name = 'logrotate', cmdline = './logrotate conf/logrotate.conf')
+    >>> mysql = Daemon(name = 'mysql', cmdline = 'bin/mysqld --safe-mode')
+
+### Advanced Binding
+
+As described above, `.bind()` binds simple strings or numbers to
+Mustache variables. In addition to Structural types formed by combining
+atomic types, Pystachio has two container types, `List` and `Map`, which
+can also be bound via `.bind()`.
+
+#### Bind Syntax
+
+The `bind()` function can take Python dictionaries or `kwargs`
+interchangeably (when "`kwargs`" is in a function definition, `kwargs`
+receives a Python dictionary containing all keyword arguments after the
+formal parameter list).
+
+    >>> String('{{foo}}').bind(foo = 'bar') == String('{{foo}}').bind({'foo': 'bar'})
+    True
+
+Bindings done "closer" to the object in question take precedence:
+
+    >>> p = Process(name = '{{context}}_process')
+    >>> t = Task().bind(context = 'global')
+    >>> t(processes = [p, p.bind(context = 'local')])
+    Task(processes=ProcessList(
+      Process(daemon=False, name=global_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5),
+      Process(daemon=False, name=local_process, max_failures=1, ephemeral=False, final=False,
+              min_duration=5)
+    ))
+
+#### Binding Complex Objects
+
+##### Lists
+
+    >>> fibonacci = List(Integer)([1, 1, 2, 3, 5, 8, 13])
+    >>> String('{{fib[4]}}').bind(fib = fibonacci)
+    String(5)
+
+##### Maps
+
+    >>> first_names = Map(String, String)({'Kent': 'Clark', 'Wayne': 'Bruce', 'Prince': 'Diana'})
+    >>> String('{{first[Kent]}}').bind(first = first_names)
+    String(Clark)
+
+##### Structurals
+
+    >>> String('{{p.cmdline}}').bind(p = Process(cmdline = "echo hello world"))
+    String(echo hello world)
+
+### Structural Binding
+
+Use structural templates when binding more than two or three individual
+values at the Job or Task level. For fewer values, standard
+key-to-string binding is sufficient.
+
+Structural binding is a very powerful pattern and is most useful in
+Aurora/Thermos for doing Structural configuration. For example, you can
+define a job profile. The following profile uses `HDFS`, the Hadoop
+Distributed File System, to designate a file's location. `HDFS` does
+not come with Aurora, so you'll need to either install it separately
+or change the way the dataset is designated.
+
+    class Profile(Struct):
+      version = Required(String)
+      environment = Required(String)
+      dataset = Default(String, 'hdfs://home/aurora/data/{{environment}}')
+
+    PRODUCTION = Profile(version = 'live', environment = 'prod')
+    DEVEL = Profile(version = 'latest',
+                    environment = 'devel',
+                    dataset = 'hdfs://home/aurora/data/test')
+    TEST = Profile(version = 'latest', environment = 'test')
+
+    JOB_TEMPLATE = Job(
+      name = 'application',
+      role = 'myteam',
+      cluster = 'cluster1',
+      environment = '{{profile.environment}}',
+      task = SequentialTask(
+        name = 'task',
+        resources = Resources(cpu = 2, ram = 4*GB, disk = 8*GB),
+        processes = [
+          Process(name = 'main',
+                  cmdline = 'java -jar application.jar -hdfsPath {{profile.dataset}}')
+        ]
+      )
+    )
+
+    jobs = [
+      JOB_TEMPLATE(instances = 100).bind(profile = PRODUCTION),
+      JOB_TEMPLATE.bind(profile = DEVEL),
+      JOB_TEMPLATE.bind(profile = TEST),
+    ]
+
+In this case, a custom structural "Profile" is created to self-document
+the configuration to some degree. It also provides some schema
+"type-checking" and allows default self-substitution, e.g. in
+`Profile.dataset` above.
+
+So rather than a `.bind()` with a half-dozen substituted variables, you
+can bind a single object that has sensible defaults stored in a single
+place.
diff --git a/source/documentation/latest/reference/configuration-tutorial.md b/source/documentation/latest/reference/configuration-tutorial.md
new file mode 100644
index 0000000..79705de
--- /dev/null
+++ b/source/documentation/latest/reference/configuration-tutorial.md
@@ -0,0 +1,533 @@
+Aurora Configuration Tutorial
+=============================
+
+This document describes how to write Aurora configuration files, including
+feature descriptions and best practices. When writing a configuration file,
+make use of `aurora job inspect`. It takes the same job key and configuration
+file arguments as `aurora job create` or `aurora update start`. It first
+ensures the configuration parses, then outputs it in human-readable form.
+
+You should read this after going through the general [Aurora Tutorial](../../getting-started/tutorial/).
+
+- [The Basics](#the-basics)
+	- [Use Bottom-To-Top Object Ordering](#use-bottom-to-top-object-ordering)
+- [An Example Configuration File](#an-example-configuration-file)
+- [Defining Process Objects](#defining-process-objects)
+- [Getting Your Code Into The Sandbox](#getting-your-code-into-the-sandbox)
+- [Defining Task Objects](#defining-task-objects)
+	- [SequentialTask: Running Processes in Parallel or Sequentially](#sequentialtask-running-processes-in-parallel-or-sequentially)
+	- [SimpleTask](#simpletask)
+	- [Combining tasks](#combining-tasks)
+- [Defining Job Objects](#defining-job-objects)
+- [The jobs List](#the-jobs-list)
+- [Basic Examples](#basic-examples)
+
+
+The Basics
+----------
+
+To run a job on Aurora, you must specify a configuration file that tells
+Aurora what it needs to know to schedule the job, what Mesos needs to
+run the tasks the job is made up of, and what Thermos needs to run the
+processes that make up the tasks. This file must have
+a `.aurora` suffix.
+
+A configuration file defines a collection of objects, along with parameter
+values for their attributes. An Aurora configuration file contains the
+following three types of objects:
+
+- Job
+- Task
+- Process
+
+A configuration also specifies a list of `Job` objects assigned
+to the variable `jobs`.
+
+- jobs (list of defined Jobs to run)
+
+The `.aurora` file format is just Python. However, `Job`, `Task`,
+`Process`, and other classes are defined by a type-checked dictionary
+templating library called *Pystachio*, a powerful tool for
+configuration specification and reuse. Pystachio objects are tailored
+via {{}} surrounded templates.
+
+When writing your `.aurora` file, you may use any Pystachio datatypes, as
+well as any objects shown in the [*Aurora Configuration
+Reference*](../configuration/), without `import` statements - the
+Aurora config loader injects them automatically. Other than that, an `.aurora`
+file works like any other Python script.
+
+[*Aurora Configuration Reference*](../configuration/)
+has a full reference of all Aurora/Thermos defined Pystachio objects.
+
+### Use Bottom-To-Top Object Ordering
+
+A well-structured configuration starts with structural templates (if
+any). Structural templates encapsulate in their attributes all the
+differences between Jobs in the configuration that are not directly
+manipulated at the `Job` level, but typically at the `Process` or `Task`
+level. For example, if certain processes are invoked with slightly
+different settings or input.
+
+After structural templates, define, in order, `Process`es, `Task`s, and
+`Job`s.
+
+Structural template names should be *UpperCamelCased* and their
+instantiations are typically *UPPER\_SNAKE\_CASED*. `Process`, `Task`,
+and `Job` names are typically *lower\_snake\_cased*. Indentation is typically 2
+spaces.
+
+An Example Configuration File
+-----------------------------
+
+The following is a typical configuration file. Don't worry if there are
+parts you don't understand yet, but you may want to refer back to this
+as you read about its individual parts. Note that names surrounded by
+curly braces {{}} are template variables, which the system replaces with
+bound values for the variables.
+
+    # --- templates here ---
+	class Profile(Struct):
+	  package_version = Default(String, 'live')
+	  java_binary = Default(String, '/usr/lib/jvm/java-1.7.0-openjdk/bin/java')
+	  extra_jvm_options = Default(String, '')
+	  parent_environment = Default(String, 'prod')
+	  parent_serverset = Default(String,
+                                 '/foocorp/service/bird/{{parent_environment}}/bird')
+
+	# --- processes here ---
+	main = Process(
+	  name = 'application',
+	  cmdline = '{{profile.java_binary}} -server -Xmx1792m '
+	            '{{profile.extra_jvm_options}} '
+	            '-jar application.jar '
+	            '-upstreamService {{profile.parent_serverset}}'
+	)
+
+	# --- tasks ---
+	base_task = SequentialTask(
+	  name = 'application',
+	  processes = [
+	    Process(
+	      name = 'fetch',
+	      cmdline = 'curl -O https://packages.foocorp.com/{{profile.package_version}}/application.jar'),
+	  ]
+	)
+
+        # not always necessary but often useful to have separate task
+        # resource classes
+        staging_task = base_task(resources =
+                         Resources(cpu = 1.0,
+                                   ram = 2048*MB,
+                                   disk = 1*GB))
+	production_task = base_task(resources =
+                            Resources(cpu = 4.0,
+                                      ram = 2560*MB,
+                                      disk = 10*GB))
+
+	# --- job template ---
+	job_template = Job(
+	  name = 'application',
+	  role = 'myteam',
+	  contact = 'myteam-team@foocorp.com',
+	  instances = 20,
+	  service = True,
+	  task = production_task
+	)
+
+	# -- profile instantiations (if any) ---
+	PRODUCTION = Profile()
+	STAGING = Profile(
+	  extra_jvm_options = '-Xloggc:gc.log',
+	  parent_environment = 'staging'
+	)
+
+	# -- job instantiations --
+	jobs = [
+          job_template(cluster = 'cluster1', environment = 'prod')
+	               .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster2', environment = 'prod')
+	                .bind(profile = PRODUCTION),
+
+          job_template(cluster = 'cluster1',
+                        environment = 'staging',
+			service = False,
+			task = staging_task,
+			instances = 2)
+			.bind(profile = STAGING),
+	]
+
+## Defining Process Objects
+
+Processes are handled by the Thermos system. A process is a single
+executable step run as a part of an Aurora task, which consists of a
+bash-executable statement.
+
+The key (and required) `Process` attributes are:
+
+-   `name`: Any string which is a valid Unix filename (no slashes,
+    NULLs, or leading periods). The `name` value must be unique relative
+    to other Processes in a `Task`.
+-   `cmdline`: A command line run in a bash subshell, so you can use
+    bash scripts. Nothing is supplied for command-line arguments,
+    so `$*` is unspecified.
+
+Many tiny processes make managing configurations more difficult. For
+example, the following is a bad way to define processes.
+
+    copy = Process(
+      name = 'copy',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip'
+    )
+    unpack = Process(
+      name = 'unpack',
+      cmdline = 'unzip app.zip'
+    )
+    remove = Process(
+      name = 'remove',
+      cmdline = 'rm -f app.zip'
+    )
+    run = Process(
+      name = 'app',
+      cmdline = 'java -jar app.jar'
+    )
+    run_task = Task(
+      processes = [copy, unpack, remove, run],
+      constraints = order(copy, unpack, remove, run)
+    )
+
+Since `cmdline` runs in a bash subshell, you can chain commands
+with `&&` or `||`.
+
+When defining a `Task` that is just a list of Processes run in a
+particular order, use `SequentialTask`, as described in the
+[*Defining Task Objects*](#defining-task-objects) section. The following simplifies and combines the
+above multiple `Process` definitions into just two.
+
+    stage = Process(
+      name = 'stage',
+      cmdline = 'curl -O https://packages.foocorp.com/app.zip && '
+                'unzip app.zip && rm -f app.zip')
+
+    run = Process(name = 'app', cmdline = 'java -jar app.jar')
+
+    run_task = SequentialTask(processes = [stage, run])
+
+`Process` also has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#process-objects).
+
+
+## Getting Your Code Into The Sandbox
+
+When using Aurora, you need to get your executable code into its "sandbox", specifically
+the Task sandbox where the code executes for the Processes that make up that Task.
+
+Each Task has a sandbox created when the Task starts and garbage
+collected when it finishes. All of a Task's processes run in its
+sandbox, so processes can share state by using a shared current
+working directory.
+
+Typically, you save this code somewhere. You then need to define a Process
+in your `.aurora` configuration file that fetches the code from that somewhere
+to where the agent can see it. For a public cloud, that can be anywhere public on
+the Internet, such as S3. For a private cloud, you need to put it on an
+accessible HDFS cluster or similar internal storage.
+
+The template for this Process is:
+
+    <name> = Process(
+      name = '<name>',
+      cmdline = '<command to copy and extract code archive into current working directory>'
+    )
+
+Note: Be sure the extracted code archive has an executable.
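+
+A minimal sketch, assuming a hypothetical package URL and following the
+curl pattern used elsewhere in this document:
+
+    fetch_app = Process(
+      name = 'fetch_app',
+      cmdline = 'curl -O https://packages.foocorp.com/live/app.zip && unzip app.zip'
+    )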
+
+## Getting Environment Variables Into The Sandbox
+
+Every time a process is forked, the Thermos executor checks for the existence of a
+`.thermos_profile` file; if it exists, the file is sourced.
+You can use this mechanism to pass environment variables to the sandbox.
+
+An example for this Process is:
+
+    setup_env = Process(
+        name = 'setup',
+        cmdline = (
+            'cat <<EOF > .thermos_profile\n'
+            'export RESULT=hello\n'
+            'EOF\n'
+        )
+    )
+
+    read_env = Process(
+      name = 'read',
+      cmdline = 'echo $RESULT'
+    )
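+
+Since `read` must run after `setup`, the two processes can be ordered with
+the `SequentialTask` helper; the resource values below are illustrative only:
+
+    env_task = SequentialTask(
+      name = 'env_demo',
+      processes = [setup_env, read_env],
+      resources = Resources(cpu = 0.1, ram = 16*MB, disk = 16*MB))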
+
+## Defining Task Objects
+
+Tasks are handled by Mesos. A task is a collection of processes that
+runs in a shared sandbox. It's the fundamental unit Aurora uses to
+schedule the datacenter; essentially what Aurora does is find places
+in the cluster to run tasks.
+
+The key (and required) parts of a Task are:
+
+-   `name`: A string giving the Task's name. By default, if a Task is
+    not given a name, it inherits the first name in its Process list.
+
+-   `processes`: An unordered list of Process objects bound to the Task.
+    The value of the optional `constraints` attribute affects the
+    contents as a whole. Currently, the only constraint, `order`, determines if
+    the processes run in parallel or sequentially.
+
+-   `resources`: A `Resource` object defining the Task's resource
+    footprint. A `Resource` object has three attributes:
+    -   `cpu`: A Float, the fractional number of cores the Task requires.
+    -   `ram`: An Integer, RAM bytes the Task requires.
+    -   `disk`: An Integer, disk bytes the Task requires.
+
+A basic Task definition looks like:
+
+    Task(
+        name="hello_world",
+        processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+        resources=Resources(cpu = 1.0,
+                            ram = 1*GB,
+                            disk = 1*GB))
+
+A Task has optional attributes to customize its behaviour. Details can be found in the [Aurora Configuration Reference](../configuration/#task-object)
+
+
+### SequentialTask: Running Processes in Parallel or Sequentially
+
+By default, a Task with several Processes runs them in parallel. There
+are two ways to run Processes sequentially:
+
+-   Include an `order` constraint in the Task definition's `constraints`
+    attribute whose arguments specify the processes' run order:
+
+        Task( ... processes=[process1, process2, process3],
+	          constraints = order(process1, process2, process3), ...)
+
+-   Use `SequentialTask` instead of `Task`; it automatically runs
+    processes in the order specified in the `processes` attribute. No
+    `constraint` parameter is needed:
+
+        SequentialTask( ... processes=[process1, process2, process3] ...)
+
+### SimpleTask
+
+For quickly creating simple tasks, use the `SimpleTask` helper. It
+creates a basic task from a provided name and command line using a
+default set of resources. For example, in a `.aurora` configuration
+file:
+
+    SimpleTask(name="hello_world", command="echo hello world")
+
+is equivalent to
+
+    Task(name="hello_world",
+         processes=[Process(name = "hello_world", cmdline = "echo hello world")],
+         resources=Resources(cpu = 1.0,
+                             ram = 1*GB,
+                             disk = 1*GB))
+
+The simplest idiomatic Job configuration thus becomes:
+
+    import os
+    hello_world_job = Job(
+      task=SimpleTask(name="hello_world", command="echo hello world"),
+      role=os.getenv('USER'),
+      cluster="cluster1")
+
+When written to `hello_world.aurora`, you invoke it with a simple
+`aurora job create cluster1/$USER/test/hello_world hello_world.aurora`.
+
+### Combining tasks
+
+`Tasks.concat` (synonym: `concat_tasks`) and
+`Tasks.combine` (synonym: `combine_tasks`) merge multiple Task definitions
+into a single Task. It may be easier to define complex Jobs
+as smaller constituent Tasks. But since a Job only includes a single
+Task, the subtasks must be combined before using them in a Job.
+Smaller Tasks can also be reused between Jobs, instead of having to
+repeat their definition for multiple Jobs.
+
+With both methods, the merged Task takes the first Task's name. The
+difference between the two is the resulting Task's process ordering.
+
+-   `Tasks.combine` runs its subtasks' processes in no particular order.
+    The new Task's resource consumption is the sum of all its subtasks'
+    consumption.
+
+-   `Tasks.concat` runs its subtasks in the order supplied, with each
+    subtask's processes run serially between tasks. It is analogous to
+    the `order` constraint helper, except at the Task level instead of
+    the Process level. The new Task's resource consumption is the
+    maximum value specified by any subtask for each Resource attribute
+    (cpu, ram and disk).
+
+For example, given the following:
+
+    setup_task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper],
+      # It is important to note that {{Tasks.concat}} has
+      # no effect on the ordering of the processes within a task;
+      # hence the necessity of the {{order}} statement below
+      # (otherwise, the order in which {{download_interpreter}}
+      # and {{update_zookeeper}} run will be non-deterministic)
+      constraints=order(download_interpreter, update_zookeeper),
+      ...
+    )
+
+    run_task = SequentialTask(
+      ...
+      processes=[download_application, start_application],
+      ...
+    )
+
+    combined_task = Tasks.concat(setup_task, run_task)
+
+The `Tasks.concat` command merges the two Tasks into a single Task and
+ensures all processes in `setup_task` run before the processes
+in `run_task`. Conceptually, the task is reduced to:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper,
+                        download_application, start_application),
+      ...
+    )
+
+In the case of `Tasks.combine`, the two schedules run in parallel:
+
+    task = Task(
+      ...
+      processes=[download_interpreter, update_zookeeper,
+                 download_application, start_application],
+      constraints=order(download_interpreter, update_zookeeper) +
+                        order(download_application, start_application),
+      ...
+    )
+
+In the latter case, each of the two sequences may operate in parallel.
+Of course, this may not be the intended behavior (for example, if
+the `start_application` Process implicitly relies
+upon `download_interpreter`). Make sure you understand the difference
+between using one or the other.
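+
+As an illustrative sketch of the resource semantics described above (the
+process objects are assumed from the earlier example, and the footprints are
+made up), `Tasks.combine` sums the subtasks' footprints while `Tasks.concat`
+takes the per-attribute maximum:
+
+    setup_task = Task(
+      name = 'setup',
+      processes = [download_interpreter, update_zookeeper],
+      constraints = order(download_interpreter, update_zookeeper),
+      resources = Resources(cpu = 1.0, ram = 1*GB, disk = 1*GB))
+
+    run_task = SequentialTask(
+      name = 'run',
+      processes = [download_application, start_application],
+      resources = Resources(cpu = 2.0, ram = 2*GB, disk = 4*GB))
+
+    # Tasks.combine(setup_task, run_task) -> cpu = 3.0, ram = 3*GB, disk = 5*GB
+    # Tasks.concat(setup_task, run_task)  -> cpu = 2.0, ram = 2*GB, disk = 4*GB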
+
+## Defining Job Objects
+
+A job is a group of identical tasks that Aurora can run in a Mesos cluster.
+
+A `Job` object is defined by the values of several attributes, some
+required and some optional. The required attributes are:
+
+-   `task`: Task object to bind to this job. Note that a Job can
+    only take a single Task.
+
+-   `role`: Job's role account; in other words, the user account to run
+    the job as on a Mesos cluster machine. A common value is
+    `os.getenv('USER')`, which uses a Python call to get the user who
+    submits the job request. The other common value is the service
+    account that runs the job, e.g. `www-data`.
+
+-   `environment`: Job's environment, typical values
+    are `devel`, `test`, or `prod`.
+
+-   `cluster`: Aurora cluster to schedule the job in, defined in
+    `/etc/aurora/clusters.json` or `~/.clusters.json`. You can specify
+    jobs where the only difference is the `cluster`, then at run time
+    only run the Job whose job key includes your desired cluster's name.
+
+You usually see a `name` parameter. By default, `name` inherits its
+value from the Job's associated Task object, but you can override this
+default. For these four parameters, a Job definition might look like:
+
+    foo_job = Job( name = 'foo', cluster = 'cluster1',
+              role = os.getenv('USER'), environment = 'prod',
+              task = foo_task)
+
+In addition to the required attributes, there are several optional
+attributes. Details can be found in the [Aurora Configuration Reference](../configuration/#job-objects).
+
+
+## The jobs List
+
+At the end of your `.aurora` file, you need to specify a list of the
+file's defined Jobs. For example, the following exports the jobs `job1`,
+`job2`, and `job3`.
+
+    jobs = [job1, job2, job3]
+
+This allows the aurora client to invoke commands on those jobs, such as
+starting, updating, or killing them.
+
+
+
+Basic Examples
+==============
+
+These are provided to give a basic understanding of simple Aurora jobs.
+
+### hello_world.aurora
+
+Put the following in a file named `hello_world.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    import os
+    hello_world_process = Process(name = 'hello_world', cmdline = 'echo hello world')
+
+    hello_world_task = Task(
+      resources = Resources(cpu = 0.1, ram = 16 * MB, disk = 16 * MB),
+      processes = [hello_world_process])
+
+    hello_world_job = Job(
+      cluster = 'cluster1',
+      role = os.getenv('USER'),
+      task = hello_world_task)
+
+    jobs = [hello_world_job]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world hello_world.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world
+
+### Environment Tailoring
+
+Put the following in a file named `hello_world_productionized.aurora`, substituting your own values
+for values such as `cluster`s.
+
+    include('hello_world.aurora')
+
+    production_resources = Resources(cpu = 1.0, ram = 512 * MB, disk = 2 * GB)
+    staging_resources = Resources(cpu = 0.1, ram = 32 * MB, disk = 512 * MB)
+    hello_world_template = hello_world_job(
+        name = "hello_world-{{cluster}}",
+        task = hello_world_task(resources = production_resources))
+
+    jobs = [
+      # production jobs
+      hello_world_template(cluster = 'cluster1', instances = 25),
+      hello_world_template(cluster = 'cluster2', instances = 15),
+
+      # staging jobs
+      hello_world_template(
+        cluster = 'local',
+        instances = 1,
+        task = hello_world_task(resources = staging_resources)),
+    ]
+
+Then issue the following commands to create and kill the job, using your own values for the job key.
+
+    aurora job create cluster1/$USER/test/hello_world-cluster1 hello_world_productionized.aurora
+
+    aurora job kill cluster1/$USER/test/hello_world-cluster1
diff --git a/source/documentation/latest/reference/configuration.md b/source/documentation/latest/reference/configuration.md
new file mode 100644
index 0000000..88f50ae
--- /dev/null
+++ b/source/documentation/latest/reference/configuration.md
@@ -0,0 +1,737 @@
+Aurora Configuration Reference
+==============================
+
+Don't know where to start? The Aurora configuration schema is very
+powerful, and configurations can become quite complex for advanced use
+cases.
+
+For examples of simple configurations to get something up and running
+quickly, check out the [Tutorial](../../getting-started/tutorial/). When you feel comfortable with the basics, move
+on to the [Configuration Tutorial](../configuration-tutorial/) for more in-depth coverage of
+configuration design.
+
+- [Process Schema](#process-schema)
+    - [Process Objects](#process-objects)
+- [Task Schema](#task-schema)
+    - [Task Object](#task-object)
+    - [Constraint Object](#constraint-object)
+    - [Resource Object](#resource-object)
+- [Job Schema](#job-schema)
+    - [Job Objects](#job-objects)
+    - [UpdateConfig Objects](#updateconfig-objects)
+    - [HealthCheckConfig Objects](#healthcheckconfig-objects)
+    - [Announcer Objects](#announcer-objects)
+    - [Container Objects](#container)
+    - [LifecycleConfig Objects](#lifecycleconfig-objects)
+    - [SlaPolicy Objects](#slapolicy-objects)
+- [Specifying Scheduling Constraints](#specifying-scheduling-constraints)
+- [Template Namespaces](#template-namespaces)
+    - [mesos Namespace](#mesos-namespace)
+    - [thermos Namespace](#thermos-namespace)
+
+
+Process Schema
+==============
+
+Process objects consist of required `name` and `cmdline` attributes. You can customize Process
+behavior with its optional attributes. Remember, Processes are handled by Thermos.
+
+### Process Objects
+
+  **Attribute Name**  | **Type**    | **Description**
+  ------------------- | :---------: | ---------------------------------
+   **name**           | String      | Process name (Required)
+   **cmdline**        | String      | Command line (Required)
+   **max_failures**   | Integer     | Maximum process failures (Default: 1)
+   **daemon**         | Boolean     | When True, this is a daemon process. (Default: False)
+   **ephemeral**      | Boolean     | When True, this is an ephemeral process. (Default: False)
+   **min_duration**   | Integer     | Minimum duration between process restarts in seconds. (Default: 5)
+   **final**          | Boolean     | When True, this process is a finalizing one that should run last. (Default: False)
+   **logger**         | Logger      | Struct defining the log behavior for the process. (Default: Empty)
+
+#### name
+
+The name is any valid UNIX filename string (specifically no
+slashes, NULLs or leading periods). Within a Task object, each Process name
+must be unique.
+
+#### cmdline
+
+The command line run by the process. The command line is invoked in a bash
+subshell, so it can involve full-blown bash scripts. However, nothing is
+supplied for command-line arguments so `$*` is unspecified.
+
+#### max_failures
+
+The maximum number of failures (non-zero exit statuses) this process can
+have before being marked permanently failed and not retried. If a
+process permanently fails, Thermos looks at the failure limit of the task
+containing the process (usually 1) to determine if the task has
+failed as well.
+
+Setting `max_failures` to 0 makes the process retry
+indefinitely until it achieves a successful (zero) exit status.
+It retries at most once every `min_duration` seconds to prevent
+an effective denial of service attack on the coordinating Thermos scheduler.
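+
+A minimal sketch, assuming a hypothetical `{{db_host}}` binding supplied
+elsewhere: a process that polls until an external dependency is reachable.
+
+    wait_for_db = Process(
+      name = 'wait_for_db',
+      cmdline = 'nc -z {{db_host}} 3306',
+      max_failures = 0,    # retry until the check succeeds
+      min_duration = 30)   # wait at least 30 seconds between attempts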
+
+#### daemon
+
+By default, Thermos processes are non-daemon. If `daemon` is set to True, a
+successful (zero) exit status does not prevent future process runs.
+Instead, the process reinvokes after `min_duration` seconds.
+However, the maximum failure limit still applies. A combination of
+`daemon=True` and `max_failures=0` causes a process to retry
+indefinitely regardless of exit status. This should be avoided
+for very short-lived processes because of the accumulation of
+checkpointed state for each process run. When running in Mesos
+specifically, `max_failures` is capped at 100.
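+
+A minimal sketch of a supervised process that is re-run after every exit,
+successful or not (the script name is hypothetical):
+
+    monitor = Process(
+      name = 'monitor',
+      cmdline = './monitor.sh',
+      daemon = True,
+      max_failures = 0)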
+
+#### ephemeral
+
+By default, Thermos processes are non-ephemeral. If `ephemeral` is set to
+True, the process' status is not used to determine if its containing task
+has completed. For example, consider a task with a non-ephemeral
+webserver process and an ephemeral logsaver process
+that periodically checkpoints its log files to a centralized data store.
+The task is considered finished once the webserver process has
+completed, regardless of the logsaver's current status.
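+
+A minimal sketch of the webserver/logsaver pairing described above (the
+command lines are hypothetical):
+
+    webserver = Process(name = 'webserver', cmdline = './run_server.sh')
+
+    logsaver = Process(
+      name = 'logsaver',
+      cmdline = './checkpoint_logs.sh',
+      daemon = True,      # re-run the checkpointer periodically
+      ephemeral = True)   # its status does not affect task completion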
+
+#### min_duration
+
+Processes may succeed or fail multiple times during a single task's
+duration. Each of these is called a *process run*. `min_duration` is
+the minimum number of seconds the scheduler waits before running the
+same process.
+
+#### final
+
+Processes can be grouped into two classes: ordinary processes and
+finalizing processes. By default, Thermos processes are ordinary. They
+run as long as the task is considered healthy (i.e., no failure
+limits have been reached.) But once all regular Thermos processes
+finish or the task reaches a certain failure threshold, it
+moves into a "finalization" stage and runs all finalizing
+processes. These are typically processes necessary for cleaning up the
+task, such as log checkpointers, or perhaps e-mail notifications that
+the task completed.
+
+Finalizing processes may not depend upon ordinary processes or
+vice-versa; however, finalizing processes may depend upon other
+finalizing processes and otherwise run as a typical process
+schedule.
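+
+A minimal sketch of a finalizing process that archives logs once the
+ordinary processes have finished (the archive command is illustrative):
+
+    save_logs = Process(
+      name = 'save_logs',
+      cmdline = 'tar czf logs.tgz *.log',
+      final = True)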
+
+#### logger
+
+The default behavior of Thermos is to store stderr/stdout logs in files which grow unbounded.
+In the event that you have large log volume, you may want to configure Thermos to automatically
+rotate logs after they grow to a certain size, which can prevent your job from using more than its
+allocated disk space.
+
+Logger objects specify a `destination` for Process logs which is, by default, `file` - a pair of
+`stdout` and `stderr` files. It's also possible to specify `console` to send log output to
+the Process stdout and stderr streams, `none` to suppress any log output, or `both` to send logs to
+files and console streams.
+
+The default Logger `mode` is `standard` which lets the stdout and stderr streams grow without bound.
+
+  **Attribute Name**  | **Type**          | **Description**
+  ------------------- | :---------------: | ---------------------------------
+   **destination**    | LoggerDestination | Destination of logs. (Default: `file`)
+   **mode**           | LoggerMode        | Mode of the logger. (Default: `standard`)
+   **rotate**         | RotatePolicy      | An optional rotation policy. (Default: `Empty`)
+
+A RotatePolicy describes log rotation behavior when `mode` is set to `rotate`; it is ignored
+otherwise. If `rotate` is `Empty` or `RotatePolicy()` while `mode` is set to `rotate`, the
+defaults below are used.
+
+  **Attribute Name**  | **Type**     | **Description**
+  ------------------- | :----------: | ---------------------------------
+   **log_size**       | Integer      | Maximum size (in bytes) of an individual log file. (Default: 100 MiB)
+   **backups**        | Integer      | The maximum number of backups to retain. (Default: 5)
+
+An example process configuration is as follows:
+
+        process = Process(
+          name='process',
+          logger=Logger(
+            destination=LoggerDestination('both'),
+            mode=LoggerMode('rotate'),
+            rotate=RotatePolicy(log_size=5*MB, backups=5)
+          )
+        )
+
+Task Schema
+===========
+
+Tasks fundamentally consist of a `name` and a list of Process objects stored as the
+value of the `processes` attribute. Processes can be further constrained with
+`constraints`. By default, `name`'s value inherits from the first Process in the
+`processes` list, so for simple `Task` objects with one Process, `name`
+can be omitted. In Mesos, `resources` is also required.
+
+### Task Object
+
+   **param**               | **type**                         | **description**
+   ---------               | :---------:                      | ---------------
+   ```name```              | String                           | Process name (Required) (Default: ```processes[0].name```)
+   ```processes```         | List of ```Process``` objects    | List of ```Process``` objects bound to this task. (Required)
+   ```constraints```       | List of ```Constraint``` objects | List of ```Constraint``` objects constraining processes.
+   ```resources```         | ```Resource``` object            | Resource footprint. (Required)
+   ```max_failures```      | Integer                          | Maximum process failures before being considered failed (Default: 1)
+   ```max_concurrency```   | Integer                          | Maximum number of concurrent processes (Default: 0, unlimited concurrency.)
+   ```finalization_wait``` | Integer                          | Amount of time allocated for finalizing processes, in seconds. (Default: 30)
+
+#### name
+`name` is a string denoting the name of this task. It defaults to the name of the first Process in
+the list of Processes associated with the `processes` attribute.
+
+#### processes
+
+`processes` is an unordered list of `Process` objects. To constrain the order
+in which they run, use `constraints`.
+
+##### constraints
+
+A list of `Constraint` objects. Currently it supports only one type,
+the `order` constraint. `order` is a list of process names
+that should run in the order given. For example,
+
+        process = Process(cmdline = "echo hello {{name}}")
+        task = Task(name = "echoes",
+                    processes = [process(name = "jim"), process(name = "bob")],
+                    constraints = [Constraint(order = ["jim", "bob"])])
+
+Constraints can be supplied ad-hoc and in duplicate. Not all
+Processes need be constrained; however, Tasks with cycles are
+rejected by the Thermos scheduler.
+
+Use the `order` function as shorthand to generate `Constraint` lists.
+The following:
+
+        order(process1, process2)
+
+is shorthand for
+
+        [Constraint(order = [process1.name(), process2.name()])]
+
+The `order` function accepts Process name strings `('foo', 'bar')` or the processes
+themselves, e.g. `foo=Process(name='foo', ...)`, `bar=Process(name='bar', ...)`,
+`constraints=order(foo, bar)`.
+
+#### resources
+
+Takes a `Resource` object, which specifies the amounts of CPU, memory, and disk space resources
+to allocate to the Task.
+
+#### max_failures
+
+`max_failures` is the number of failed processes needed for the `Task` to be
+marked as failed.
+
+For example, assume a Task has two Processes and a `max_failures` value of `2`:
+
+        template = Process(max_failures=10)
+        task = Task(
+          name = "fail",
+          processes = [
+             template(name = "failing", cmdline = "exit 1"),
+             template(name = "succeeding", cmdline = "exit 0")
+          ],
+          max_failures=2)
+
+The `failing` Process could fail 10 times before being marked as permanently
+failed, and the `succeeding` Process could succeed on the first run. However,
+the task would succeed despite only allowing for two failed processes. To be more
+specific, there would be 10 failed process runs yet 1 failed process. Both processes
+would have to fail for the Task to fail.
+
+#### max_concurrency
+
+For Tasks with a number of expensive but otherwise independent
+processes, you may want to limit the amount of concurrency
+the Thermos scheduler provides rather than artificially constraining
+it via `order` constraints. For example, a test framework may
+generate a task with 100 test run processes, but wants to run it on
+a machine with only 4 cores. You can limit the amount of parallelism to
+4 by setting `max_concurrency=4` in your task configuration.
+
+For example, the following task spawns 180 Processes ("mappers")
+to compute individual elements of a 180 degree sine table, all dependent
+upon one final Process ("reducer") to tabulate the results:
+
+    def make_mapper(id):
+      return Process(
+        name = "mapper%03d" % id,
+        cmdline = "echo 'scale=50;s(%d*4*a(1)/180)' | bc -l > "
+                  "temp.sine_table.%03d" % (id, id))
+
+    def make_reducer():
+      return Process(name = "reducer",
+                     cmdline = "cat temp.* | nl > sine_table.txt && rm -f temp.*")
+
+    processes = map(make_mapper, range(180))
+
+    task = Task(
+      name = "mapreduce",
+      processes = processes + [make_reducer()],
+      constraints = [Constraint(order = [mapper.name(), 'reducer']) for mapper
+                     in processes],
+      max_concurrency = 8)
+
+#### finalization_wait
+
+Process execution is organized into three active stages: `ACTIVE`,
+`CLEANING`, and `FINALIZING`. The `ACTIVE` stage is when ordinary processes run.
+This stage lasts as long as Processes are running and the Task is healthy.
+The moment either all Processes have finished successfully or the Task has reached a
+maximum Process failure limit, it goes into the `CLEANING` stage and sends
+SIGTERMs to all currently running Processes and their process trees.
+Once all Processes have terminated, the Task goes into the `FINALIZING` stage
+and invokes the schedule of all Processes with the `final` attribute set to True.
+
+This whole sequence, from the end of the `ACTIVE` stage to the end of `FINALIZING`,
+must happen within `finalization_wait` seconds. If it does not
+finish during that time, all remaining Processes are sent SIGKILLs
+(or, if they depend upon uncompleted Processes, are
+never invoked.)
+
+When running on Aurora, the `finalization_wait` is capped at 60 seconds.
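+
+A minimal sketch, assuming `run` is an ordinary process and `save_logs` is a
+finalizing process defined elsewhere, that allows the full 60 seconds for
+finalization:
+
+    task = Task(
+      name = 'app',
+      processes = [run, save_logs],
+      resources = Resources(cpu = 1.0, ram = 1*GB, disk = 1*GB),
+      finalization_wait = 60)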
+
+### Constraint Object
+
+Constraint objects currently support only a single ordering constraint, `order`,
+which specifies that its processes run sequentially in the order given. By
+default, all processes run in parallel when bound to a `Task` without
+ordering constraints.
+
+   param | type           | description
+   ----- | :----:         | -----------
+   order | List of String | List of processes by name (String) that should be run serially.
+
+### Resource Object
+
+Specifies the amount of CPU, Ram, and disk resources the task needs. See the
+[Resource Isolation document](../../features/resource-isolation/) for suggested values and to understand how
+resources are allocated.
+
+  param      | type    | description
+  -----      | :----:  | -----------
+  ```cpu```  | Float   | Fractional number of cores required by the task.
+  ```ram```  | Integer | Bytes of RAM required by the task.
+  ```disk``` | Integer | Bytes of disk required by the task.
+  ```gpu```  | Integer | Number of GPU cores required by the task.
+
+
+Job Schema
+==========
+
+### Job Objects
+
+*Note: Specifying a ```Container``` object as the value of the ```container``` property is
+  deprecated in favor of setting its value directly to the appropriate ```Docker``` or ```Mesos```
+  container type*
+
+*Note: Specifying preemption behavior of tasks through `production` flag is deprecated in favor of
+  electing appropriate task tier via `tier` attribute.*
+
+   name | type | description
+   ------ | :-------: | -------
+  ```task``` | Task | The Task object to bind to this job. Required.
+  ```name``` | String | Job name. (Default: inherited from the task attribute's name)
+  ```role``` | String | Job role account. Required.
+  ```cluster``` | String | Cluster in which this job is scheduled. Required.
+  ```environment``` | String | Job environment, default ```devel```. By default must be one of ```prod```, ```devel```, ```test``` or ```staging<number>``` but it can be changed by the Cluster operator using the scheduler option `allowed_job_environments`.
+  ```contact``` | String | Best email address to reach the owner of the job. For production jobs, this is usually a team mailing list.
+  ```instances```| Integer | Number of instances (sometimes referred to as replicas or shards) of the task to create. (Default: 1)
+  ```cron_schedule``` | String | Cron schedule in cron format. May only be used with non-service jobs. See [Cron Jobs](../../features/cron-jobs/) for more information. Default: None (not a cron job.)
+  ```cron_collision_policy``` | String | Policy to use when a cron job is triggered while a previous run is still active. KILL\_EXISTING: kill the previous run and schedule the new run. CANCEL\_NEW: let the previous run continue and cancel the new run. (Default: KILL_EXISTING)
+  ```update_config``` | ```UpdateConfig``` object | Parameters for controlling the rate and policy of rolling updates.
+  ```constraints``` | dict | Scheduling constraints for the tasks. See the section on the [constraint specification language](#specifying-scheduling-constraints)
+  ```service``` | Boolean | If True, restart tasks regardless of success or failure. (Default: False)
+  ```max_task_failures``` | Integer | Maximum number of failures after which the task is considered to have failed (Default: 1) Set to -1 to allow for infinite failures
+  ```priority``` | Integer | Preemption priority to give the task (Default 0). Tasks with higher priorities may preempt tasks at lower priorities.
+  ```production``` | Boolean |  (Deprecated) Whether or not this is a production task that may [preempt](../../features/multitenancy/#preemption) other tasks (Default: False). Production job role must have the appropriate [quota](../../features/multitenancy/#preemption).
+  ```health_check_config``` | ```HealthCheckConfig``` object | Parameters for controlling a task's health checks. HTTP health check is only used if a health port was assigned with a command line wildcard.
+  ```container``` | Choice of ```Container```, ```Docker``` or ```Mesos``` object | An optional container to run all processes inside of.
+  ```lifecycle``` | ```LifecycleConfig``` object | An optional task lifecycle configuration that dictates commands to be executed on startup/teardown.  HTTP lifecycle is enabled by default if the "health" port is requested.  See [LifecycleConfig Objects](#lifecycleconfig-objects) for more information.
+  ```tier``` | String | Task tier type. The default scheduler tier configuration allows for 3 tiers: `revocable`, `preemptible`, and `preferred`. If a tier is not elected, Aurora assigns the task to a tier based on its choice of `production` (that is `preferred` for production and `preemptible` for non-production jobs). See the section on [Configuration Tiers](../../features/multitenancy/#configuration-tiers) for more information.
+  ```announce``` | ```Announcer``` object | Optionally enable Zookeeper ServerSet announcements. See [Announcer Objects](#announcer-objects) for more information.
+  ```enable_hooks``` | Boolean | Whether to enable [Client Hooks](../client-hooks/) for this job. (Default: False)
+  ```partition_policy``` | ```PartitionPolicy``` object | An optional partition policy that allows job owners to define how to handle partitions for running tasks (in partition-aware Aurora clusters)
+  ```metadata``` | list of ```Metadata``` objects | list of ```Metadata``` objects for user's customized metadata information.
+  ```executor_config``` | ```ExecutorConfig``` object | Allows choosing an alternative executor defined in `custom_executor_config` to be used instead of Thermos. Tasks will be launched with Thermos as the executor by default. See [Custom Executors](../../features/custom-executors/) for more info.
+  ```sla_policy``` |  Choice of ```CountSlaPolicy```, ```PercentageSlaPolicy``` or ```CoordinatorSlaPolicy``` object | An optional SLA policy that allows job owners to describe the SLA requirements for the job. See [SlaPolicy Objects](#slapolicy-objects) for more information.
+
+
+### UpdateConfig Objects
+
+Parameters for controlling the rate and policy of rolling updates.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+| ```watch_secs```             | Integer  | Minimum number of seconds a shard must remain in ```RUNNING``` state before considered a success (Default: 45)
+| ```max_per_shard_failures``` | Integer  | Maximum number of restarts per shard during update. Increments total failure count when this limit is exceeded. (Default: 0)
+| ```max_total_failures```     | Integer  | Maximum number of shard failures to be tolerated in total during an update. Cannot be greater than or equal to the total number of tasks in a job. (Default: 0)
+| ```rollback_on_failure```    | boolean  | When False, prevents auto rollback of a failed update (Default: True)
+| ```wait_for_batch_completion```| boolean | When True, all threads from a given batch will be blocked from picking up new instances until the entire batch is updated. This essentially simulates the legacy sequential updater algorithm. (Default: False)
+| ```pulse_interval_secs```    | Integer  |  Indicates a [coordinated update](../../features/job-updates/#coordinated-job-updates). If no pulses are received within the provided interval the update will be blocked. Beta-updater only. Will fail on submission when used with client updater. (Default: None)
+| ```update_strategy```        | Choice of ```QueueUpdateStrategy```,  ```BatchUpdateStrategy```, or ```VariableBatchUpdateStrategy``` object | Indicate which update strategy to use for this update.
+| ```sla_aware```              | boolean  | When True, updates will only update an instance if it does not break the task's specified [SLA Requirements](../../features/sla-requirements/). (Default: None)
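+
+A minimal sketch using some of these attributes, assuming a hypothetical
+`job_template` with more instances than `max_total_failures`:
+
+    update_config = UpdateConfig(
+      batch_size = 5,
+      watch_secs = 60,
+      max_per_shard_failures = 2,
+      max_total_failures = 10)
+
+    job = job_template(
+      cluster = 'cluster1',
+      environment = 'prod',
+      update_config = update_config)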
+
+### QueueUpdateStrategy Objects
+
+Update strategy which will keep the active updating instances at size ```batch_size``` throughout the update until there are no more instances left to update.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+
+### BatchUpdateStrategy Objects
+
+Update strategy which will wait until a maximum of ``batch_size`` number of instances are updated before continuing on to the next group until all instances are updated.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_size```             | Integer  | Maximum number of shards to be updated in one iteration (Default: 1)
+| ```autopause_after_batch```             | Boolean  | Automatically pauses update after completing a batch. (Default: False)
+
+### VariableBatchUpdateStrategy Objects
+
+Similar to Batch Update strategy, this strategy will wait until all instances in a current group are
+updated before updating more instances. However, instead of maintaining a static group size, the
+size of each group may change as the update progresses. For example, an update which modifies a
+total of 10 instances may be done in batch sizes of 2, 3, and 5. If the number of instances to
+be updated are greater than the sum of the groups, the last group size will be used in
+perpetuity until all instances are updated. Following the previous example, if instead of 10
+instances 20 instances are modified, the update groups would become: 2, 3, 5, 5, 5.
+
+| object                       | type     | description
+| ---------------------------- | :------: | ------------
+| ```batch_sizes```             | List(Integer)  | Maximum number of shards to be updated per iteration. As each iteration completes, the next iteration's group size may change. If there are still instances that need to be updated after all sizes are used, the last size will be reused for the remainder of the update.
+| ```autopause_after_batch```             | Boolean  | Automatically pauses update before starting a new batch. (Default: False)
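+
+A minimal sketch matching the example above, updating instances in groups of
+2, 3, and then 5:
+
+    update_config = UpdateConfig(
+      update_strategy = VariableBatchUpdateStrategy(
+        batch_sizes = [2, 3, 5],
+        autopause_after_batch = False))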
+
+#### Using the `sla_aware` option
+
+There are some nuances around the `sla_aware` option that users should be aware of:
+
+- SLA-aware updates work in tandem with maintenance. Draining a host that has an instance of the
+job being updated affects the SLA and thus will be taken into account when the update determines
+whether or not it is safe to update another instance.
+- SLA-aware updates will use the [SLAPolicy](../../features/sla-requirements/#custom-sla) of the
+*newest* configuration when determining whether or not it is safe to update an instance. For
+example, if the current configuration specifies a
+[PercentageSlaPolicy](../../features/sla-requirements/#percentageslapolicy-objects) that allows for
+5% of instances to be down and the updated configuration increases this value to 10%, the SLA
+calculation will be done using the 10% policy. Be mindful of this when doing an update that
+modifies the `SLAPolicy` since it may be possible to put the old configuration in a bad state
+that the new configuration would not be affected by. Additionally, if the update is rolled back,
+then the rollback will use the old `SLAPolicy` (or none if there was not one previously).
+- If using the [CoordinatorSlaPolicy](../../features/sla-requirements/#coordinatorslapolicy-objects),
+it is important to pay attention to the `batch_size` of the update. If you have a complex SLA
+requirement, then you may be limiting the throughput of your updates with an insufficient
+`batch_size`. For example, imagine you have a job with 9 instances that represent three
+replicated caches, and you can only update one instance per replica set: `[0 1 2]
+[3 4 5] [6 7 8]` (the number indicates the instance ID and the brackets represent replica
+sets). If your `batch_size` is 3, then you will slowly update one replica set at a time. If your
+`batch_size` is 9, then you can update all replica sets in parallel and thus speed up the update.
+- If an instance fails an SLA check for an update, then it will be rechecked starting at a delay
+from `sla_aware_kill_retry_min_delay` and exponentially increasing up to
+`sla_aware_kill_retry_max_delay`. These are cluster-operator set values.
+
+### HealthCheckConfig Objects
+
+Parameters for controlling a task's health checks via HTTP or a shell command.
+
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```health_checker```           | HealthCheckerConfig | Configure what kind of health check to use.
+| ```initial_interval_secs```    | Integer   | Initial grace period (during which health-check failures are ignored) while performing health checks. (Default: 15)
+| ```interval_secs```            | Integer   | Interval on which to check the task's health. (Default: 10)
+| ```max_consecutive_failures``` | Integer   | Maximum number of consecutive failures that will be tolerated before considering a task unhealthy (Default: 0)
+| ```min_consecutive_successes``` | Integer   | Minimum number of consecutive successful health checks required before considering a task healthy (Default: 1)
+| ```timeout_secs```             | Integer   | Health check timeout. (Default: 1)
+
+### HealthCheckerConfig Objects
+| param                          | type                | description
+| -------                        | :-------:           | --------
+| ```http```                     | HttpHealthChecker  | Configure health check to use HTTP. (Default)
+| ```shell```                    | ShellHealthChecker | Configure health check via a shell command.
+
+### HttpHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```endpoint```                 | String    | HTTP endpoint to check (Default: /health)
+| ```expected_response```        | String    | If not empty, fail the HTTP health check if the response differs. Case insensitive. (Default: ok)
+| ```expected_response_code```   | Integer   | If not zero, fail the HTTP health check if the response code differs. (Default: 0)
+
+### ShellHealthChecker Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```shell_command```            | String    | An alternative to HTTP health checking. Specifies a shell command that will be executed. Any non-zero exit status will be interpreted as a health check failure.
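+
+As a hedged sketch (the command and values below are placeholders, not defaults, and the
+job-level field is assumed to be `health_check_config`), a shell-based health check could be
+configured like this:
+
+```
+health_check_config = HealthCheckConfig(
+  health_checker = HealthCheckerConfig(
+    # Any non-zero exit status from this command marks the check as failed.
+    shell = ShellHealthChecker(shell_command = 'test -f /var/run/myapp/healthy')
+  ),
+  interval_secs = 10,
+  max_consecutive_failures = 3
+)
+```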
+
+### PartitionPolicy Objects
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```reschedule```               | Boolean   | Whether or not to reschedule when running tasks become partitioned (Default: True)
+| ```delay_secs```               | Integer   | How long to delay transitioning to LOST when running tasks are partitioned. (Default: 0)
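+
+A minimal sketch using the fields above (assuming the job-level `partition_policy` field; the
+delay value is illustrative only):
+
+```
+partition_policy = PartitionPolicy(
+  # Do not immediately transition to LOST; wait out short partitions.
+  reschedule = True,
+  delay_secs = 300
+)
+```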
+
+### Metadata Objects
+
+Describes a piece of user metadata in a key-value pair
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```key```        | String          | The key of the user-provided metadata
+  ```value```      | String          | The value of the metadata for the corresponding key
+
+### ExecutorConfig Objects
+
+Describes an Executor name and data to pass to the Mesos Task
+
+| param                          | type      | description
+| -------                        | :-------: | --------
+| ```name```               | String   | Name of the executor to use for this task. Must match the name of an executor in `custom_executor_config` or Thermos (`AuroraExecutor`). (Default: AuroraExecutor)
+| ```data```               | String   | Data blob to pass on to the executor. (Default: "")
+
+### Announcer Objects
+
+If the `announce` field in the Job configuration is set, each task will be
+registered in the ServerSet `/aurora/role/environment/jobname` in the
+zookeeper ensemble configured by the executor (which can optionally be overridden by specifying
+the `zk_path` parameter).  If no Announcer object is specified,
+no announcement will take place.  For more information about ServerSets, see the [Service Discovery](../../features/service-discovery/)
+documentation.
+
+By default, the hostname in the registered endpoints will be the `--hostname` parameter
+that is passed to the mesos agent. To override the hostname value, the executor can be started
+with `--announcer-hostname=<overridden_value>`. If you decide to use `--announcer-hostname` and
+the overridden value needs to change for every executor, then the executor has to be started inside a wrapper; see [Executor Wrapper](../../operations/configuration/#thermos-executor-wrapper).
+
+For example, if you want the hostname in the endpoint to be an IP address instead of the hostname,
+the `--hostname` parameter to the mesos agent can be set to the machine IP or the executor can
+be started with `--announcer-hostname=<host_ip>` while wrapping the executor inside a script.
+
+| object                         | type      | description
+| -------                        | :-------: | --------
+| ```primary_port```             | String    | Which named port to register as the primary endpoint in the ServerSet (Default: `http`)
+| ```portmap```                  | dict      | A mapping of additional endpoints to be announced in the ServerSet (Default: `{ 'aurora': '{{primary_port}}' }`)
+| ```zk_path```                  | String    | Zookeeper serverset path override (executor must be started with the `--announcer-allow-custom-serverset-path` parameter)
+
+#### Port aliasing with the Announcer `portmap`
+
+The primary endpoint registered in the ServerSet is the one allocated to the port
+specified by the `primary_port` in the `Announcer` object, by default
+the `http` port.  This port can be referenced from anywhere within a configuration
+as `{{thermos.ports[http]}}`.
+
+Without the port map, each named port would be allocated a unique port number.
+The `portmap` allows two different named ports to be aliased together.  The default
+`portmap` aliases the `aurora` port (i.e. `{{thermos.ports[aurora]}}`) to
+the `http` port.  Even though the two ports can be referenced independently,
+only one port is allocated by Mesos.  Any port referenced in a `Process` object
+that is not in the portmap will be allocated dynamically by Mesos and announced as well.
+
+It is possible to use the portmap to alias names to static port numbers, e.g.
+`{'http': 80, 'https': 443, 'aurora': 'http'}`.  In this case, referencing
+`{{thermos.ports[aurora]}}` would look up `{{thermos.ports[http]}}` then
+find a static port 80.  No port would be requested of or allocated by Mesos.
+
+Static ports should be used cautiously as Aurora does nothing to prevent two
+tasks with the same static port allocations from being co-scheduled.
+External constraints such as agent attributes should be used to enforce such
+guarantees should they be needed.
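+
+A hedged sketch of announcing with a port alias and a static port (the static port number is
+illustrative, not a recommendation):
+
+```
+announce = Announcer(
+  # Register the task's dynamically allocated `http` port as the primary endpoint.
+  primary_port = 'http',
+  # Alias `aurora` to `http`, and announce a static `admin` port.
+  portmap = {'aurora': 'http', 'admin': 8080}
+)
+```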
+
+
+### Container Objects
+
+Describes the container the job's processes will run inside. If not using Docker or the Mesos
+unified-container, the container can be omitted from your job config.
+
+  param          | type           | description
+  -----          | :----:         | -----------
+  ```mesos```    | Mesos          | A native Mesos container to use.
+  ```docker```   | Docker         | A Docker container to use (via Docker engine)
+
+### Mesos Object
+
+  param            | type                           | description
+  -----            | :----:                         | -----------
+  ```image```      | Choice(AppcImage, DockerImage) | An optional filesystem image to use within this container.
+  ```volumes```    | List(Volume)                   | An optional list of volume mounts for this container.
+
+### Volume Object
+
+  param                  | type     | description
+  -----                  | :----:   | -----------
+  ```container_path```   | String   | Mount point inside the container.
+  ```host_path```        | String   | Path on the host to mount.
+  ```mode```             | Enum     | Mode of the mount, can be 'RW' or 'RO'.
+
+### AppcImage
+
+Describes an AppC filesystem image.
+
+  param          | type   | description
+  -----          | :----: | -----------
+  ```name```     | String | The name of the appc image.
+  ```image_id``` | String | The [image id](https://github.com/appc/spec/blob/master/spec/aci.md#image-id) of the appc image.
+
+### DockerImage
+
+Describes a Docker filesystem image.
+
+  param      | type   | description
+  -----      | :----: | -----------
+  ```name``` | String | The name of the docker image.
+  ```tag```  | String | The tag that identifies the docker image.
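+
+A hedged sketch combining the objects above: a Mesos container running a Docker filesystem
+image with a read-only volume (the image name, tag, and paths are placeholders):
+
+```
+container = Mesos(
+  image = DockerImage(name = 'myorg/myapp', tag = 'latest'),
+  volumes = [
+    Volume(container_path = '/etc/myapp', host_path = '/var/lib/myapp-config', mode = 'RO')
+  ]
+)
+```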
+
+
+### Docker Object
+
+*Note: In order to correctly execute processes inside a job, the Docker container must have Python 2.7 installed.*
+*Note: For a private docker registry, Mesos requires the docker credential file to be named `.dockercfg`, even though docker may create a credential file with a different name on various platforms. Also, the `.dockercfg` file needs to be copied into the sandbox using the `-thermos_executor_resources` flag specified when starting Aurora.*
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```image```      | String          | The name of the docker image to execute.  If the image does not exist locally it will be pulled with ```docker pull```.
+  ```parameters``` | List(Parameter) | Additional parameters to pass to the Docker engine.
+
+### Docker Parameter Object
+
+Docker CLI parameters. This needs to be enabled by the scheduler `-allow_docker_parameters` option.
+See [Docker Command Line Reference](https://docs.docker.com/reference/commandline/run/) for valid parameters.
+
+  param            | type            | description
+  -----            | :----:          | -----------
+  ```name```       | String          | The name of the docker parameter. E.g. volume
+  ```value```      | String          | The value of the parameter. E.g. /usr/local/bin:/usr/bin:rw
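+
+An illustrative sketch of a Docker container with an extra engine parameter (the image and
+parameter values are placeholders, and `-allow_docker_parameters` must be enabled on the
+scheduler for the parameter to be accepted):
+
+```
+container = Docker(
+  image = 'myorg/myapp:1.2.3',
+  parameters = [
+    # Passed through to the Docker engine as `--volume /usr/local/bin:/usr/bin:rw`.
+    Parameter(name = 'volume', value = '/usr/local/bin:/usr/bin:rw')
+  ]
+)
+```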
+
+
+### LifecycleConfig Objects
+
+*Note: The only lifecycle configuration supported is the HTTP lifecycle via the HttpLifecycleConfig.*
+
+  param          | type                | description
+  -----          | :----:              | -----------
+  ```http```     | HttpLifecycleConfig | Configure the lifecycle manager to send lifecycle commands to the task via HTTP.
+
+### HttpLifecycleConfig Objects
+
+*Note: The combined `graceful_shutdown_wait_secs` and `shutdown_wait_secs` time is implicitly upper bounded by the `--stop_timeout_in_secs` flag exposed by the executor (see options [here](https://github.com/apache/aurora/blob/master/src/main/python/apache/aurora/executor/bin/thermos_executor_main.py), default is 2 minutes). Therefore, if the user specifies values that add up to more than `--stop_timeout_in_secs`, the task will be killed earlier than the user anticipates (see the termination lifecycle [here](https://aurora.apache.org/documentation/latest/reference/task-lifecycle/#forceful-termination-killing-restarting)). Furthermore, `stop_timeout_in_secs` itself is implicitly upper bounded by two scheduler options: `transient_task_state_timeout` and `preemption_slot_hold_time` (see reference [here](http://aurora.apache.org/documentation/latest/reference/scheduler-configuration/)). If `stop_timeout_in_secs` exceeds either of these scheduler options, tasks could be designated as LOST or tasks utilizing preemption could lose their desired slot, respectively. Cluster operators should be aware of these timings should they change the defaults.*
+
+  param                             | type    | description
+  -----                             | :----:  | -----------
+  ```port```                        | String  | The named port to send POST commands. (Default: health)
+  ```graceful_shutdown_endpoint```  | String  | Endpoint to hit to indicate that a task should gracefully shutdown. (Default: /quitquitquit)
+  ```shutdown_endpoint```           | String  | Endpoint to hit to give a task its final warning before being killed. (Default: /abortabortabort)
+  ```graceful_shutdown_wait_secs``` | Integer | The amount of time (in seconds) to wait after hitting the ```graceful_shutdown_endpoint``` before proceeding with the [task termination lifecycle](https://aurora.apache.org/documentation/latest/reference/task-lifecycle/#forceful-termination-killing-restarting). (Default: 5)
+  ```shutdown_wait_secs```          | Integer | The amount of time (in seconds) to wait after hitting the ```shutdown_endpoint``` before proceeding with the [task termination lifecycle](https://aurora.apache.org/documentation/latest/reference/task-lifecycle/#forceful-termination-killing-restarting). (Default: 5)
+
+#### graceful\_shutdown\_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint to request that the task gracefully shut itself down.  This is a
+courtesy call before the `shutdown_endpoint` is invoked
+`graceful_shutdown_wait_secs` seconds later.
+
+#### shutdown_endpoint
+
+If the Job is listening on the port as specified by the HttpLifecycleConfig
+(default: `health`), an HTTP POST request will be sent over localhost to this
+endpoint as a final warning before the task is shut down.  If the task
+does not shut down on its own after `shutdown_wait_secs` seconds, it will be
+forcefully killed.
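+
+A hedged sketch of a lifecycle configuration using the fields described above (the struct is
+referenced here as `LifecycleConfig`, matching the table heading; the endpoint and wait values
+shown are the defaults):
+
+```
+lifecycle = LifecycleConfig(
+  http = HttpLifecycleConfig(
+    # POST here first to ask the task to shut down gracefully...
+    graceful_shutdown_endpoint = '/quitquitquit',
+    graceful_shutdown_wait_secs = 5,
+    # ...then here as the final warning before the kill escalation continues.
+    shutdown_endpoint = '/abortabortabort',
+    shutdown_wait_secs = 5
+  )
+)
+```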
+
+
+### SlaPolicy Objects
+
+Configuration for specifying custom [SLA requirements](../../features/sla-requirements/) for a job. There are three supported SLA policies,
+namely [`CountSlaPolicy`](#countslapolicy-objects), [`PercentageSlaPolicy`](#percentageslapolicy-objects) and [`CoordinatorSlaPolicy`](#coordinatorslapolicy-objects).
+
+
+### CountSlaPolicy Objects
+
+  param                             | type    | description
+  -----                             | :----:  | -----------
+  ```count```                       | Integer | The number of active instances required every `duration_secs`.
+  ```duration_secs```               | Integer | Minimum time duration a task needs to be `RUNNING` to be treated as active.
+
+### PercentageSlaPolicy Objects
+
+  param                             | type    | description
+  -----                             | :----:  | -----------
+  ```percentage```                  | Float   | The percentage of active instances required every `duration_secs`.
+  ```duration_secs```               | Integer | Minimum time duration a task needs to be `RUNNING` to be treated as active.
+
+### CoordinatorSlaPolicy Objects
+
+  param                             | type    | description
+  -----                             | :----:  | -----------
+  ```coordinator_url```             | String  | The URL to the [Coordinator](../../features/sla-requirements/#coordinator) service to be contacted before performing SLA affecting actions (job updates, host drains etc).
+  ```status_key```                  | String  | The field in the Coordinator response that indicates the SLA status for working on the task. (Default: `drain`)
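+
+As an illustrative sketch (assuming the job-level `sla_policy` field; the percentage and
+duration values are placeholders), a custom SLA policy could look like this:
+
+```
+sla_policy = PercentageSlaPolicy(
+  # Require at least 95% of instances to be active...
+  percentage = 95,
+  # ...where an instance counts as active only after 30 minutes of RUNNING.
+  duration_secs = 1800
+)
+```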
+
+
+Specifying Scheduling Constraints
+=================================
+
+In the `Job` object there is a map `constraints` from String to String
+allowing the user to tailor the schedulability of tasks within the job.
+
+The constraint map's key is the attribute name on which we
+constrain Tasks within our Job. The value specifies how we constrain them.
+There are two types of constraints: *limit constraints* and *value
+constraints*.
+
+| constraint    | description
+| ------------- | --------------
+| Limit         | A string that specifies a limit for a constraint. Starts with <code>'limit:</code> followed by an Integer and closing single quote, such as ```'limit:1'```.
+| Value         | A string that specifies a value for a constraint. To include a list of values, separate the values using commas. To negate the values of a constraint, start with a ```!```.
+
+Further details can be found in the [Scheduling Constraints](../../features/constraints/) feature
+description.
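+
+A hedged sketch of a constraints map on a job (the attribute names below are examples; the
+attributes actually available depend on how your agents are configured):
+
+```
+constraints = {
+  # Limit constraint: at most one task of this job per host.
+  'host': 'limit:1',
+  # Value constraint: only schedule on hosts whose `os` attribute is centos7.
+  'os': 'centos7',
+}
+```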
+
+
+Template Namespaces
+===================
+
+Currently, a few Pystachio namespaces have special semantics. Using them
+in your configuration allows you to tailor application behavior
+through environment introspection or to interact in special ways with the
+Aurora client or Aurora-provided services.
+
+### mesos Namespace
+
+The `mesos` namespace contains variables which relate to the `mesos` agent
+which launched the task. The `instance` variable can be used
+to distinguish between Task replicas.
+
+| variable name     | type       | description
+| --------------- | :--------: | -------------
+| ```instance```    | Integer    | The instance number of the created task. A job with 5 replicas has instance numbers 0, 1, 2, 3, and 4.
+| ```hostname``` | String | The instance hostname that the task was launched on.
+
+Please note, there is no uniqueness guarantee for `instance` in the presence of
+network partitions. If that is required, it should be baked in at the application
+level using a distributed coordination service such as Zookeeper.
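+
+For illustration (the process name and command are made up), the namespace can be interpolated
+directly into a `Process` command line:
+
+```
+hello = Process(
+  name = 'hello',
+  # Each instance logs its own instance number and the host it landed on.
+  cmdline = 'echo "instance {{mesos.instance}} running on {{mesos.hostname}}"'
+)
+```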
+
+### thermos Namespace
+
+The `thermos` namespace contains variables that work directly on the
+Thermos platform in addition to Aurora. This namespace is fully
+compatible with Tasks invoked via the `thermos` CLI.
+
+| variable      | type                     | description                        |
+| :----------:  | ---------                | ------------                       |
+| ```ports```   | map of string to Integer | A map of names to port numbers     |
+| ```task_id``` | string                   | The task ID assigned to this task. |
+
+The `thermos.ports` namespace is automatically populated by Aurora when
+invoking tasks on Mesos. When running the `thermos` command directly,
+these ports must be explicitly mapped with the `-P` option.
+
+For example, if `{{thermos.ports[http]}}` is specified in a `Process`
+configuration, it is automatically extracted and auto-populated by
+Aurora, but must be specified with, for example, `thermos -P http:12345`
+to map `http` to port 12345 when running via the CLI.
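+
+A minimal sketch of such a process (the server command is a placeholder):
+
+```
+web = Process(
+  name = 'web',
+  # Aurora allocates the `http` port and substitutes its number here at launch time.
+  cmdline = 'python -m SimpleHTTPServer {{thermos.ports[http]}}'
+)
+```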
diff --git a/source/documentation/latest/reference/observer-configuration.md b/source/documentation/latest/reference/observer-configuration.md
new file mode 100644
index 0000000..5b9ce2b
--- /dev/null
+++ b/source/documentation/latest/reference/observer-configuration.md
@@ -0,0 +1,108 @@
+# Observer Configuration Reference
+
+The Aurora/Thermos observer can take a variety of configuration options through command-line arguments.
+A list of the available options can be seen by running `thermos_observer --long-help`.
+
+Please refer to the [Operator Configuration Guide](../../operations/configuration/) for details on how
+to properly set the most important options.
+
+```
+$ thermos_observer.pex --long-help
+Options:
+  -h, --help, --short-help
+                        show this help message and exit.
+  --long-help           show options from all registered modules, not just the
+                        __main__ module.
+  --mesos-root=MESOS_ROOT
+                        The mesos root directory to search for Thermos
+                        executor sandboxes [default: /var/lib/mesos]
+  --ip=IP               The IP address the observer will bind to. [default:
+                        0.0.0.0]
+  --port=PORT           The port on which the observer should listen.
+                        [default: 1338]
+  --polling_interval_secs=POLLING_INTERVAL_SECS
+                        The number of seconds between observer refresh
+                        attempts. [default: 5]
+  --disable_task_resource_collection
+                        Disable collection of CPU and memory statistics for
+                        each active task. Those can be expensive to collect if
+                        there are hundreds of active tasks per host. [default:
+                        False]
+  --task_process_collection_interval_secs=TASK_PROCESS_COLLECTION_INTERVAL_SECS
+                        The number of seconds between per task process
+                        resource collections. [default: 20]
+  --task_disk_collection_interval_secs=TASK_DISK_COLLECTION_INTERVAL_SECS
+                        The number of seconds between per task disk resource
+                        collections. [default: 60]
+  --enable_mesos_disk_collector
+                        Delegate per task disk usage collection to agent.
+                        Should be enabled in conjunction to disk isolation in
+                        Mesos-agent. This is not compatible with an
+                        authenticated agent API. [default: False]
+  --agent_api_url=AGENT_API_URL
+                        Mesos Agent API url. [default:
+                        http://localhost:5051/containers]
+  --executor_id_json_path=EXECUTOR_ID_JSON_PATH
+                        `jmespath` to executor_id key in agent response json
+                        object. [default: executor_id]
+  --disk_usage_json_path=DISK_USAGE_JSON_PATH
+                        `jmespath` to disk usage bytes value in agent response
+                        json object. [default: statistics.disk_used_bytes]
+
+  From module twitter.common.app:
+    --app_daemonize     Daemonize this application. [default: False]
+    --app_profile_output=FILENAME
+                        Dump the profiling output to a binary profiling
+                        format. [default: None]
+    --app_daemon_stderr=TWITTER_COMMON_APP_DAEMON_STDERR
+                        Direct this app's stderr to this file if daemonized.
+                        [default: /dev/null]
+    --app_debug         Print extra debugging information during application
+                        initialization. [default: False]
+    --app_rc_filename   Print the filename for the rc file and quit. [default:
+                        False]
+    --app_daemon_stdout=TWITTER_COMMON_APP_DAEMON_STDOUT
+                        Direct this app's stdout to this file if daemonized.
+                        [default: /dev/null]
+    --app_profiling     Run profiler on the code while it runs.  Note this can
+                        cause slowdowns. [default: False]
+    --app_ignore_rc_file
+                        Ignore default arguments from the rc file. [default:
+                        False]
+    --app_pidfile=TWITTER_COMMON_APP_PIDFILE
+                        The pidfile to use if --app_daemonize is specified.
+                        [default: None]
+
+  From module twitter.common.log.options:
+    --log_to_stdout=[scheme:]LEVEL
+                        OBSOLETE - legacy flag, use --log_to_stderr instead.
+                        [default: ERROR]
+    --log_to_stderr=[scheme:]LEVEL
+                        The level at which logging to stderr [default: ERROR].
+                        Takes either LEVEL or scheme:LEVEL, where LEVEL is one
+                        of ['INFO', 'NONE', 'WARN', 'ERROR', 'DEBUG', 'FATAL']
+                        and scheme is one of ['google', 'plain'].
+    --log_to_disk=[scheme:]LEVEL
+                        The level at which logging to disk [default: INFO].
+                        Takes either LEVEL or scheme:LEVEL, where LEVEL is one
+                        of ['INFO', 'NONE', 'WARN', 'ERROR', 'DEBUG', 'FATAL']
+                        and scheme is one of ['google', 'plain'].
+    --log_dir=DIR       The directory into which log files will be generated
+                        [default: /var/tmp].
+    --log_simple        Write a single log file rather than one log file per
+                        log level [default: False].
+    --log_to_scribe=[scheme:]LEVEL
+                        The level at which logging to scribe [default: NONE].
+                        Takes either LEVEL or scheme:LEVEL, where LEVEL is one
+                        of ['INFO', 'NONE', 'WARN', 'ERROR', 'DEBUG', 'FATAL']
+                        and scheme is one of ['google', 'plain'].
+    --scribe_category=CATEGORY
+                        The category used when logging to the scribe daemon.
+                        [default: python_default].
+    --scribe_buffer     Buffer messages when scribe is unavailable rather than
+                        dropping them. [default: False].
+    --scribe_host=HOST  The host running the scribe daemon. [default:
+                        localhost].
+    --scribe_port=PORT  The port used to connect to the scribe daemon.
+                        [default: 1463].
+```
diff --git a/source/documentation/latest/reference/scheduler-configuration.md b/source/documentation/latest/reference/scheduler-configuration.md
new file mode 100644
index 0000000..2d30133
--- /dev/null
+++ b/source/documentation/latest/reference/scheduler-configuration.md
@@ -0,0 +1,476 @@
+# Scheduler Configuration Reference
+
+The Aurora scheduler can take a variety of configuration options through command-line arguments.
+A list of the available options can be seen by running `aurora-scheduler -help`.
+
+Please refer to the [Operator Configuration Guide](../../operations/configuration/) for details on how
+to properly set the most important options.
+
+```
+Usage: org.apache.aurora.scheduler.app.SchedulerMain [options]
+  Options:
+    -allow_container_volumes
+      Allow passing in volumes in the job. Enabling this could pose a
+      privilege escalation threat.
+      Default: false
+    -allow_docker_parameters
+      Allow to pass docker container parameters in the job.
+      Default: false
+    -allow_gpu_resource
+      Allow jobs to request Mesos GPU resource.
+      Default: false
+    -allowed_container_types
+      Container types that are allowed to be used by jobs.
+      Default: [MESOS]
+    -allowed_job_environments
+      Regular expression describing the environments that are allowed to be
+      used by jobs.
+      Default: ^(prod|devel|test|staging\d*)$
+    -async_slot_stat_update_interval
+      Interval on which to try to update open slot stats.
+      Default: (1, mins)
+    -async_task_stat_update_interval
+      Interval on which to try to update resource consumption stats.
+      Default: (1, hrs)
+    -async_worker_threads
+      The number of worker threads to process async task operations with.
+      Default: 8
+  * -backup_dir
+      Directory to store backups under. Will be created if it does not exist.
+    -backup_interval
+      Minimum interval on which to write a storage backup.
+      Default: (1, hrs)
+  * -cluster_name
+      Name to identify the cluster being served.
+    -cron_scheduler_num_threads
+      Number of threads to use for the cron scheduler thread pool.
+      Default: 10
+    -cron_scheduling_max_batch_size
+      The maximum number of triggered cron jobs that can be processed in a
+      batch.
+      Default: 10
+    -cron_start_initial_backoff
+      Initial backoff delay while waiting for a previous cron run to be
+      killed.
+      Default: (5, secs)
+    -cron_start_max_backoff
+      Max backoff delay while waiting for a previous cron run to be killed.
+      Default: (1, mins)
+    -cron_timezone
+      TimeZone to use for cron predictions.
+      Default: GMT
+    -custom_executor_config
+      Path to custom executor settings configuration file.
+    -default_docker_parameters
+      Default docker parameters for any job that does not explicitly declare
+      parameters.
+      Default: []
+    -dlog_max_entry_size
+      Specifies the maximum entry size to append to the log. Larger entries
+      will be split across entry Frames.
+      Default: (512, KB)
+    -dlog_snapshot_interval
+      Specifies the frequency at which snapshots of local storage are taken
+      and written to the log.
+      Default: (1, hrs)
+    -enable_cors_for
+      List of domains for which CORS support should be enabled.
+    -enable_mesos_fetcher
+      Allow jobs to pass URIs to the Mesos Fetcher. Note that enabling this
+      feature could pose a privilege escalation threat.
+      Default: false
+    -enable_preemptor
+      Enable the preemptor and preemption
+      Default: true
+    -enable_revocable_cpus
+      Treat CPUs as a revocable resource.
+      Default: true
+    -enable_revocable_ram
+      Treat RAM as a revocable resource.
+      Default: false
+    -enable_update_affinity
+      Enable best-effort affinity of task updates.
+      Default: false
+    -executor_user
+      User to start the executor. Defaults to "root". Set this to an
+      unprivileged user if the mesos master was started with
+      "--no-root_submissions". If set to anything other than "root", the
+      executor will ignore the "role" setting for jobs since it can't use
+      setuid() anymore. This means that all your jobs will run under the
+      specified user and the user has to exist on the Mesos agents.
+      Default: root
+    -first_schedule_delay
+      Initial amount of time to wait before first attempting to schedule a
+      PENDING task.
+      Default: (1, ms)
+    -flapping_task_threshold
+      A task that repeatedly runs for less than this time is considered to be
+      flapping.
+      Default: (5, mins)
+    -framework_announce_principal
+      When 'framework_authentication_file' flag is set, the FrameworkInfo
+      registered with the mesos master will also contain the principal. This
+      is necessary if you intend to use mesos authorization via mesos ACLs.
+      The default will change in a future release. Changing this value is
+      backwards incompatible. For details, see MESOS-703.
+      Default: false
+    -framework_authentication_file
+      Properties file which contains framework credentials to authenticate
+      with Mesosmaster. Must contain the properties
+      'aurora_authentication_principal' and 'aurora_authentication_secret'.
+    -framework_failover_timeout
+      Time after which a framework is considered deleted.  SHOULD BE VERY
+      HIGH.
+      Default: (21, days)
+    -framework_name
+      Name used to register the Aurora framework with Mesos.
+      Default: Aurora
+    -global_container_mounts
+      A comma separated list of mount points (in host:container form) to mount
+      into all (non-mesos) containers.
+      Default: []
+    -history_max_per_job_threshold
+      Maximum number of terminated tasks to retain in a job history.
+      Default: 100
+    -history_min_retention_threshold
+      Minimum guaranteed time for task history retention before any pruning is
+      attempted.
+      Default: (1, hrs)
+    -history_prune_threshold
+      Time after which the scheduler will prune terminated task history.
+      Default: (2, days)
+    -hold_offers_forever
+      Hold resource offers indefinitely, disabling automatic offer decline
+      settings.
+      Default: false
+    -host_maintenance_polling_interval
+      Interval between polling for pending host maintenance requests.
+      Default: (1, mins)
+    -hostname
+      The hostname to advertise in ZooKeeper instead of the locally-resolved
+      hostname.
+    -http_authentication_mechanism
+      HTTP Authentication mechanism to use.
+      Default: NONE
+      Possible Values: [NONE, BASIC, NEGOTIATE]
+    -http_port
+      The port to start an HTTP server on.  Default value will choose a random
+      port.
+      Default: 0
+    -initial_flapping_task_delay
+      Initial amount of time to wait before attempting to schedule a flapping
+      task.
+      Default: (30, secs)
+    -initial_schedule_penalty
+      Initial amount of time to wait before attempting to schedule a task that
+      has failed to schedule.
+      Default: (1, secs)
+    -initial_task_kill_retry_interval
+      When killing a task, retry after this delay if mesos has not responded,
+      backing off up to transient_task_state_timeout
+      Default: (15, secs)
+    -ip
+      The ip address to listen. If not set, the scheduler will listen on all
+      interfaces.
+    -job_update_history_per_job_threshold
+      Maximum number of completed job updates to retain in a job update
+      history.
+      Default: 10
+    -job_update_history_pruning_interval
+      Job update history pruning interval.
+      Default: (15, mins)
+    -job_update_history_pruning_threshold
+      Time after which the scheduler will prune completed job update history.
+      Default: (30, days)
+    -kerberos_debug
+      Produce additional Kerberos debugging output.
+      Default: false
+    -kerberos_server_keytab
+      Path to the server keytab.
+    -kerberos_server_principal
+      Kerberos server principal to use, usually of the form
+      HTTP/aurora.example.com@EXAMPLE.COM
+    -max_flapping_task_delay
+      Maximum delay between attempts to schedule a flapping task.
+      Default: (5, mins)
+    -max_leading_duration
+      After leading for this duration, the scheduler should commit suicide.
+      Default: (1, days)
+    -max_parallel_coordinated_maintenance
+      Maximum number of coordinators that can be contacted in parallel.
+      Default: 10
+    -max_registration_delay
+      Max allowable delay to allow the driver to register before aborting
+      Default: (1, mins)
+    -max_reschedule_task_delay_on_startup
+      Upper bound of random delay for pending task rescheduling on scheduler
+      startup.
+      Default: (30, secs)
+    -max_saved_backups
+      Maximum number of backups to retain before deleting the oldest backups.
+      Default: 48
+    -max_schedule_attempts_per_sec
+      Maximum number of scheduling attempts to make per second.
+      Default: 40.0
+    -max_schedule_penalty
+      Maximum delay between attempts to schedule a PENDING tasks.
+      Default: (1, mins)
+    -max_sla_duration_secs
+      Maximum duration window for which SLA requirements are to be
+      satisfied.This does not apply to jobs that have a CoordinatorSlaPolicy.
+      Default: (2, hrs)
+    -max_status_update_batch_size
+      The maximum number of status updates that can be processed in a batch.
+      Default: 1000
+    -max_task_event_batch_size
+      The maximum number of task state change events that can be processed in
+      a batch.
+      Default: 300
+    -max_tasks_per_job
+      Maximum number of allowed tasks in a single job.
+      Default: 4000
+    -max_tasks_per_schedule_attempt
+      The maximum number of tasks to pick in a single scheduling attempt.
+      Default: 5
+    -max_update_instance_failures
+      Upper limit on the number of failures allowed during a job update. This
+      helps cap potentially unbounded entries into storage.
+      Default: 20000
+    -mesos_driver
+      Which Mesos Driver to use
+      Default: SCHEDULER_DRIVER
+      Possible Values: [SCHEDULER_DRIVER, V0_DRIVER, V1_DRIVER]
+  * -mesos_master_address
+      Address for the mesos master, can be a socket address or zookeeper path.
+    -mesos_role
+      The Mesos role this framework will register as. The default is to left
+      this empty, and the framework will register without any role and only
+      receive unreserved resources in offer.
+    -min_offer_hold_time
+      Minimum amount of time to hold a resource offer before declining.
+      Default: (5, mins)
+    -min_required_instances_for_sla_check
+      Minimum number of instances required for a job to be eligible for SLA
+      check. This does not apply to jobs that have a CoordinatorSlaPolicy.
+      Default: 20
+    -native_log_election_retries
+      The maximum number of attempts to obtain a new log writer.
+      Default: 20
+    -native_log_election_timeout
+      The timeout for a single attempt to obtain a new log writer.
+      Default: (15, secs)
+    -native_log_file_path
+      Path to a file to store the native log data in.  If the parent directory
+      doesnot exist it will be created.
+    -native_log_quorum_size
+      The size of the quorum required for all log mutations.
+      Default: 1
+    -native_log_read_timeout
+      The timeout for doing log reads.
+      Default: (5, secs)
+    -native_log_write_timeout
+      The timeout for doing log appends and truncations.
+      Default: (3, secs)
+    -native_log_zk_group_path
+      A zookeeper node for use by the native log to track the master
+      coordinator.
+    -offer_filter_duration
+      Duration after which we expect Mesos to re-offer unused resources. A
+      short duration improves scheduling performance in smaller clusters, but
+      might lead to resource starvation for other frameworks if you run many
+      frameworks in your cluster.
+      Default: (5, secs)
+    -offer_hold_jitter_window
+      Maximum amount of random jitter to add to the offer hold time window.
+      Default: (1, mins)
+    -offer_order
+      Iteration order for offers, to influence task scheduling. Multiple
+      orderings will be compounded together. E.g. CPU,MEMORY,RANDOM would sort
+      first by cpus offered, then memory and finally would randomize any equal
+      offers.
+      Default: [RANDOM]
+    -offer_reservation_duration
+      Time to reserve a agent's offers while trying to satisfy a task
+      preempting another.
+      Default: (3, mins)
+    -offer_set_module
+      Custom Guice module to provide a custom OfferSet.
+      Default: class org.apache.aurora.scheduler.offers.OfferManagerModule$OfferSetModule
+    -offer_static_ban_cache_max_size
+      The number of offers to hold in the static ban cache. If no value is
+      specified, the cache will grow indefinitely. However, entries will
+      expire within 'min_offer_hold_time' + 'offer_hold_jitter_window' of
+      being written.
+      Default: 9223372036854775807
+    -partition_aware
+      Enable paritition-aware status updates.
+      Default: false
+    -populate_discovery_info
+      If true, Aurora populates DiscoveryInfo field of Mesos TaskInfo.
+      Default: false
+    -preemption_delay
+      Time interval after which a pending task becomes eligible to preempt
+      other tasks
+      Default: (3, mins)
+    -preemption_reservation_max_batch_size
+      The maximum number of reservations for a task group to be made in a
+      batch.
+      Default: 5
+    -preemption_slot_finder_modules
+      Guice modules for custom preemption slot searching for pending tasks.
+      Default: [class org.apache.aurora.scheduler.preemptor.PendingTaskProcessorModule, class org.apache.aurora.scheduler.preemptor.PreemptionVictimFilterModule]
+    -preemption_slot_hold_time
+      Time to hold a preemption slot found before it is discarded.
+      Default: (5, mins)
+    -preemption_slot_search_initial_delay
+      Initial amount of time to delay preemption slot searching after
+      scheduler start up.
+      Default: (3, mins)
+    -preemption_slot_search_interval
+      Time interval between pending task preemption slot searches.
+      Default: (1, mins)
+    -receive_revocable_resources
+      Allows receiving revocable resource offers from Mesos.
+      Default: false
+    -reconciliation_explicit_batch_interval
+      Interval between explicit batch reconciliation requests.
+      Default: (5, secs)
+    -reconciliation_explicit_batch_size
+      Number of tasks in a single batch request sent to Mesos for explicit
+      reconciliation.
+      Default: 1000
+    -reconciliation_explicit_interval
+      Interval on which scheduler will ask Mesos for status updates of
+      allnon-terminal tasks known to scheduler.
+      Default: (60, mins)
+    -reconciliation_implicit_interval
+      Interval on which scheduler will ask Mesos for status updates of
+      allnon-terminal tasks known to Mesos.
+      Default: (60, mins)
+    -reconciliation_initial_delay
+      Initial amount of time to delay task reconciliation after scheduler
+      start up.
+      Default: (1, mins)
+    -reconciliation_schedule_spread
+      Difference between explicit and implicit reconciliation intervals
+      intended to create a non-overlapping task reconciliation schedule.
+      Default: (30, mins)
+    -require_docker_use_executor
+      If false, Docker tasks may run without an executor (EXPERIMENTAL)
+      Default: true
+    -scheduling_max_batch_size
+      The maximum number of scheduling attempts that can be processed in a
+      batch.
+      Default: 3
+    -serverset_endpoint_name
+      Name of the scheduler endpoint published in ZooKeeper.
+      Default: http
+  * -serverset_path
+      ZooKeeper ServerSet path to register at.
+    -shiro_after_auth_filter
+      Fully qualified class name of the servlet filter to be applied after the
+      shiro auth filters are applied.
+    -shiro_credentials_matcher
+      The shiro credentials matcher to use (will be constructed by Guice).
+      Default: class org.apache.shiro.authc.credential.SimpleCredentialsMatcher
+    -shiro_ini_path
+      Path to shiro.ini for authentication and authorization configuration.
+    -shiro_realm_modules
+      Guice modules for configuring Shiro Realms.
+      Default: [class org.apache.aurora.scheduler.http.api.security.IniShiroRealmModule]
+    -sla_aware_action_max_batch_size
+      The maximum number of sla aware update actions that can be processed in
+      a batch.
+      Default: 300
+    -sla_aware_kill_non_prod
+      Enables SLA awareness for drain and and update for non-production tasks
+      Default: false
+    -sla_aware_kill_retry_max_delay
+      Maximum amount of time to wait between attempting to perform an
+      SLA-Aware kill on a task.
+      Default: (5, mins)
+    -sla_aware_kill_retry_min_delay
+      Minimum amount of time to wait between attempting to perform an
+      SLA-Aware kill on a task.
+      Default: (1, mins)
+    -sla_coordinator_timeout
+      Timeout interval for communicating with Coordinator.
+      Default: (1, mins)
+    -sla_non_prod_metrics
+      Metric categories collected for non production tasks.
+      Default: []
+    -sla_prod_metrics
+      Metric categories collected for production tasks.
+      Default: [JOB_UPTIMES, PLATFORM_UPTIME, MEDIANS]
+    -sla_stat_refresh_interval
+      The SLA stat refresh interval.
+      Default: (1, mins)
+    -stat_retention_period
+      Time for a stat to be retained in memory before expiring.
+      Default: (1, hrs)
+    -stat_sampling_interval
+      Statistic value sampling interval.
+      Default: (1, secs)
+    -task_assigner_modules
+      Guice modules for customizing task assignment.
+      Default: [class org.apache.aurora.scheduler.scheduling.TaskAssignerImplModule]
+    -thermos_executor_cpu
+      The number of CPU cores to allocate for each instance of the executor.
+      Default: 0.25
+    -thermos_executor_flags
+      Extra arguments to be passed to the thermos executor
+    -thermos_executor_path
+      Path to the thermos executor entry point.
+    -thermos_executor_ram
+      The amount of RAM to allocate for each instance of the executor.
+      Default: (128, MB)
+    -thermos_executor_resources
+      A comma separated list of additional resources to copy into the
+      sandbox.Note: if thermos_executor_path is not the thermos_executor.pex
+      file itself, this must include it.
+      Default: []
+    -thermos_home_in_sandbox
+      If true, changes HOME to the sandbox before running the executor. This
+      primarily has the effect of causing the executor and runner to extract
+      themselves into the sandbox.
+      Default: false
+    -thrift_method_interceptor_modules
+      Custom Guice module(s) to provide additional Thrift method interceptors.
+      Default: []
+    -tier_config
+      Configuration file defining supported task tiers, task traits and
+      behaviors.
+    -transient_task_state_timeout
+      The amount of time after which to treat a task stuck in a transient
+      state as LOST.
+      Default: (5, mins)
+    -unavailability_threshold
+      Threshold time, when running tasks should be drained from a host, before
+      a host becomes unavailable. Should be greater than min_offer_hold_time +
+      offer_hold_jitter_window.
+      Default: (6, mins)
+    -update_affinity_reservation_hold_time
+      How long to wait for a reserved agent to reoffer freed up resources.
+      Default: (3, mins)
+    -viz_job_url_prefix
+      URL prefix for job container stats.
+      Default: <empty string>
+    -webhook_config
+      Path to webhook configuration file.
+    -zk_chroot_path
+      chroot path to use for the ZooKeeper connections
+    -zk_connection_timeout
+      The ZooKeeper connection timeout.
+      Default: (10, secs)
+    -zk_digest_credentials
+      user:password to use when authenticating with ZooKeeper.
+  * -zk_endpoints
+      Endpoint specification for the ZooKeeper servers.
+    -zk_in_proc
+      Launches an embedded zookeeper server for local testing causing
+      -zk_endpoints to be ignored if specified.
+      Default: false
+    -zk_session_timeout
+      The ZooKeeper session timeout.
+      Default: (15, secs)
+```
diff --git a/source/documentation/latest/reference/scheduler-endpoints.md b/source/documentation/latest/reference/scheduler-endpoints.md
new file mode 100644
index 0000000..ddae76b
--- /dev/null
+++ b/source/documentation/latest/reference/scheduler-endpoints.md
@@ -0,0 +1,19 @@
+# HTTP endpoints
+
+There are a number of HTTP endpoints that the Aurora scheduler exposes. These allow various
+operational tasks to be performed on the scheduler. Below is an (incomplete) list of such endpoints
+and a brief explanation of what they do.
+
+## Leader health
+The /leaderhealth endpoint enables performing health checks on the scheduler instances in order
+to forward requests to the leading scheduler. This is typically used by a load balancer such as
+HAProxy or AWS ELB.
+
+When an HTTP GET request is issued on this endpoint, it responds as follows:
+
+- If the instance that received the GET request is the leading scheduler, an HTTP status code of
+  `200 OK` is returned.
+- If the instance that received the GET request is not the leading scheduler but a leader does
+  exist, an HTTP status code of `503 SERVICE_UNAVAILABLE` is returned.
+- If no leader currently exists or the leader is unknown, an HTTP status code of `502 BAD_GATEWAY`
+  is returned.
diff --git a/source/documentation/latest/reference/task-lifecycle.md b/source/documentation/latest/reference/task-lifecycle.md
new file mode 100644
index 0000000..b4642b3
--- /dev/null
+++ b/source/documentation/latest/reference/task-lifecycle.md
@@ -0,0 +1,175 @@
+# Task Lifecycle
+
+When Aurora reads a configuration file and finds a `Job` definition, it:
+
+1.  Evaluates the `Job` definition.
+2.  Splits the `Job` into its constituent `Task`s.
+3.  Sends those `Task`s to the scheduler.
+4.  The scheduler puts the `Task`s into `PENDING` state, starting each
+    `Task`'s life cycle.
+
+
+![Life of a task](../images/lifeofatask.png)
+
+Please note, a couple of task states described below are missing from
+this state diagram.
+
+
+## PENDING to RUNNING states
+
+When a `Task` is in the `PENDING` state, the scheduler constantly
+searches for machines satisfying that `Task`'s resource request
+requirements (RAM, disk space, CPU time) while maintaining configuration
+constraints such as "a `Task` must run on machines dedicated to a
+particular role" or attribute limit constraints such as "at most 2
+`Task`s from the same `Job` may run on each rack". When the scheduler
+finds a suitable match, it assigns the `Task` to a machine and puts the
+`Task` into the `ASSIGNED` state.
+
+From the `ASSIGNED` state, the scheduler sends an RPC to the agent
+machine containing `Task` configuration, which the agent uses to spawn
+an executor responsible for the `Task`'s lifecycle. When the scheduler
+receives an acknowledgment that the machine has accepted the `Task`,
+the `Task` goes into `STARTING` state.
+
+`STARTING` state initializes a `Task` sandbox. When the sandbox is fully
+initialized, Thermos begins to invoke `Process`es. The agent
+machine sends an update to the scheduler that the `Task` is
+in `RUNNING` state, but only once the task satisfies its liveness requirements.
+See [Health Checking](../features/services#health-checking) for more details
+on how to configure health checks.
+
+
+
+## RUNNING to terminal states
+
+There are various ways that an active `Task` can transition into a terminal
+state. By definition, it can never leave this state. However, depending on
+the nature of the termination and the originating `Job` definition
+(e.g. `service`, `max_task_failures`), a replacement `Task` might be
+scheduled.
+
+### Natural Termination: FINISHED, FAILED
+
+A `RUNNING` `Task` can terminate without direct user interaction. For
+example, it may be a finite computation that finishes, even something as
+simple as `echo hello world.`, or it could be an exceptional condition in
+a long-lived service. If the `Task` is successful (its underlying
+processes have succeeded with exit status `0` or finished without
+reaching failure limits) it moves into `FINISHED` state. If it finished
+after reaching a set of failure limits, it goes into `FAILED` state.
+
+A terminated `Task` which is subject to rescheduling will be temporarily
+`THROTTLED` if it is considered to be flapping. A task is flapping if its
+previous invocation was terminated after less than 5 minutes (scheduler
+default). The time penalty a task has to remain in the `THROTTLED` state,
+before it is eligible for rescheduling, increases with each consecutive
+failure.
+
+### Forceful Termination: KILLING, RESTARTING
+
+You can terminate a `Task` by issuing an `aurora job kill` command, which
+moves it into `KILLING` state. The scheduler then sends the agent a
+request to terminate the `Task`. If the scheduler receives a successful
+response, it moves the Task into `KILLED` state and never restarts it.
+
+If a `Task` is forced into the `RESTARTING` state via the `aurora job restart`
+command, the scheduler kills the underlying task but in parallel schedules
+an identical replacement for it.
+
+In any case, the responsible executor on the agent follows an escalation
+sequence when killing a running task:
+
+  1. If a `HttpLifecycleConfig` is not present, skip to (4).
+  2. Send a POST to the `graceful_shutdown_endpoint` and wait
+  `graceful_shutdown_wait_secs` seconds.
+  3. Send a POST to the `shutdown_endpoint` and wait
+  `shutdown_wait_secs` seconds.
+  4. Send SIGTERM (`kill`) and wait at most `finalization_wait` seconds.
+  5. Send SIGKILL (`kill -9`).
+
+If the executor notices that all `Process`es in a `Task` have aborted
+during this sequence, it will not proceed with subsequent steps.
+Note that graceful shutdown is best-effort, and due to the many
+inevitable realities of distributed systems, it may not be performed.
+
+### Unexpected Termination: LOST
+
+If a `Task` stays in a transient task state for too long (such as `ASSIGNED`
+or `STARTING`), the scheduler forces it into `LOST` state, creating a new
+`Task` in its place that's sent into `PENDING` state.
+
+In addition, if the Mesos core tells the scheduler that an agent has
+become unhealthy (or outright disappeared), the `Task`s assigned to that
+agent go into `LOST` state and new `Task`s are created in their place.
+From `PENDING` state, there is no guarantee a `Task` will be reassigned
+to the same machine unless job constraints explicitly force it there.
+
+
+## RUNNING to PARTITIONED states
+If Aurora is configured to enable partition awareness, a task which is in a
+running state can transition to `PARTITIONED`. This happens when the state
+of the task in Mesos becomes unknown. By default Aurora errs on the side of
+availability, so all tasks that transition to `PARTITIONED` are immediately
+transitioned to `LOST`.
+
+This policy is not ideal for all types of workloads you may wish to run in
+your Aurora cluster, e.g. for jobs where task failures are very expensive.
+So job owners may set their own `PartitionPolicy` where they can control
+how long to remain in `PARTITIONED` before transitioning to `LOST`. Or they
+can disable any automatic attempts to `reschedule` when in `PARTITIONED`,
+effectively waiting out the partition for as long as possible.
+
+
+## PARTITIONED and transient states
+The `PartitionPolicy` provided by users only applies to tasks which are
+currently running. When tasks are moving in and out of transient states,
+e.g. tasks being updated, restarted, preempted, etc., `PARTITIONED` tasks
+are moved immediately to `LOST`. This prevents situations where system
+or user-initiated actions are blocked indefinitely waiting for partitions
+to resolve (that may never be resolved).
+
+
+### Giving Priority to Production Tasks: PREEMPTING
+
+Sometimes a Task needs to be interrupted, such as when a non-production
+Task's resources are needed by a higher priority production Task. This
+type of interruption is called a *pre-emption*. When this happens in
+Aurora, the non-production Task is killed and moved into
+the `PREEMPTING` state  when both the following are true:
+
+- The task being killed is a non-production task.
+- The other task is a `PENDING` production task that hasn't been
+  scheduled due to a lack of resources.
+
+The scheduler UI shows the non-production task was preempted in favor of
+the production task. At some point, tasks in `PREEMPTING` move to `KILLED`.
+
+Note that non-production tasks consuming many resources are likely to be
+preempted in favor of production tasks.
+
+### Making Room for Maintenance: DRAINING
+
+Cluster operators can put an agent into maintenance mode. This will transition
+all `Task`s running on this agent into `DRAINING` and eventually to `KILLED`.
+Drained `Task`s will be restarted on other agents for which no maintenance
+has been announced yet.
+
+
+
+## State Reconciliation
+
+Due to the many inevitable realities of distributed systems, there might
+be a mismatch of perceived and actual cluster state (e.g. a machine returns
+from a `netsplit` but the scheduler has already marked all its `Task`s as
+`LOST` and rescheduled them).
+
+Aurora regularly runs a state reconciliation process in order to detect
+and correct such issues (e.g. by killing the errant `RUNNING` tasks).
+By default, the proper detection of all failure scenarios and inconsistencies
+may take up to an hour.
+
+To emphasize this point: there is no uniqueness guarantee for a single
+instance of a job in the presence of network partitions. If the `Task`
+requires that, it should be baked in at the application level using a
+distributed coordination service such as Zookeeper.
diff --git a/source/documentation/latest/sla.md b/source/documentation/latest/sla.md
deleted file mode 100644
index 14e9108..0000000
--- a/source/documentation/latest/sla.md
+++ /dev/null
@@ -1,176 +0,0 @@
-Aurora SLA Measurement
---------------
-
-- [Overview](#overview)
-- [Metric Details](#metric-details)
-  - [Platform Uptime](#platform-uptime)
-  - [Job Uptime](#job-uptime)
-  - [Median Time To Assigned (MTTA)](#median-time-to-assigned-\(mtta\))
-  - [Median Time To Running (MTTR)](#median-time-to-running-\(mttr\))
-- [Limitations](#limitations)
-
-## Overview
-
-The primary goal of the feature is collection and monitoring of Aurora job SLA (Service Level
-Agreements) metrics that defining a contractual relationship between the Aurora/Mesos platform
-and hosted services.
-
-The Aurora SLA feature currently supports stat collection only for service (non-cron)
-production jobs (`"production = True"` in your `.aurora` config).
-
-Counters that track SLA measurements are computed periodically within the scheduler.
-The individual instance metrics are refreshed every minute (configurable via
-`sla_stat_refresh_interval`). The instance counters are subsequently aggregated by
-relevant grouping types before exporting to scheduler `/vars` endpoint (when using `vagrant`
-that would be `http://192.168.33.7:8081/vars`)
-
-## Metric Details
-
-### Platform Uptime
-
-*Aggregate amount of time a job spends in a non-runnable state due to platform unavailability
-or scheduling delays. This metric tracks Aurora/Mesos uptime performance and reflects on any
-system-caused downtime events (tasks LOST or DRAINED). Any user-initiated task kills/restarts
-will not degrade this metric.*
-
-**Collection scope:**
-
-* Per job - `sla_<job_key>_platform_uptime_percent`
-* Per cluster - `sla_cluster_platform_uptime_percent`
-
-**Units:** percent
-
-A fault in the task environment may cause Aurora/Mesos to have different views on the task state
-or lose track of the task's existence. In such cases, the service task is marked as LOST and
-rescheduled by Aurora. For example, this may happen when the task stays in ASSIGNED or STARTING
-for too long or the Mesos slave becomes unhealthy (or disappears completely). The time between
-task entering LOST and its replacement reaching RUNNING state is counted towards platform downtime.
-
-Another example of a platform downtime event is the administrator-requested task rescheduling. This
-happens during planned Mesos slave maintenance when all slave tasks are marked as DRAINED and
-rescheduled elsewhere.
-
-To accurately calculate Platform Uptime, we must separate platform incurred downtime from user
-actions that put a service instance in a non-operational state. It is simpler to isolate
-user-incurred downtime and treat all other downtime as platform incurred.
-
-Currently, a user can cause a healthy service (task) downtime in only two ways: via `killTasks`
-or `restartShards` RPCs. For both, their affected tasks leave an audit state transition trail
-relevant to uptime calculations. By applying a special "SLA meaning" to exposed task state
-transition records, we can build a deterministic downtime trace for every given service instance.
-
-A task going through a state transition carries one of three possible SLA meanings
-(see [SlaAlgorithm.java](../src/main/java/org/apache/aurora/scheduler/sla/SlaAlgorithm.java) for
-sla-to-task-state mapping):
-
-* Task is UP: starts a period where the task is considered to be up and running from the Aurora
-  platform standpoint.
-
-* Task is DOWN: starts a period where the task cannot reach the UP state for some
-  non-user-related reason. Counts towards instance downtime.
-
-* Task is REMOVED from SLA: starts a period where the task is not expected to be UP due to
-  user initiated action or failure. We ignore this period for the uptime calculation purposes.
-
-This metric is recalculated over the last sampling period (last minute) to account for
-any UP/DOWN/REMOVED events. It ignores any UP/DOWN events not immediately adjacent to the
-sampling interval as well as adjacent REMOVED events.
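-
-As a rough illustration of the idea (a toy model, not the scheduler's actual
-algorithm), uptime over a sampling window can be derived from timestamped
-UP/DOWN/REMOVED transitions by counting only UP and DOWN time and excluding
-REMOVED periods from the denominator:
-
-```python
-def platform_uptime_percent(events, window_start, window_end):
-    """events: time-ordered (timestamp, state) pairs, state in {'UP', 'DOWN', 'REMOVED'}."""
-    up = down = 0.0
-    state = 'REMOVED'          # treat the instance as removed before any event
-    prev = window_start
-    for ts, new_state in events:
-        if ts <= window_start:
-            state = new_state  # establish the state at the start of the window
-            continue
-        ts = min(ts, window_end)
-        if state == 'UP':
-            up += ts - prev
-        elif state == 'DOWN':
-            down += ts - prev
-        prev, state = ts, new_state
-        if prev >= window_end:
-            break
-    if prev < window_end:      # account for the tail of the window
-        if state == 'UP':
-            up += window_end - prev
-        elif state == 'DOWN':
-            down += window_end - prev
-    total = up + down
-    return 100.0 if total == 0 else 100.0 * up / total
-
-# 0-30s UP, 30-45s DOWN, 45-60s UP over a 60s window -> 75.0
-print(platform_uptime_percent([(0, 'UP'), (30, 'DOWN'), (45, 'UP')], 0, 60))
-```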
-
-### Job Uptime
-
-*Percentage of the job instances considered to be in RUNNING state for the specified duration
-relative to request time. This is a purely application-side metric that considers the aggregate
-uptime of all RUNNING instances. Any user- or platform-initiated restarts directly affect
-this metric.*
-
-**Collection scope:** We currently expose job uptime values at 5 pre-defined
-percentiles (50th, 75th, 90th, 95th and 99th):
-
-* `sla_<job_key>_job_uptime_50_00_sec`
-* `sla_<job_key>_job_uptime_75_00_sec`
-* `sla_<job_key>_job_uptime_90_00_sec`
-* `sla_<job_key>_job_uptime_95_00_sec`
-* `sla_<job_key>_job_uptime_99_00_sec`
-
-**Units:** seconds
-
-You can also get customized real-time stats from the Aurora client. See `aurora sla -h` for
-more details.
-
-### Median Time To Assigned (MTTA)
-
-*Median time a job spends waiting for its tasks to be assigned to a host. This is a combined
-metric that helps track the dependency of scheduling performance on the requested resources
-(user scope) as well as the internal scheduler bin-packing algorithm efficiency (platform scope).*
-
-**Collection scope:**
-
-* Per job - `sla_<job_key>_mtta_ms`
-* Per cluster - `sla_cluster_mtta_ms`
-* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
-[ResourceAggregates.java](../src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java)
-  * By CPU:
-    * `sla_cpu_small_mtta_ms`
-    * `sla_cpu_medium_mtta_ms`
-    * `sla_cpu_large_mtta_ms`
-    * `sla_cpu_xlarge_mtta_ms`
-    * `sla_cpu_xxlarge_mtta_ms`
-  * By RAM:
-    * `sla_ram_small_mtta_ms`
-    * `sla_ram_medium_mtta_ms`
-    * `sla_ram_large_mtta_ms`
-    * `sla_ram_xlarge_mtta_ms`
-    * `sla_ram_xxlarge_mtta_ms`
-  * By DISK:
-    * `sla_disk_small_mtta_ms`
-    * `sla_disk_medium_mtta_ms`
-    * `sla_disk_large_mtta_ms`
-    * `sla_disk_xlarge_mtta_ms`
-    * `sla_disk_xxlarge_mtta_ms`
-
-**Units:** milliseconds
-
-MTTA only considers instances that have already reached ASSIGNED state and ignores those
-that are still PENDING. This ensures straggler instances (e.g. with unreasonable resource
-constraints) do not affect metric curves.
-
-### Median Time To Running (MTTR)
-
-*Median time a job waits for its tasks to reach RUNNING state. This is a comprehensive metric
-reflecting the overall time it takes for Aurora/Mesos to start executing user content.*
-
-**Collection scope:**
-
-* Per job - `sla_<job_key>_mttr_ms`
-* Per cluster - `sla_cluster_mttr_ms`
-* Per instance size (small, medium, large, x-large, xx-large). Sizes are defined in:
-[ResourceAggregates.java](../src/main/java/org/apache/aurora/scheduler/base/ResourceAggregates.java)
-  * By CPU:
-    * `sla_cpu_small_mttr_ms`
-    * `sla_cpu_medium_mttr_ms`
-    * `sla_cpu_large_mttr_ms`
-    * `sla_cpu_xlarge_mttr_ms`
-    * `sla_cpu_xxlarge_mttr_ms`
-  * By RAM:
-    * `sla_ram_small_mttr_ms`
-    * `sla_ram_medium_mttr_ms`
-    * `sla_ram_large_mttr_ms`
-    * `sla_ram_xlarge_mttr_ms`
-    * `sla_ram_xxlarge_mttr_ms`
-  * By DISK:
-    * `sla_disk_small_mttr_ms`
-    * `sla_disk_medium_mttr_ms`
-    * `sla_disk_large_mttr_ms`
-    * `sla_disk_xlarge_mttr_ms`
-    * `sla_disk_xxlarge_mttr_ms`
-
-**Units:** milliseconds
-
-MTTR only considers instances in RUNNING state. This ensures straggler instances (e.g. with
-unreasonable resource constraints) do not affect metric curves.
-
-## Limitations
-
-* The availability of Aurora SLA metrics is bound by the scheduler availability.
-
-* All metrics are calculated at a pre-defined interval (currently set at 1 minute).
-  Scheduler restarts may result in missed collections.
\ No newline at end of file
diff --git a/source/documentation/latest/storage-config.md b/source/documentation/latest/storage-config.md
deleted file mode 100644
index 023d730..0000000
--- a/source/documentation/latest/storage-config.md
+++ /dev/null
@@ -1,142 +0,0 @@
-# Storage Configuration And Maintenance
-
-- [Overview](#overview)
-- [Scheduler storage configuration flags](#scheduler-storage-configuration-flags)
-  - [Mesos replicated log configuration flags](#mesos-replicated-log-configuration-flags)
-    - [-native_log_quorum_size](#-native_log_quorum_size)
-    - [-native_log_file_path](#-native_log_file_path)
-    - [-native_log_zk_group_path](#-native_log_zk_group_path)
-  - [Backup configuration flags](#backup-configuration-flags)
-    - [-backup_interval](#-backup_interval)
-    - [-backup_dir](#-backup_dir)
-    - [-max_saved_backups](#-max_saved_backups)
-- [Recovering from a scheduler backup](#recovering-from-a-scheduler-backup)
-  - [Summary](#summary)
-  - [Preparation](#preparation)
-  - [Cleanup and re-initialize Mesos replicated log](#cleanup-and-re-initialize-mesos-replicated-log)
-  - [Restore from backup](#restore-from-backup)
-  - [Cleanup](#cleanup)
-
-## Overview
-
-This document summarizes Aurora storage configuration and maintenance details and is
-intended for use by anyone deploying and/or maintaining Aurora.
-
-For a high level overview of the Aurora storage architecture refer to [this document](/documentation/latest/storage/).
-
-## Scheduler storage configuration flags
-
-Below is a summary of scheduler storage configuration flags that either don't have default values
-or require attention before deploying in a production environment.
-
-### Mesos replicated log configuration flags
-
-#### -native_log_quorum_size
-Defines the Mesos replicated log quorum size. See
-[the replicated log configuration document](deploying-aurora-scheduler.md#replicated-log-configuration)
-on how to choose the right value.
-
-#### -native_log_file_path
-Location of the Mesos replicated log files. Consider allocating a dedicated disk (preferably SSD)
-for Mesos replicated log files to ensure optimal storage performance.
-
-#### -native_log_zk_group_path
-ZooKeeper path used for Mesos replicated log quorum discovery.
-
-See [code](../src/main/java/org/apache/aurora/scheduler/log/mesos/MesosLogStreamModule.java) for
-other available Mesos replicated log configuration options and default values.
-
-### Backup configuration flags
-
-Configuration options for the Aurora scheduler backup manager.
-
-#### -backup_interval
-The interval on which the scheduler writes local storage backups.  The default is every hour.
-
-#### -backup_dir
-Directory to write backups to.
-
-#### -max_saved_backups
-Maximum number of backups to retain before deleting the oldest backup(s).
-
-## Recovering from a scheduler backup
-
-- [Overview](#overview)
-- [Preparation](#preparation)
-- [Assess Mesos replicated log damage](#assess-mesos-replicated-log-damage)
-- [Restore from backup](#restore-from-backup)
-- [Cleanup](#cleanup)
-
-**Be sure to read the entire page before attempting to restore from a backup, as it may have
-unintended consequences.**
-
-### Summary
-
-The restoration procedure replaces the existing (possibly corrupted) Mesos replicated log with an
-earlier, backed up, version and requires all schedulers to be taken down temporarily while
-restoring. Once completed, the scheduler state resets to what it was when the backup was created.
-This means any jobs/tasks created or updated after the backup are unknown to the scheduler and will
-be killed shortly after the cluster restarts. All other tasks continue operating as normal.
-
-Usually, it is a bad idea to restore a backup that is not extremely recent (i.e. older than a few
-hours). This is because the scheduler will expect the cluster to look exactly as the backup does,
-so any tasks that have been rescheduled since the backup was taken will be killed.
-
-### Preparation
-
-Follow these steps to prepare the cluster for restoring from a backup:
-
-* Stop all scheduler instances
-
-* Consider blocking external traffic on a port defined in `-http_port` for all schedulers to
-prevent users from interacting with the scheduler during the restoration process. This will help
-troubleshooting by reducing scheduler log noise and prevent users from making changes that would
-be erased after the backup snapshot is restored.
-
-* The next steps are required to put the scheduler into a partially disabled state where it is still
-able to accept storage recovery requests but unable to schedule or change task states. This may be
-accomplished by updating the following scheduler configuration options:
-  * Set `-mesos_master_address` to a non-existent zk address. This will prevent the scheduler from
-    registering with Mesos. E.g.: `-mesos_master_address=zk://localhost:2181`
-  * `-max_registration_delay` - set to a sufficiently long interval to prevent a registration timeout
-    and, as a result, scheduler suicide. E.g.: `-max_registration_delay=360min`
-  * Make sure the `-gc_executor_path` option is not set, to prevent accidental task GC. This is
-    important, as the scheduler will attempt to reconcile the cluster state and will kill all tasks when
-    restarted with an empty Mesos replicated log.
-
-* Restart all schedulers
-
-### Cleanup and re-initialize Mesos replicated log
-
-Get rid of the corrupted files and re-initialize the Mesos replicated log:
-
-* Stop schedulers
-* Delete all files under `-native_log_file_path` on all schedulers
-* Initialize Mesos replica's log file: `mesos-log initialize <-native_log_file_path>`
-* Restart schedulers
-
-### Restore from backup
-
-At this point the scheduler is ready to rehydrate from the backup:
-
-* Identify the leading scheduler by:
-  * running `aurora_admin get_scheduler <cluster>` - if scheduler is responsive
-  * examining scheduler logs
-  * or examining Zookeeper registration under the path defined by `-zk_endpoints`
-    and `-serverset_path`
-
-* Locate the desired backup file, copy it to the leading scheduler, and stage recovery by running
-the following command on the leader:
-`aurora_admin scheduler_stage_recovery <cluster> scheduler-backup-<yyyy-MM-dd-HH-mm>`
-
-* At this point, the recovery snapshot is staged and available for manual verification/modification
-via `aurora_admin scheduler_print_recovery_tasks` and `scheduler_delete_recovery_tasks` commands.
-See `aurora_admin help <command>` for usage details.
-
-* Commit the recovery. This instructs the scheduler to overwrite the existing Mesos replicated log with
-the provided backup snapshot and initiate a mandatory failover:
-`aurora_admin scheduler_commit_recovery <cluster>`
-
-### Cleanup
-Undo any modifications made during the [Preparation](#preparation) sequence.
-
diff --git a/source/documentation/latest/storage.md b/source/documentation/latest/storage.md
deleted file mode 100644
index 6ffed54..0000000
--- a/source/documentation/latest/storage.md
+++ /dev/null
@@ -1,88 +0,0 @@
-# Aurora Scheduler Storage
-
-- [Overview](#overview)
-- [Reads, writes, modifications](#reads-writes-modifications)
-  - [Read lifecycle](#read-lifecycle)
-  - [Write lifecycle](#write-lifecycle)
-- [Atomicity, consistency and isolation](#atomicity-consistency-and-isolation)
-- [Population on restart](#population-on-restart)
-
-## Overview
-
-The Aurora scheduler maintains data that needs to be persisted to survive failovers and restarts.
-For example:
-
-* Task configurations and scheduled task instances
-* Job update configurations and update progress
-* Production resource quotas
-* Mesos resource offer host attributes
-
-Aurora solves its persistence needs by leveraging the Mesos implementation of a Paxos replicated
-log [[1]](https://ramcloud.stanford.edu/~ongaro/userstudy/paxos.pdf)
-[[2]](http://en.wikipedia.org/wiki/State_machine_replication) with a key-value
-[LevelDB](https://github.com/google/leveldb) storage as persistence media.
-
-Conceptually, it can be represented by the following major components:
-
-* Volatile storage: in-memory cache of all available data. Implemented via in-memory
-[H2 Database](http://www.h2database.com/html/main.html) and accessed via
-[MyBatis](http://mybatis.github.io/mybatis-3/).
-* Log manager: interface between Aurora storage and Mesos replicated log. The default schema format
-is [thrift](https://github.com/apache/thrift). Data is stored in serialized binary form.
-* Snapshot manager: all data is periodically persisted to the Mesos replicated log in a single snapshot.
-This helps establish periodic recovery checkpoints and speeds up volatile storage recovery on
-restart.
-* Backup manager: as a precaution, snapshots are periodically written out into backup files.
-This solves a [disaster recovery problem](storage-config.md#recovering-from-a-scheduler-backup)
-in case of a complete loss or corruption of Mesos log files.
-
-![Storage hierarchy](images/storage_hierarchy.png)
-
-## Reads, writes, modifications
-
-All services in Aurora access data via a set of predefined store interfaces (aka stores) logically
-grouped by the type of data they serve. Every interface defines a specific set of operations allowed
-on the data thus abstracting out the storage access and the actual persistence implementation. The
-latter is especially important in view of a general immutability of persisted data. With the Mesos
-replicated log as the underlying persistence solution, data can be read and written easily but not
-modified. All modifications are simulated by saving new versions of modified objects. This feature
-and general performance considerations justify the existence of the volatile in-memory store.
-
-### Read lifecycle
-
-There are two types of reads available in Aurora: consistent and weakly-consistent. The difference
-is explained [below](#atomicity-consistency-and-isolation).
-
-All reads are served from the volatile storage, making reads generally cheap storage operations
-from a performance standpoint. The majority of the volatile stores are represented by the
-in-memory H2 database. This allows for rich schema definitions, queries and relationships that
-key-value storage is unable to match.
-
-### Write lifecycle
-
-Writes are more involved operations since, in addition to updating the volatile store, data has to be
-appended to the replicated log. Data is not available for reads until fully acknowledged by both the
-replicated log and volatile storage.
-
-## Atomicity, consistency and isolation
-
-Aurora uses [write-ahead logging](http://en.wikipedia.org/wiki/Write-ahead_logging) to ensure
-consistency between replicated and volatile storage. In Aurora, data is first written into the
-replicated log and only then updated in the volatile store.
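-
-A toy sketch of that ordering (an illustration only, not Aurora's actual Java
-implementation): a mutation is appended to the durable log first, and the
-in-memory view is updated only after the append succeeds. Replaying the log
-rebuilds the volatile store, as described under "Population on restart" below.
-
-```python
-import json
-import threading
-
-class ToyStore:
-    """Write-ahead ordering: durable log append first, volatile update second."""
-
-    def __init__(self, log_path):
-        self._log_path = log_path
-        self._lock = threading.Lock()      # serializes writes, like the writer's lock
-        self._volatile = {}                # stands in for the in-memory store
-        open(self._log_path, 'a').close()  # make sure the log file exists
-
-    def write(self, key, value):
-        with self._lock:
-            # 1. Append the mutation to the durable (replicated) log.
-            with open(self._log_path, 'a') as log:
-                log.write(json.dumps({'key': key, 'value': value}) + '\n')
-            # 2. Only then apply it to the volatile store; readers never see
-            #    state that has not been durably recorded.
-            self._volatile[key] = value
-
-    def read(self, key):
-        # Reads are served from the volatile store.
-        return self._volatile.get(key)
-
-    def recover(self):
-        # On restart, replay the log to rebuild the volatile store.
-        self._volatile.clear()
-        with open(self._log_path) as log:
-            for line in log:
-                entry = json.loads(line)
-                self._volatile[entry['key']] = entry['value']
-```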
-
-Aurora storage uses read-write locks to serialize data mutations and provide a consistent view of the
-available data. The available `Storage` interface exposes 3 major types of operations:
-* `consistentRead` - access is locked using a reader's lock and provides a consistent view on read
-* `weaklyConsistentRead` - access is lock-less. Delivers best contention performance but may result
-in stale reads
-* `write` - access is fully serialized by using the writer's lock. Operation success requires both
-volatile and replicated writes to succeed.
-
-The consistency of the volatile store is enforced via H2 transactional isolation.
-
-## Population on restart
-
-Any time a scheduler restarts, it restores its volatile state from the most recent position recorded
-in the replicated log by restoring the snapshot and replaying individual log entries on top to fully
-recover the state up to the last write.
-
diff --git a/source/documentation/latest/test-resource-generation.md b/source/documentation/latest/test-resource-generation.md
deleted file mode 100644
index 1969385..0000000
--- a/source/documentation/latest/test-resource-generation.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Generating test resources
-
-## Background
-The Aurora source repository and distributions contain several
-[binary files](../src/test/resources/org/apache/thermos/root/checkpoints) to
-qualify the backwards-compatibility of thermos with checkpoint data. Since
-thermos persists state to disk, to be read by other components (the GC executor
-and the thermos observer), it is important that we have tests that prevent
-regressions affecting the ability to parse previously-written data.
-
-## Generating test files
-The files included represent persisted checkpoints that exercise different
-features of thermos. The existing files should not be modified unless
-we are accepting backwards incompatibility, such as with a major release.
-
-It is not practical to write source code to generate these files on the fly,
-as source would be vulnerable to drift (e.g. due to refactoring) in ways
-that would undermine the goal of ensuring backwards compatibility.
-
-The most common reason to add a new checkpoint file would be to provide
-coverage for new thermos features that alter the data format. This is
-accomplished by writing and running a
-[job configuration](/documentation/latest/configuration-reference/) that exercises the feature, and
-copying the checkpoint file from the sandbox directory; by default this is
-`/var/run/thermos/checkpoints/<aurora task id>`.
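-
-For example, a trivial job configuration along these lines (a sketch; the names
-and the feature-specific settings are placeholders to be adapted to whatever
-alters the checkpoint format) can be run to produce a fresh checkpoint:
-
-```python
-exercise = Process(name = 'exercise_feature',
-                   cmdline = 'echo "exercising the new thermos feature"')
-
-exercise_task = Task(name = 'exercise_feature',
-                     processes = [exercise],
-                     resources = Resources(cpu = 0.1, ram = 16*MB, disk = 16*MB))
-
-jobs = [
-  Job(cluster = 'devcluster',
-      environment = 'devel',
-      role = 'www-data',
-      name = 'exercise_feature',
-      task = exercise_task)
-]
-```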
diff --git a/source/documentation/latest/tutorial.md b/source/documentation/latest/tutorial.md
deleted file mode 100644
index e7d83f3..0000000
--- a/source/documentation/latest/tutorial.md
+++ /dev/null
@@ -1,265 +0,0 @@
-Aurora Tutorial
----------------
-
-- [Introduction](#introduction)
-- [Setup: Install Aurora](#setup-install-aurora)
-- [The Script](#the-script)
-- [Aurora Configuration](#aurora-configuration)
-- [What's Going On In That Configuration File?](#whats-going-on-in-that-configuration-file)
-- [Creating the Job](#creating-the-job)
-- [Watching the Job Run](#watching-the-job-run)
-- [Cleanup](#cleanup)
-- [Next Steps](#next-steps)
-
-## Introduction
-
-This tutorial shows how to use the Aurora scheduler to run (and
-"`printf-debug`") a hello world program on Mesos. The operational
-hierarchy is:
-
-- Aurora manages and schedules jobs for Mesos to run.
-- Mesos manages the individual tasks that make up a job.
-- Thermos manages the individual processes that make up a task.
-
-This is the recommended first document for new Aurora users to read to start
-getting up to speed on the system.
-
-To get help, email questions to the Aurora Developer List,
-[dev@aurora.incubator.apache.org](mailto:dev@aurora.incubator.apache.org)
-
-## Setup: Install Aurora
-
-You use the Aurora client and web UI to interact with Aurora jobs. To
-install it locally, see [vagrant.md](/documentation/latest/vagrant/). The remainder of this
-Tutorial assumes you are running Aurora using Vagrant.  Unless otherwise stated,
-all commands are to be run from the root of the aurora repository clone.
-
-## The Script
-
-Our "hello world" application is a simple Python script that loops
-forever, displaying the time every few seconds. Copy the code below and
-put it in a file named `hello_world.py` in the root of your Aurora repository clone (Note:
-this directory is the same as `/vagrant` inside the Vagrant VMs).
-
-The script has an intentional bug, which we will explain later on.
-
-<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
--->
-```python
-import sys
-import time
-
-def main(argv):
-  SLEEP_DELAY = 10
-  # Python ninjas - ignore this blatant bug.
-  for i in xrang(100):
-    print("Hello world! The time is now: %s. Sleeping for %d secs" % (
-      time.asctime(), SLEEP_DELAY))
-    sys.stdout.flush()
-    time.sleep(SLEEP_DELAY)
-
-if __name__ == "__main__":
-  main(sys.argv)
-```
-
-## Aurora Configuration
-
-Once we have our script/program, we need to create a *configuration
-file* that tells Aurora how to manage and launch our Job. Save the below
-code in the file `hello_world.aurora`.
-
-<!-- NOTE: If you are changing this file, be sure to also update examples/vagrant/test_tutorial.sh.
--->
-```python
-pkg_path = '/vagrant/hello_world.py'
-
-# we use a trick here to make the configuration change with
-# the contents of the file, for simplicity.  in a normal setting, packages would be
-# versioned, and the version number would be changed in the configuration.
-import hashlib
-with open(pkg_path, 'rb') as f:
-  pkg_checksum = hashlib.md5(f.read()).hexdigest()
-
-# copy hello_world.py into the local sandbox
-install = Process(
-  name = 'fetch_package',
-  cmdline = 'cp %s . && echo %s && chmod +x hello_world.py' % (pkg_path, pkg_checksum))
-
-# run the script
-hello_world = Process(
-  name = 'hello_world',
-  cmdline = 'python hello_world.py')
-
-# describe the task
-hello_world_task = SequentialTask(
-  processes = [install, hello_world],
-  resources = Resources(cpu = 1, ram = 1*MB, disk=8*MB))
-
-jobs = [
-  Service(cluster = 'devcluster',
-          environment = 'devel',
-          role = 'www-data',
-          name = 'hello_world',
-          task = hello_world_task)
-]
-```
-
-For more about Aurora configuration files, see the [Configuration
-Tutorial](/documentation/latest/configuration-tutorial/) and the [Aurora + Thermos
-Reference](/documentation/latest/configuration-reference/) (preferably after finishing this
-tutorial).
-
-## What's Going On In That Configuration File?
-
-More than you might think.
-
-1. From a "big picture" viewpoint, it first defines two
-Processes. Then it defines a Task that runs the two Processes in the
-order specified in the Task definition, as well as specifying what
-computational and memory resources are available for them.  Finally,
-it defines a Job that will schedule the Task on available and suitable
-machines. This Job is the sole member of a list of Jobs; you can
-specify more than one Job in a config file.
-
-2. At the Process level, it specifies how to get your code into the
-local sandbox in which it will run. It then specifies how the code is
-actually run once the second Process starts.
-
-## Creating the Job
-
-We're ready to launch our job! To do so, we use the Aurora Client to
-issue a Job creation request to the Aurora scheduler.
-
-Many Aurora Client commands take a *job key* argument, which uniquely
-identifies a Job. A job key consists of four parts, each separated by a
-"/". The four parts are  `<cluster>/<role>/<environment>/<jobname>`
-in that order. When comparing two job keys, if any of the
-four parts is different from its counterpart in the other key, then the
-two job keys identify two separate jobs. If all four values are
-identical, the job keys identify the same job.
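-
-To make that structure concrete, a small sketch (not part of the Aurora client)
-that splits a job key into its four parts:
-
-```python
-def parse_job_key(job_key):
-    """Split a job key of the form <cluster>/<role>/<environment>/<jobname>."""
-    parts = job_key.split('/')
-    if len(parts) != 4 or not all(parts):
-        raise ValueError('expected <cluster>/<role>/<environment>/<jobname>, got %r' % job_key)
-    cluster, role, environment, jobname = parts
-    return {'cluster': cluster, 'role': role,
-            'environment': environment, 'jobname': jobname}
-
-print(parse_job_key('devcluster/www-data/devel/hello_world'))
-# {'cluster': 'devcluster', 'role': 'www-data', 'environment': 'devel', 'jobname': 'hello_world'}
-```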
-
-`/etc/aurora/clusters.json` within the Aurora scheduler has the available
-cluster names. For Vagrant, from the top-level of your Aurora repository clone,
-do:
-
-    $ vagrant ssh
-
-Followed by:
-
-    vagrant@precise64:~$ cat /etc/aurora/clusters.json
-
-You'll see something like:
-
-```javascript
-[{
-  "name": "devcluster",
-  "zk": "192.168.33.7",
-  "scheduler_zk_path": "/aurora/scheduler",
-  "auth_mechanism": "UNAUTHENTICATED"
-}]
-```
-
-Use a `name` value for your job key's cluster value.
-
-Role names are user accounts existing on the slave machines. If you don't know what accounts
-are available, contact your sysadmin.
-
-Environment names are namespaces; you can count on `prod`, `devel` and `test` existing.
-
-The Aurora Client command that actually runs our Job is `aurora create`. It creates a Job as
-specified by its job key and configuration file arguments and runs it.
-
-    aurora create <cluster>/<role>/<environment>/<jobname> <config_file>
-
-Or for our example:
-
-    aurora create devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
-
-This returns:
-
-    $ vagrant ssh
-    Welcome to Ubuntu 12.04 LTS (GNU/Linux 3.2.0-23-generic x86_64)
-
-     * Documentation:  https://help.ubuntu.com/
-    Welcome to your Vagrant-built virtual machine.
-    Last login: Fri Jan  3 02:18:55 2014 from 10.0.2.2
-    vagrant@precise64:~$ aurora create devcluster/www-data/devel/hello_world \
-        /vagrant/hello_world.aurora
-     INFO] Creating job hello_world
-     INFO] Response from scheduler: OK (message: 1 new tasks pending for job
-      www-data/devel/hello_world)
-     INFO] Job url: http://precise64:8081/scheduler/www-data/devel/hello_world
-
-## Watching the Job Run
-
-Now that our job is running, let's see what it's doing. Access the
-scheduler web interface at `http://$scheduler_hostname:$scheduler_port/scheduler`,
-or, when using `vagrant`, at `http://192.168.33.7:8081/scheduler`.
-First we see what Jobs are scheduled:
-
-![Scheduled Jobs](images/ScheduledJobs.png)
-
-Click on your user name, which in this case was `www-data`, and we see the Jobs associated
-with that role:
-
-![Role Jobs](images/RoleJobs.png)
-
-If you click on your `hello_world` Job, you'll see:
-
-![hello_world Job](images/HelloWorldJob.png)
-
-Oops, looks like our first job didn't quite work! The task failed, so we have
-to figure out what went wrong.
-
-Access the page for our Task by clicking on its host.
-
-![Task page](images/TaskBreakdown.png)
-
-Once there, we see that the
-`hello_world` process failed. The Task page captures the standard error and
-standard output streams and makes them available. Clicking through
-to `stderr` on the failed `hello_world` process, we see what happened.
-
-![stderr page](images/stderr.png)
-
-It looks like we made a typo in our Python script. We wanted `xrange`,
-not `xrang`. Edit the `hello_world.py` script to use the correct function and
-we will try again.
-
-    aurora update devcluster/www-data/devel/hello_world /vagrant/hello_world.aurora
-
-This time, the task comes up, we inspect the page, and see that the
-`hello_world` process is running.
-
-![Running Task page](images/runningtask.png)
-
-We then inspect the output by clicking on `stdout` and see our process'
-output:
-
-![stdout page](images/stdout.png)
-
-## Cleanup
-
-Now that we're done, we kill the job using the Aurora client:
-
-    vagrant@precise64:~$ aurora killall devcluster/www-data/devel/hello_world
-     INFO] Killing tasks for job: devcluster/www-data/devel/hello_world
-     INFO] Response from scheduler: OK (message: Tasks killed.)
-     INFO] Job url: http://precise64:8081/scheduler/www-data/devel/hello_world
-    vagrant@precise64:~$
-
-The job page now shows the `hello_world` tasks as completed.
-
-![Killed Task page](images/killedtask.png)
-
-## Next Steps
-
-Now that you've finished this Tutorial, you should read or do the following:
-
-- [The Aurora Configuration Tutorial](/documentation/latest/configuration-tutorial/), which provides more examples
-  and best practices for writing Aurora configurations. You should also look at
-  the [Aurora + Thermos Configuration Reference](/documentation/latest/configuration-reference/).
-- The [Aurora User Guide](/documentation/latest/user-guide/) provides an overview of how Aurora, Mesos, and
-  Thermos work "under the hood".
-- Explore the Aurora Client - use the `aurora help` subcommand, and read the
-  [Aurora Client Commands](/documentation/latest/client-commands/) document.
diff --git a/source/documentation/latest/user-guide.md b/source/documentation/latest/user-guide.md
deleted file mode 100644
index dc81d29..0000000
--- a/source/documentation/latest/user-guide.md
+++ /dev/null
@@ -1,363 +0,0 @@
-Aurora User Guide
------------------
-
-- [Overview](#user-content-overview)
-- [Job Lifecycle](#user-content-job-lifecycle)
-	- [Life Of A Task](#user-content-life-of-a-task)
-	- [PENDING to RUNNING states](#user-content-pending-to-running-states)
-	- [Task Updates](#user-content-task-updates)
-	- [HTTP Health Checking and Graceful Shutdown](#user-content-http-health-checking-and-graceful-shutdown)
-		- [Tearing a task down](#user-content-tearing-a-task-down)
-	- [Giving Priority to Production Tasks: PREEMPTING](#user-content-giving-priority-to-production-tasks-preempting)
-	- [Natural Termination: FINISHED, FAILED](#user-content-natural-termination-finished-failed)
-	- [Forceful Termination: KILLING, RESTARTING](#user-content-forceful-termination-killing-restarting)
-- [Service Discovery](#user-content-service-discovery)
-- [Configuration](#user-content-configuration)
-- [Creating Jobs](#user-content-creating-jobs)
-- [Interacting With Jobs](#user-content-interacting-with-jobs)
-
-Overview
---------
-
-This document gives an overview of how Aurora works under the hood.
-It assumes you've already worked through the "hello world" example
-job in the [Aurora Tutorial](/documentation/latest/tutorial/). Specifics of how to use Aurora are **not**
- given here, but pointers to documentation about how to use Aurora are
-provided.
-
-Aurora is a Mesos framework used to schedule *jobs* onto Mesos. Mesos
-cares about individual *tasks*, but typical jobs consist of dozens or
-hundreds of task replicas. Aurora provides a layer on top of Mesos with
-its `Job` abstraction. An Aurora `Job` consists of a task template and
-instructions for creating near-identical replicas of that task (modulo
-things like "instance id" or specific port numbers which may differ from
-machine to machine).
-
-How many tasks make up a Job is complicated. On a basic level, a Job consists of
-one task template and instructions for creating near-identical replicas of that task
-(otherwise referred to as "instances" or "shards").
-
-However, since Jobs can be updated on the fly, a single Job identifier or *job key*
-can have multiple job configurations associated with it.
-
-For example, consider when I have a Job with 4 instances that each
-request 1 core of cpu, 1 GB of RAM, and 1 GB of disk space as specified
-in the configuration file `hello_world.aurora`. I want to
-update it so it requests 2 GB of RAM instead of 1. I create a new
-configuration file to do that called `new_hello_world.aurora` and
-issue an `aurora update --shards=0-1 <job_key_value> new_hello_world.aurora`
-command.
-
-This results in instances 0 and 1 having 1 cpu, 2 GB of RAM, and 1 GB of disk space,
-while instances 2 and 3 have 1 cpu, 1 GB of RAM, and 1 GB of disk space. If instance 3
-dies and restarts, it restarts with 1 cpu, 1 GB RAM, and 1 GB disk space.
-
-That means there are two task configurations for the same Job
-at the same time, each valid for a different range of instances.
-
-This isn't a recommended pattern, but it is valid and supported by the
-Aurora scheduler. This most often manifests in the "canary pattern" where
-instance 0 runs with a different configuration than instances 1-N to test
-different code versions alongside the actual production job.
-
-A task can merely be a single *process* corresponding to a single
-command line, such as `python2.6 my_script.py`. However, a task can also
-consist of many separate processes, which all run within a single
-sandbox. For example, running multiple cooperating agents together,
-such as `logrotate`, `installer`, master, or slave processes. This is
-where Thermos comes in. While Aurora provides a `Job` abstraction on
-top of Mesos `Tasks`, Thermos provides a `Process` abstraction
-underneath Mesos `Task`s and serves as part of the Aurora framework's
-executor.
-
-You define `Job`s, `Task`s, and `Process`es in a configuration file.
-Configuration files are written in Python, and make use of the Pystachio
-templating language. They end in a `.aurora` extension.
-
-Pystachio is a type-checked dictionary templating library.
-
-> TL;DR
->
-> -   Aurora manages jobs made of tasks.
-> -   Mesos manages tasks made of processes.
-> -   Thermos manages processes.
-> -   All are defined in a `.aurora` configuration file.
-
-![Aurora hierarchy](images/aurora_hierarchy.png)
-
-Each `Task` has a *sandbox* created when the `Task` starts and garbage
-collected when it finishes. All of a `Task'`s processes run in its
-sandbox, so processes can share state by using a shared current working
-directory.
-
-The sandbox garbage collection policy considers many factors, most
-importantly age and size. It makes a best-effort attempt to keep
-sandboxes around as long as possible post-task in order for service
-owners to inspect data and logs, should the `Task` have completed
-abnormally. But you can't design your applications assuming sandboxes
-will be around forever; plan for this, e.g. by building log saving or other
-checkpointing mechanisms directly into your application or into your
-`Job` description.
-
-Job Lifecycle
--------------
-
-When Aurora reads a configuration file and finds a `Job` definition, it:
-
-1.  Evaluates the `Job` definition.
-2.  Splits the `Job` into its constituent `Task`s.
-3.  Sends those `Task`s to the scheduler.
-4.  The scheduler puts the `Task`s into `PENDING` state, starting each
-    `Task`'s life cycle.
-
-### Life Of A Task
-
-![Life of a task](images/lifeofatask.png)
-
-### PENDING to RUNNING states
-
-When a `Task` is in the `PENDING` state, the scheduler constantly
-searches for machines satisfying that `Task`'s resource request
-requirements (RAM, disk space, CPU time) while maintaining configuration
-constraints such as "a `Task` must run on machines dedicated to a
-particular role" or attribute limit constraints such as "at most 2
-`Task`s from the same `Job` may run on each rack". When the scheduler
-finds a suitable match, it assigns the `Task` to a machine and puts the
-`Task` into the `ASSIGNED` state.
-
-From the `ASSIGNED` state, the scheduler sends an RPC to the slave
-machine containing `Task` configuration, which the slave uses to spawn
-an executor responsible for the `Task`'s lifecycle. When the scheduler
-receives an acknowledgement that the machine has accepted the `Task`,
-the `Task` goes into `STARTING` state.
-
-`STARTING` state initializes a `Task` sandbox. When the sandbox is fully
-initialized, Thermos begins to invoke `Process`es. Also, the slave
-machine sends an update to the scheduler that the `Task` is
-in `RUNNING` state.
-
-If a `Task` stays in `ASSIGNED` or `STARTING` for too long, the
-scheduler forces it into `LOST` state, creating a new `Task` in its
-place that's sent into `PENDING` state. This is technically true of any
-active state: if the Mesos core tells the scheduler that a slave has
-become unhealthy (or outright disappeared), the `Task`s assigned to that
-slave go into `LOST` state and new `Task`s are created in their place.
-From `PENDING` state, there is no guarantee a `Task` will be reassigned
-to the same machine unless job constraints explicitly force it there.
-
-If there is a state mismatch, (e.g. a machine returns from a `netsplit`
-and the scheduler has marked all its `Task`s `LOST` and rescheduled
-them), a state reconciliation process kills the errant `RUNNING` tasks,
-which may take up to an hour. But to emphasize this point: there is no
-uniqueness guarantee for a single instance of a job in the presence of
-network partitions. If the Task requires that, it should be baked in at
-the application level using a distributed coordination service such as
-Zookeeper.
-
-### Task Updates
-
-`Job` configurations can be updated at any point in their lifecycle.
-Usually updates are done incrementally using a process called a *rolling
-upgrade*, in which Tasks are upgraded in small groups, one group at a
-time.  Updates are done using various Aurora Client commands.
-
-For a configuration update, the Aurora Client calculates required changes
-by examining the current job config state and the new desired job config.
-It then starts a rolling batched update process by going through every batch
-and performing these operations:
-
-- If an instance is present in the scheduler but isn't in the new config,
-  then that instance is killed.
-- If an instance is not present in the scheduler but is present in
-  the new config, then the instance is created.
-- If an instance is present in both the scheduler and the new config, then
-  the client diffs both task configs. If it detects any changes, it
-  performs an instance update by killing the old config instance and adding
-  the new config instance.
-
-The Aurora client continues through the instance list until all tasks are
-updated, in `RUNNING`, and healthy for a configurable amount of time.
-If the client determines the update is not going well (a percentage of health
-checks have failed), it cancels the update.
-
-Update cancellation runs a procedure similar to the update sequence described
-above, but in reverse order. New instance configs are swapped
-with old instance configs and batch updates proceed backwards
-from the point where the update failed. E.g., (0,1,2) (3,4,5) (6,7,
-8-FAIL) results in a rollback in the order (8,7,6) (5,4,3) (2,1,0).
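-
-A toy sketch of that rollback ordering, using the batches from the example above
-(illustrative only, not the client's implementation):
-
-```python
-def rollback_order(batches, failed_batch_index):
-    """Return the instance order in which a rollback proceeds."""
-    affected = batches[:failed_batch_index + 1]
-    # Walk the affected batches backwards, and each batch's instances backwards too.
-    return [instance
-            for batch in reversed(affected)
-            for instance in reversed(batch)]
-
-print(rollback_order([[0, 1, 2], [3, 4, 5], [6, 7, 8]], failed_batch_index=2))
-# [8, 7, 6, 5, 4, 3, 2, 1, 0]
-```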
-
-### HTTP Health Checking and Graceful Shutdown
-
-The Executor implements a protocol for rudimentary control of a task via HTTP.  Tasks subscribe to
-this protocol by declaring a port named `health`.  Take for example this configuration snippet:
-
-    nginx = Process(
-      name = 'nginx',
-      cmdline = './run_nginx.sh -port {{thermos.ports[http]}}')
-
-When this Process is included in a job, the job will be allocated a port, and the command line
-will be replaced with something like:
-
-    ./run_nginx.sh -port 42816
-
-Where 42816 happens to be the allocated port.  Typically, the Executor monitors Processes within
-a task only by liveness of the forked process.  However, when a `health` port is allocated, it will
-also send periodic HTTP health checks.  A task requesting a `health` port must handle the following
-requests:
-
-| HTTP request            | Description                             |
-| ------------            | -----------                             |
-| `GET /health`           | Inquires whether the task is healthy.   |
-| `POST /quitquitquit`    | Task should initiate graceful shutdown. |
-| `POST /abortabortabort` | Final warning task is being killed.     |
-
-Please see the
-[configuration reference](configuration-reference.md#user-content-healthcheckconfig-objects) for
-configuration options for this feature.
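-
-To make the protocol concrete, here is a minimal sketch of a task-side handler for
-these endpoints, using only the Python standard library. It is an illustration,
-not code shipped with Aurora; how the allocated port reaches the process (here, a
-command-line argument) is an assumption of the sketch.
-
-```python
-import sys
-import threading
-from http.server import BaseHTTPRequestHandler, HTTPServer
-
-class HealthHandler(BaseHTTPRequestHandler):
-    """Responds to the health-check and shutdown requests listed above."""
-
-    def _reply(self, body):
-        self.send_response(200)
-        self.send_header('Content-Type', 'text/plain')
-        self.end_headers()
-        self.wfile.write(body.encode('utf-8'))
-
-    def do_GET(self):
-        if self.path == '/health':
-            self._reply('ok')                # report the task as healthy
-        else:
-            self.send_error(404)
-
-    def do_POST(self):
-        if self.path in ('/quitquitquit', '/abortabortabort'):
-            self._reply('shutting down')
-            # Stop the server from another thread so this response is flushed first.
-            threading.Thread(target=self.server.shutdown).start()
-        else:
-            self.send_error(404)
-
-port = int(sys.argv[1]) if len(sys.argv) > 1 else 8080
-HTTPServer(('0.0.0.0', port), HealthHandler).serve_forever()
-```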
-
-#### Snoozing Health Checks
-
-If you need to pause your health check, you can do so by touching a file inside of your sandbox,
-named `.healthchecksnooze`
-
-As long as that file is present, health checks will be disabled, enabling users to gather core dumps
-or other performance measurements without worrying about Aurora's health check killing their
-process.
-
-WARNING: Remember to remove this when you are done, otherwise your instance will have permanently
-disabled health checks.
-
-#### Tearing a task down
-
-The Executor follows an escalation sequence when killing a running task:
-
-  1. If `health` port is not present, skip to (5)
-  2. POST /quitquitquit
-  3. wait 5 seconds
-  4. POST /abortabortabort
-  5. Send SIGTERM (`kill`)
-  6. Send SIGKILL (`kill -9`)
-
-If the Executor notices that all Processes in a Task have aborted during this sequence, it will
-not proceed with subsequent steps.  Note that graceful shutdown is best-effort, and due to the many
-inevitable realities of distributed systems, it may not be performed.
-
-### Giving Priority to Production Tasks: PREEMPTING
-
-Sometimes a Task needs to be interrupted, such as when a non-production
-Task's resources are needed by a higher priority production Task. This
-type of interruption is called a *pre-emption*. When this happens in
-Aurora, the non-production Task is killed and moved into
-the `PREEMPTING` state when both of the following are true:
-
-- The task being killed is a non-production task.
-- The other task is a `PENDING` production task that hasn't been
-  scheduled due to a lack of resources.
-
-Since production tasks are much more important, Aurora kills off the
-non-production task to free up resources for the production task. The
-scheduler UI shows the non-production task was preempted in favor of the
-production task. At some point, tasks in `PREEMPTING` move to `KILLED`.
-
-Note that non-production tasks consuming many resources are likely to be
-preempted in favor of production tasks.
-
-### Natural Termination: FINISHED, FAILED
-
-A `RUNNING` `Task` can terminate without direct user interaction. For
-example, it may be a finite computation that finishes, even something as
-simple as `echo hello world`. Or it could be an exceptional condition in
-a long-lived service. If the `Task` is successful (its underlying
-processes have succeeded with exit status `0` or finished without
-reaching failure limits) it moves into `FINISHED` state. If it finished
-after reaching a set of failure limits, it goes into `FAILED` state.
-
-### Forceful Termination: KILLING, RESTARTING
-
-You can terminate a `Task` by issuing an `aurora kill` command, which
-moves it into `KILLING` state. The scheduler then sends the slave a
-request to terminate the `Task`. If the scheduler receives a successful
-response, it moves the Task into `KILLED` state and never restarts it.
-
-The scheduler has access to a non-public `RESTARTING` state. If a `Task`
-is forced into the `RESTARTING` state, the scheduler kills the
-underlying task but in parallel schedules an identical replacement for
-it.
-
-Configuration
--------------
-
-You define and configure your Jobs (and their Tasks and Processes) in
-Aurora configuration files. Their filenames end with the `.aurora`
-suffix, and you write them in Python making use of the Pystachio
-templating language, along
-with specific Aurora, Mesos, and Thermos commands and methods. See the
-[Configuration Guide and Reference](/documentation/latest/configuration-reference/) and
-[Configuration Tutorial](/documentation/latest/configuration-tutorial/).
-
-Service Discovery
------------------
-
-It is possible for the Aurora executor to announce tasks into ServerSets for
-the purpose of service discovery.  ServerSets use the Zookeeper [group membership pattern](http://zookeeper.apache.org/doc/trunk/recipes.html#sc_outOfTheBox)
-of which there are several reference implementations:
-
-  - [C++](https://github.com/apache/mesos/blob/master/src/zookeeper/group.cpp)
-  - [Java](https://github.com/twitter/commons/blob/master/src/java/com/twitter/common/zookeeper/ServerSetImpl.java#L221)
-  - [Python](https://github.com/twitter/commons/blob/master/src/python/twitter/common/zookeeper/serverset/serverset.py#L51)
-
-These can also be used natively in Finagle using the [ZookeeperServerSetCluster](https://github.com/twitter/finagle/blob/master/finagle-serversets/src/main/scala/com/twitter/finagle/zookeeper/ZookeeperServerSetCluster.scala).
-
-For more information about how to configure announcing, see the [Configuration Reference](/documentation/latest/configuration-reference/).
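-
-As an illustration of the consumer side, a sketch that lists announced endpoints
-with the `kazoo` ZooKeeper client. It assumes the conventional ServerSet layout of
-`member_` znodes whose data is JSON containing a `serviceEndpoint`; the path and
-address are placeholders for whatever your cluster and job use.
-
-```python
-import json
-from kazoo.client import KazooClient
-
-zk = KazooClient(hosts='192.168.33.7:2181')
-zk.start()
-
-SERVERSET_PATH = '/aurora/www-data/devel/hello_world'   # illustrative path
-
-endpoints = []
-for child in zk.get_children(SERVERSET_PATH):
-    if not child.startswith('member_'):
-        continue
-    data, _stat = zk.get('%s/%s' % (SERVERSET_PATH, child))
-    member = json.loads(data.decode('utf-8'))
-    endpoint = member['serviceEndpoint']
-    endpoints.append((endpoint['host'], endpoint['port']))
-
-print(endpoints)
-```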
-
-Creating Jobs
--------------
-
-You create and manipulate Aurora Jobs with the Aurora client, which starts all its
-command line commands with
-`aurora`. See [Aurora Client Commands](/documentation/latest/client-commands/) for details
-about the Aurora Client.
-
-Interacting With Jobs
----------------------
-
-You interact with Aurora jobs either via:
-
-- Read-only Web UIs
-
-  Part of the output from creating a new Job is a URL for the Job's scheduler UI page.
-
-  For example:
-
-      vagrant@precise64:~$ aurora create devcluster/www-data/prod/hello \
-      /vagrant/examples/jobs/hello_world.aurora
-      INFO] Creating job hello
-      INFO] Response from scheduler: OK (message: 1 new tasks pending for job www-data/prod/hello)
-      INFO] Job url: http://precise64:8081/scheduler/www-data/prod/hello
-
-  The "Job url" goes to the Job's scheduler UI page. To go to the overall scheduler UI page,
-  stop at the "scheduler" part of the URL, in this case, `http://precise64:8081/scheduler`
-
-  You can also reach the scheduler UI page via the Client command `aurora open`:
-
-      aurora open [<cluster>[/<role>[/<env>/<job_name>]]]
-
-  If only the cluster is specified, it goes directly to that cluster's scheduler main page.
-  If the role is specified, it goes to the top-level role page. If the full job key is specified,
-  it goes directly to the job page where you can inspect individual tasks.
-
-  Once you click through to a role page, you see Jobs arranged separately by pending jobs, active
-  jobs, and finished jobs. Jobs are arranged by role, typically a service account for production
-  jobs and user accounts for test or development jobs.
-
-- The Aurora Client's command line interface
-
-  Several Client commands have a `-o` option that automatically opens a window to
-  the specified Job's scheduler UI URL. And, as described above, the `open` command also takes
-  you there.
-
-  For a complete list of Aurora Client commands, use `aurora help` and, for specific
-  command help, `aurora help [command]`. **Note**: `aurora help open`
-  returns `"subcommand open not found"` due to our reflection tricks not
-  working on words that are also builtin Python function names. Or see the
-  [Aurora Client Commands](/documentation/latest/client-commands/) document.
diff --git a/source/downloads.html.md b/source/downloads.html.md
deleted file mode 100644
index e544945..0000000
--- a/source/downloads.html.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# Download Aurora
-
-## Current Release
-
-The current released version is *0.6.0-incubating*. [(.tar.gz)](https://dist.apache.org/repos/dist/release/incubator/aurora/0.6.0/apache-aurora-0.6.0-incubating.tar.gz)
-[(md5)](https://dist.apache.org/repos/dist/release/incubator/aurora/0.6.0/apache-aurora-0.6.0-incubating.tar.gz.md5)
-[(sha)](https://dist.apache.org/repos/dist/release/incubator/aurora/0.6.0/apache-aurora-0.6.0-incubating.tar.gz.sha) 
-[(sig)](https://dist.apache.org/repos/dist/release/incubator/aurora/0.6.0/apache-aurora-0.6.0-incubating.tar.gz.asc)
-
-To quickly get started, we recommend using Vagrant and following the [Getting Started guide](/documentation/latest/vagrant/).
-
-## Previous Releases
-*0.5.0-incubating* [(.tar.gz)](https://dist.apache.org/repos/dist/release/incubator/aurora/0.5.0/apache-aurora-0.5.0-incubating.tar.gz)
-[(md5)](https://dist.apache.org/repos/dist/release/incubator/aurora/0.5.0/apache-aurora-0.5.0-incubating.tar.gz.md5)
-[(sha)](https://dist.apache.org/repos/dist/release/incubator/aurora/0.5.0/apache-aurora-0.5.0-incubating.tar.gz.sha) 
-[(sig)](https://dist.apache.org/repos/dist/release/incubator/aurora/0.5.0/apache-aurora-0.5.0-incubating.tar.gz.asc)
-
-## Git
-
-The latest code is available in git via:
-
-    git clone http://git.apache.org/incubator-aurora.git
-
-You can browse the repo on
-[Apache Git](https://git-wip-us.apache.org/repos/asf?p=incubator-aurora.git)
-and the [Github mirror](https://github.com/apache/incubator-aurora).
diff --git a/source/downloads.html.md.erb b/source/downloads.html.md.erb
new file mode 100644
index 0000000..0e6ab57
--- /dev/null
+++ b/source/downloads.html.md.erb
@@ -0,0 +1,40 @@
+# Download Aurora
+
+If you're new, consider starting with the [Getting Started guide](/documentation/latest/getting-started/vagrant/).
+
+<!--
+  NOTE: current releases should use a mirrored URL (www.apache.org/dyn/...),
+  previous releases should live in the archive.
+  For details, see https://www.apache.org/dev/release.html#when-to-archive
+  Signature files are not propagated to mirrors, so we link directly to those.
+-->
+## Binary distributions
+Ubuntu 16.04 [ ![Download](https://api.bintray.com/packages/apache/aurora/ubuntu-xenial/images/download.svg) ](https://bintray.com/apache/aurora/ubuntu-xenial/_latestVersion)
+
+Ubuntu 14.04 [ ![Download](https://api.bintray.com/packages/apache/aurora/debian-ubuntu-trusty/images/download.svg) ](https://bintray.com/apache/aurora/debian-ubuntu-trusty/_latestVersion)
+
+Debian Jessie [ ![Download](https://api.bintray.com/packages/apache/aurora/debian-jessie/images/download.svg) ](https://bintray.com/apache/aurora/debian-jessie/_latestVersion)
+
+CentOS 7 [ ![Download](https://api.bintray.com/packages/apache/aurora/centos-7/images/download.svg) ](https://bintray.com/apache/aurora/centos-7/_latestVersion)
+
+Want packages for a different platform?  Let us [know](/community)!
+
+## Source distributions
+
+<% data.downloads.releases.each do |release| %>
+#### [<%= release.version %>](http://www.apache.org/dyn/closer.cgi?path=/aurora/<%= release.version %>/apache-aurora-<%= release.version %>.tar.gz)
+[signatures](https://www.apache.org/dist/aurora/<%= release.version %>/)
+| [commit log](https://gitbox.apache.org/repos/asf?p=aurora.git;a=log;h=refs/tags/rel/<%= release.version %>)
+<% if release.blog_post %>
+| [release notes](/blog/<%= release.blog_post %>/)
+<% end %>
+<% end %>
+
+### Git
+The latest code is available in git via:
+
+    git clone git://git.apache.org/aurora.git
+
+You can browse the repo on
+[Apache Git](https://gitbox.apache.org/repos/asf?p=aurora.git)
+and the [GitHub mirror](https://github.com/apache/aurora).
diff --git a/source/index.html.md b/source/index.html.md
index 38c8b82..d226f13 100644
--- a/source/index.html.md
+++ b/source/index.html.md
@@ -1,29 +1,34 @@
-<div class="container-fluid section-ltgreen buffer">
+<div class="container-fluid section-dark-flow buffer">
   <div class="container">
-  <div class="row">
-	  <div class="col-md-5 col-md-offset-1"><h2>What does Aurora do?</h2><p>Aurora runs applications and services across a shared pool of machines, and is responsible for keeping them running, forever. When machines experience failure, Aurora intelligently reschedules those jobs onto healthy machines.</p></div>
-	  <div class="col-md-4 col-md-offset-1">
-	<iframe
-    width="420" height="236"
-    src="http://www.youtube-nocookie.com/embed/asd_h6VzaJc?rel=0&amp;start=119&amp;controls=0"
-    allowfullscreen></iframe></div>
-  </div>
+    <div class="row">
+      <div class="col-md-5 col-md-offset-1">
+        <h2>What does Aurora do?</h2>
+        <p>
+        Aurora runs applications and services across a shared pool of machines,
+        and is responsible for keeping them running, forever. When machines experience
+        failure, Aurora intelligently reschedules those jobs onto healthy machines.
+        </p>
+      </div>
+      <div class="col-md-4 col-md-offset-1">
+        <iframe width="420" height="236" src="https://www.youtube.com/embed/asd_h6VzaJc" frameborder="0" allowfullscreen></iframe>
+      </div>
+    </div>
   </div>
 </div>
 <div class="container-fluid buffer">
   <div class="container">
   <h2 class="text-center">Key Aurora Features</h2>
   <div class="row">
-	  <div class="col-md-2 text-center"><p><span class="glyphicon glyphicon-tasks"></span></p></div>
-	  <div class="col-md-4"><h3>Rolling Updates with Automatic Rollback</h3><p>When updating a job, Aurora will detect the health and status of a deployment and automatically rollback if necessary.</p></div>
-	  <div class="col-md-2 text-center"><p><span class="glyphicon glyphicon-th"></span></p></div>
-	  <div class="col-md-4"><h3>Resource Quota and Multi-User Support</h3><p>Aurora has a quota system to provide guaranteed resources for specific applications, and can support multiple users to deploy services.</p></div>
+    <div class="col-md-2 text-center"><p><span class="glyphicon glyphicon-tasks"></span></p></div>
+    <div class="col-md-4"><h3>Rolling Updates with Automatic Rollback</h3><p>When updating a job, Aurora will detect the health and status of a deployment and automatically rollback if necessary.</p></div>
+    <div class="col-md-2 text-center"><p><span class="glyphicon glyphicon-th"></span></p></div>
+    <div class="col-md-4"><h3>Resource Quota and Multi-User Support</h3><p>Aurora has a quota system to provide guaranteed resources for specific applications, and can support multiple users to deploy services.</p></div>
   </div>
   <div class="row">
-	  <div class="col-md-2 text-center"><p><span class="glyphicon glyphicon-list-alt"></span></p></div>
-	  <div class="col-md-4"><h3>Sophisticated DSL</h3><p>Services are highly-configurable via a <a href="/documentation/latest/configuration-tutorial">DSL</a> which supports templating, allowing you to establish common patterns and avoid redundant configurations.</p></div>
-	  <div class="col-md-2 text-center"><p><span class="glyphicon glyphicon-cloud-upload"></span></p></div>
-	  <div class="col-md-4"><h3>Service Registration</h3><p>Aurora <a href="/documentation/latest/configuration-reference/#announcer-objects">announces</a> services to Apache ZooKeeper for discovery by clients like Finagle.</p></div>
+    <div class="col-md-2 text-center"><p><span class="glyphicon glyphicon-list-alt"></span></p></div>
+    <div class="col-md-4"><h3>Sophisticated DSL</h3><p>Services are highly-configurable via a <a href="/documentation/latest/reference/configuration-tutorial/">DSL</a> which supports templating, allowing you to establish common patterns and avoid redundant configurations.</p></div>
+    <div class="col-md-2 text-center"><p><span class="glyphicon glyphicon-cloud-upload"></span></p></div>
+    <div class="col-md-4"><h3>Service Registration</h3><p>Aurora <a href="/documentation/latest/features/service-discovery/">announces</a> services to Apache ZooKeeper for discovery by clients like Finagle.</p></div>
   </div>
  </div>
-</div>
\ No newline at end of file
+</div>
diff --git a/source/layouts/documentation.erb b/source/layouts/documentation.erb
index ff672df..553ec4f 100644
--- a/source/layouts/documentation.erb
+++ b/source/layouts/documentation.erb
@@ -2,13 +2,28 @@
 Documentation
 <% end %>
 <% wrap_layout :layout do %>
-<div class="container-fluid">
-	<div class="container">
-		<div class="row">
-		  <div class="col-md-10 col-md-offset-1">
-		  <%= yield %>
-	      </div>
-	    </row>
-	</div>
+<div class="col-md-12 documentation">
+<%=
+  # Create a variable for the release being rendered.
+  url_match = /\/documentation\/([^\/]+)(\/.*)/.match(current_page.url)
+  rendering_release = url_match.captures[0]
+  doc_page = url_match.captures[1]
+  nil
+=%>
+<h5 class="page-header text-uppercase">Documentation
+<select onChange="window.location.href='/documentation/' + this.value + '<%= doc_page =%>'"
+        value="<%= rendering_release =%>">
+  <% data.downloads.releases.each_with_index do |release, i| %>
+  <option value="<%= release.version %>"
+    <% if release.version == rendering_release %>selected="selected"<% end %>>
+    <%= release.version %>
+    <% if i == 0 %>
+      (latest)
+    <% end %>
+  </option>
+  <% end %>
+</select>
+</h5>
+<%= yield %>
 </div>
-<% end %>
\ No newline at end of file
+<% end %>
diff --git a/source/layouts/layout.erb b/source/layouts/layout.erb
index bb9a73f..38efc27 100644
--- a/source/layouts/layout.erb
+++ b/source/layouts/layout.erb
@@ -21,17 +21,17 @@
 	</script>
   </head>
   <body>
-	  <% if current_page.url == "/" %>
-	    <%= partial "header_homepage" %>
-		<%= yield %>
-      <% else %>
-        <%= partial "header_normal" %>	
-  	  <div class="container-fluid">
-  	  	<div class="container content">
-          <%= yield %>
-  		</div>
-  	  </div>
-	  <% end %>
-      <%= partial "footer" %>
-	</body>
-</html>
\ No newline at end of file
+  <% if current_page.url == "/" %>
+    <%= partial "header_homepage" %>
+    <%= yield %>
+  <% else %>
+    <%= partial "header_normal" %>	
+    <div class="container-fluid">
+      <div class="container content">
+        <%= yield %>
+      </div>
+    </div>
+  <% end %>
+  <%= partial "footer" %>
+  </body>
+</html>
diff --git a/source/layouts/post.erb b/source/layouts/post.erb
index 4c01f8f..53fac60 100644
--- a/source/layouts/post.erb
+++ b/source/layouts/post.erb
@@ -13,14 +13,17 @@
 			<% end %>
 			<span class="author_contact">
 			  <p><strong><%= current_page.data.post_author.display_name %></strong></p>
-			  <p><a href="http://twitter.com/<%= current_page.data.post_author.twitter %>">@<%= current_page.data.post_author.twitter %></a></p>
+			  <% if current_page.data.post_author.twitter %>
+			    <p><a href="http://twitter.com/<%= current_page.data.post_author.twitter %>">@<%= current_page.data.post_author.twitter 
+%></a></p>
+			  <% end %>
 			</span>
 		</span>
 		<p><em>Posted <%= current_article.date.strftime("%B %e, %Y")  %></em></p>
 	</div>
 	
 	<div class="share">
-		<span class="social-share-button"><a href="https://twitter.com/share" class="twitter-share-button" data-via="apachemesos">Tweet</a></span>
+		<span class="social-share-button"><a href="https://twitter.com/share" class="twitter-share-button" data-via="ApacheAurora">Tweet</a></span>
 		
 		<span><script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script></span>
 		
@@ -37,4 +40,4 @@
 	<%= yield %>
 </div>
 </div>
-<% end %>
\ No newline at end of file
+<% end %>
diff --git a/source/sitemap.xml.erb b/source/sitemap.xml.erb
new file mode 100644
index 0000000..d5bedee
--- /dev/null
+++ b/source/sitemap.xml.erb
@@ -0,0 +1,10 @@
+<% pages = sitemap.resources.select { |page| page.path =~ /\.html/ } %>
+<?xml version="1.0" encoding="UTF-8"?>
+<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
+<% pages.each do |page| %>
+  <url>
+    <loc>http://aurora.apache.org/<%= page.destination_path.gsub('index.html','') %></loc>
+    <lastmod><%= Date.today.to_time.iso8601 %></lastmod>
+  </url>
+<% end %>
+</urlset>
\ No newline at end of file